Merge changes from topic "nnapi-op-resolver"

* changes:
  Regenerate tests with the recent changes
  Add new op SLICE
  Add float16 support for ARGMAX and ARGMIN
  Replace uses of kOperationNames[type] with getOperationName(type)
  Add OperationValidationTest for ROI_POOLING and ROI_ALIGN.
  Add new op ROI_POOLING.
  Converts examples from global to static local.
  Fix VTS validation test crashes.
  Add new op ABS
  Modify Gather to use OperationResolver
  Add OperationResolver
  Adds float16 support for DEPTHWISE_CONV_2D.
  Adds float16 support for STRIDED_SLICE.
  Adds float16 support for size/shape operations.
  Support inputs with different quantization parameters in CONCATENATION
  Fix types and add missing tests
diff --git a/nn/common/Android.bp b/nn/common/Android.bp
index afa4289..0d61715 100644
--- a/nn/common/Android.bp
+++ b/nn/common/Android.bp
@@ -20,9 +20,18 @@
     export_include_dirs: ["include"],
 }
 
+cc_defaults {
+    name: "neuralnetworks_operations",
+    srcs: [
+        "OperationResolver.cpp",
+        "operations/Abs.cpp",
+        "operations/Gather.cpp",
+    ],
+}
+
 cc_library_static {
     name: "libneuralnetworks_utils",
-    defaults: ["neuralnetworks_defaults"],
+    defaults: ["neuralnetworks_defaults", "neuralnetworks_operations"],
     host_supported: false,
     vendor_available: true,
     export_include_dirs: ["include"],
@@ -45,7 +54,7 @@
 
 cc_library_static {
     name: "libneuralnetworks_common",
-    defaults: ["neuralnetworks_defaults"],
+    defaults: ["neuralnetworks_defaults", "neuralnetworks_operations"],
     host_supported: false,
     vendor_available: true,
     // b/109953668, disable OpenMP
@@ -69,7 +78,6 @@
         "operations/EmbeddingLookup.cpp",
         "operations/ExpandDims.cpp",
         "operations/FullyConnected.cpp",
-        "operations/Gather.cpp",
         "operations/GenerateProposals.cpp",
         "operations/GroupedConv2D.cpp",
         "operations/HashtableLookup.cpp",
@@ -81,12 +89,14 @@
         "operations/Normalization.cpp",
         "operations/Pooling.cpp",
         "operations/Pow.cpp",
-	"operations/TopK_V2.cpp",
+        "operations/TopK_V2.cpp",
         "operations/QuantizedLSTM.cpp",
         "operations/Reshape.cpp",
         "operations/RNN.cpp",
         "operations/RoiAlign.cpp",
+        "operations/RoiPooling.cpp",
         "operations/SimpleMath.cpp",
+        "operations/Slice.cpp",
         "operations/Split.cpp",
         "operations/StridedSlice.cpp",
         "operations/SVDF.cpp",
@@ -121,6 +131,7 @@
     ],
 
     cflags: [
+        "-DNN_INCLUDE_CPU_IMPLEMENTATION",
         "-DNAMESPACE_FOR_HASH_FUNCTIONS=farmhash",
         "-Werror",
         "-Wall",
@@ -132,7 +143,6 @@
         "-Wno-unused-variable",
         "-Wno-invalid-partial-specialization",
     ],
-
 }
 
 cc_test {
diff --git a/nn/common/CpuExecutor.cpp b/nn/common/CpuExecutor.cpp
index 4e662fb..9d7beb3 100644
--- a/nn/common/CpuExecutor.cpp
+++ b/nn/common/CpuExecutor.cpp
@@ -19,7 +19,9 @@
 #include "CpuExecutor.h"
 
 #include "NeuralNetworks.h"
+#include "OperationResolver.h"
 #include "Operations.h"
+#include "OperationsUtils.h"
 #include "Tracing.h"
 
 #include "Eigen/Core"
@@ -32,6 +34,115 @@
 namespace android {
 namespace nn {
 
+namespace {
+
+class OperationExecutionContext : public IOperationExecutionContext {
+    DISALLOW_IMPLICIT_CONSTRUCTORS(OperationExecutionContext);
+
+   public:
+    OperationExecutionContext(const Operation* operation, RunTimeOperandInfo* operands)
+        : operation(operation), operands(operands) {}
+
+    uint32_t getNumInputs() const override;
+    OperandType getInputType(uint32_t index) const override;
+    Shape getInputShape(uint32_t index) const override;
+    const void* getInputBuffer(uint32_t index) const override;
+
+    uint32_t getNumOutputs() const override;
+    OperandType getOutputType(uint32_t index) const override;
+    Shape getOutputShape(uint32_t index) const override;
+    void* getOutputBuffer(uint32_t index) override;
+
+    // Requests the output buffer to be resized. Updates the output shape.
+    bool resizeOutputTensor(uint32_t index, const Shape& shape) override;
+
+   private:
+    const RunTimeOperandInfo* getInputInfo(uint32_t index) const;
+    const RunTimeOperandInfo* getOutputInfo(uint32_t index) const;
+    RunTimeOperandInfo* getOutputInfo(uint32_t index);
+
+    const Operation* operation;
+    RunTimeOperandInfo* operands;
+};
+
+const RunTimeOperandInfo* OperationExecutionContext::getInputInfo(uint32_t index) const {
+    CHECK(index < operation->inputs.size());
+    return &operands[operation->inputs[index]];
+}
+
+const RunTimeOperandInfo* OperationExecutionContext::getOutputInfo(uint32_t index) const {
+    CHECK(index < operation->outputs.size());
+    return &operands[operation->outputs[index]];
+}
+
+RunTimeOperandInfo* OperationExecutionContext::getOutputInfo(uint32_t index) {
+    CHECK(index < operation->outputs.size());
+    return &operands[operation->outputs[index]];
+}
+
+OperandType OperationExecutionContext::getInputType(uint32_t index) const {
+    return getInputInfo(index)->type;
+}
+
+Shape OperationExecutionContext::getInputShape(uint32_t index) const {
+    return getInputInfo(index)->shape();
+}
+
+const void* OperationExecutionContext::getInputBuffer(uint32_t index) const {
+    return getInputInfo(index)->buffer;
+}
+
+OperandType OperationExecutionContext::getOutputType(uint32_t index) const {
+    return getOutputInfo(index)->type;
+}
+
+Shape OperationExecutionContext::getOutputShape(uint32_t index) const {
+    return getOutputInfo(index)->shape();
+}
+
+void* OperationExecutionContext::getOutputBuffer(uint32_t index) {
+    return getOutputInfo(index)->buffer;
+}
+
+uint32_t OperationExecutionContext::getNumInputs() const {
+    return operation->inputs.size();
+}
+
+uint32_t OperationExecutionContext::getNumOutputs() const {
+    return operation->outputs.size();
+}
+
+// Updates the RunTimeOperandInfo with the newly calculated shape.
+// Allocate the buffer if we need to.
+bool setInfoAndAllocateIfNeeded(RunTimeOperandInfo* info, const Shape& shape) {
+    // For user-provided model output operands, the parameters must match the Shape
+    // calculated from the preparation step.
+    if (info->lifetime == OperandLifeTime::MODEL_OUTPUT) {
+        NN_RET_CHECK(info->type == shape.type) << "Invalid type for model output";
+        NN_RET_CHECK(info->dimensions == shape.dimensions) << "Invalid dimensions for model output";
+        if (info->type == OperandType::TENSOR_QUANT8_ASYMM) {
+            NN_RET_CHECK_EQ(info->scale, shape.scale) << "Invalid scale for model output";
+            NN_RET_CHECK_EQ(info->zeroPoint, shape.offset) << "Invalid zeroPoint for model output";
+        }
+    }
+    info->type = shape.type;
+    info->dimensions = shape.dimensions;
+    info->scale = shape.scale;
+    info->zeroPoint = shape.offset;
+    if (info->lifetime == OperandLifeTime::TEMPORARY_VARIABLE && info->buffer == nullptr) {
+        uint32_t length = sizeOfData(info->type, info->dimensions);
+        info->buffer = new uint8_t[length];
+        NN_RET_CHECK(info->buffer != nullptr);
+    }
+    return true;
+}
+
+bool OperationExecutionContext::resizeOutputTensor(uint32_t index, const Shape& shape) {
+    return setInfoAndAllocateIfNeeded(getOutputInfo(index), shape);
+}
+
+}  // namespace
+
 // TODO: short term, make share memory mapping and updating a utility function.
 // TODO: long term, implement mmap_fd as a hidl IMemory service.
 RunTimePoolInfo::RunTimePoolInfo(const hidl_memory& hidlMemory, bool* fail) {
@@ -156,38 +267,6 @@
     }
     return true;
 }
-
-// Updates the RunTimeOperandInfo with the newly calculated shape.
-// Allocate the buffer if we need to.
-static bool setInfoAndAllocateIfNeeded(RunTimeOperandInfo* info, const Shape& shape) {
-    // For user-provided model output operands, the parameters must match the Shape
-    // calculated from the preparation step.
-    if (info->lifetime == OperandLifeTime::MODEL_OUTPUT) {
-        if (info->type != shape.type ||
-            info->dimensions != shape.dimensions) {
-            LOG(ERROR) << "Invalid type or dimensions for model output";
-            return false;
-        }
-        if (info->type == OperandType::TENSOR_QUANT8_ASYMM &&
-            (info->scale != shape.scale || info->zeroPoint != shape.offset)) {
-            LOG(ERROR) << "Invalid scale or zeroPoint for model output";
-            return false;
-        }
-    }
-    info->type = shape.type;
-    info->dimensions = shape.dimensions;
-    info->scale = shape.scale;
-    info->zeroPoint = shape.offset;
-    if (info->lifetime == OperandLifeTime::TEMPORARY_VARIABLE && info->buffer == nullptr) {
-        uint32_t length = sizeOfData(info->type, info->dimensions);
-        info->buffer = new uint8_t[length];
-        if (info->buffer == nullptr) {
-            return false;
-        }
-    }
-    return true;
-}
-
 template <typename T>
 inline bool convertToNhwcImpl(T* to, const T* from, const std::vector<uint32_t>& fromDim) {
     uint32_t spatialSize = fromDim[2] * fromDim[3];
@@ -238,6 +317,10 @@
         if (from.type == OperandType::TENSOR_FLOAT32) {
             return convertToNhwcImpl<float>(reinterpret_cast<float*>(to.buffer),
                                             reinterpret_cast<const float*>(from.buffer), fromDim);
+        } else if (from.type == OperandType::TENSOR_FLOAT16) {
+            return convertToNhwcImpl<_Float16>(reinterpret_cast<_Float16*>(to.buffer),
+                                               reinterpret_cast<const _Float16*>(from.buffer),
+                                               fromDim);
         } else if (from.type == OperandType::TENSOR_QUANT8_ASYMM) {
             return convertToNhwcImpl<uint8_t>(reinterpret_cast<uint8_t*>(to.buffer),
                                               reinterpret_cast<const uint8_t*>(from.buffer),
@@ -271,6 +354,10 @@
         if (from.type == OperandType::TENSOR_FLOAT32) {
             return convertFromNhwcImpl<float>(reinterpret_cast<float*>(to.buffer),
                                               reinterpret_cast<const float*>(from.buffer), fromDim);
+        } else if (from.type == OperandType::TENSOR_FLOAT16) {
+            return convertFromNhwcImpl<_Float16>(reinterpret_cast<_Float16*>(to.buffer),
+                                                 reinterpret_cast<const _Float16*>(from.buffer),
+                                                 fromDim);
         } else if (from.type == OperandType::TENSOR_QUANT8_ASYMM) {
             return convertFromNhwcImpl<uint8_t>(reinterpret_cast<uint8_t*>(to.buffer),
                                                 reinterpret_cast<const uint8_t*>(from.buffer),
@@ -653,6 +740,14 @@
                         padding_right, padding_top, padding_bottom, stride_width, stride_height,
                         depth_multiplier, activation, reinterpret_cast<float*>(output_tmp.buffer),
                         outShape);
+            } else if (input_tmp.type == OperandType::TENSOR_FLOAT16) {
+                success = depthwiseConvFloat16(
+                        reinterpret_cast<const _Float16*>(input_tmp.buffer), input_tmp.shape(),
+                        reinterpret_cast<const _Float16*>(filter.buffer), filter.shape(),
+                        reinterpret_cast<const _Float16*>(bias.buffer), bias.shape(), padding_left,
+                        padding_right, padding_top, padding_bottom, stride_width, stride_height,
+                        depth_multiplier, activation,
+                        reinterpret_cast<_Float16*>(output_tmp.buffer), outShape);
             } else if (input_tmp.type == OperandType::TENSOR_QUANT8_ASYMM) {
                 success = depthwiseConvQuant8(
                         reinterpret_cast<const uint8_t*>(input_tmp.buffer), input_tmp.shape(),
@@ -662,7 +757,6 @@
                         depth_multiplier, activation, reinterpret_cast<uint8_t*>(output_tmp.buffer),
                         outShape);
             }
-
             if (data_layout) {
                 output_tmp_guard.reset(output_tmp.buffer);
             }
@@ -1334,13 +1428,9 @@
 
             success = reshapePrepare(input.shape(),
                                      reinterpret_cast<const int32_t*>(targetShape.buffer),
-                                     getNumberOfElements(targetShape.shape()),
-                                     &outShape) &&
+                                     getNumberOfElements(targetShape.shape()), &outShape) &&
                       setInfoAndAllocateIfNeeded(&output, outShape) &&
-                      reshapeGeneric(reinterpret_cast<const void*>(input.buffer),
-                                     input.shape(),
-                                     reinterpret_cast<void*>(output.buffer),
-                                     outShape);
+                      copyData(input.buffer, input.shape(), output.buffer, outShape);
         } break;
         case OperationType::RESIZE_BILINEAR: {
             const size_t inCount = ins.size();
@@ -1364,13 +1454,18 @@
             output_tmp.lifetime = OperandLifeTime::TEMPORARY_VARIABLE;
             output_tmp.buffer = data_layout ? nullptr : output.buffer;
 
+            if (!resizeBilinearPrepare(input_tmp.shape(), width, height, &outShape) ||
+                !setInfoAndAllocateIfNeeded(&output_tmp, outShape)) {
+                break;
+            }
             if (input_tmp.type == OperandType::TENSOR_FLOAT32) {
-                success = resizeBilinearPrepare(input_tmp.shape(), width, height, &outShape) &&
-                          setInfoAndAllocateIfNeeded(&output_tmp, outShape) &&
-                          resizeBilinearFloat32(reinterpret_cast<const float*>(input_tmp.buffer),
-                                                input_tmp.shape(),
-                                                reinterpret_cast<float*>(output_tmp.buffer),
-                                                outShape);
+                success = resizeBilinearFloat32(
+                        reinterpret_cast<const float*>(input_tmp.buffer), input_tmp.shape(),
+                        reinterpret_cast<float*>(output_tmp.buffer), outShape);
+            } else if (input_tmp.type == OperandType::TENSOR_FLOAT16) {
+                success = resizeBilinearFloat16(
+                        reinterpret_cast<const _Float16*>(input_tmp.buffer), input_tmp.shape(),
+                        reinterpret_cast<_Float16*>(output_tmp.buffer), outShape);
             }
 
             if (data_layout) {
@@ -1401,12 +1496,34 @@
             }
             output_tmp.lifetime = OperandLifeTime::TEMPORARY_VARIABLE;
             output_tmp.buffer = data_layout ? nullptr : output.buffer;
-
-            success = depthToSpacePrepare(input_tmp.shape(), blockSize, &outShape) &&
-                      setInfoAndAllocateIfNeeded(&output_tmp, outShape) &&
-                      depthToSpaceGeneric(input_tmp.buffer, input_tmp.shape(), blockSize,
-                                          output_tmp.buffer, outShape);
-
+            if (!depthToSpacePrepare(input_tmp.shape(), blockSize, &outShape) ||
+                !setInfoAndAllocateIfNeeded(&output_tmp, outShape)) {
+                break;
+            }
+            switch (input_tmp.type) {
+                case OperandType::TENSOR_FLOAT32: {
+                    success = depthToSpaceGeneric(
+                            reinterpret_cast<const float*>(input_tmp.buffer), input_tmp.shape(),
+                            blockSize, reinterpret_cast<float*>(output_tmp.buffer), outShape);
+                    break;
+                }
+                case OperandType::TENSOR_FLOAT16: {
+                    success = depthToSpaceGeneric(
+                            reinterpret_cast<const _Float16*>(input_tmp.buffer), input_tmp.shape(),
+                            blockSize, reinterpret_cast<_Float16*>(output_tmp.buffer), outShape);
+                    break;
+                }
+                case OperandType::TENSOR_QUANT8_ASYMM: {
+                    success = depthToSpaceGeneric(
+                            reinterpret_cast<const uint8_t*>(input_tmp.buffer), input_tmp.shape(),
+                            blockSize, reinterpret_cast<uint8_t*>(output_tmp.buffer), outShape);
+                    break;
+                }
+                default: {
+                    LOG(ERROR) << "Unsupported data type";
+                    success = false;
+                }
+            }
             if (data_layout) {
                 output_tmp_guard.reset(output_tmp.buffer);
             }
@@ -1436,11 +1553,34 @@
             output_tmp.lifetime = OperandLifeTime::TEMPORARY_VARIABLE;
             output_tmp.buffer = data_layout ? nullptr : output.buffer;
 
-            success = spaceToDepthPrepare(input_tmp.shape(), blockSize, &outShape) &&
-                      setInfoAndAllocateIfNeeded(&output_tmp, outShape) &&
-                      spaceToDepthGeneric(input_tmp.buffer, input_tmp.shape(), blockSize,
-                                          output_tmp.buffer, outShape);
-
+            if (!spaceToDepthPrepare(input_tmp.shape(), blockSize, &outShape) ||
+                !setInfoAndAllocateIfNeeded(&output_tmp, outShape)) {
+                break;
+            }
+            switch (input_tmp.type) {
+                case OperandType::TENSOR_FLOAT32: {
+                    success = spaceToDepthGeneric(
+                            reinterpret_cast<const float*>(input_tmp.buffer), input_tmp.shape(),
+                            blockSize, reinterpret_cast<float*>(output_tmp.buffer), outShape);
+                    break;
+                }
+                case OperandType::TENSOR_FLOAT16: {
+                    success = spaceToDepthGeneric(
+                            reinterpret_cast<const _Float16*>(input_tmp.buffer), input_tmp.shape(),
+                            blockSize, reinterpret_cast<_Float16*>(output_tmp.buffer), outShape);
+                    break;
+                }
+                case OperandType::TENSOR_QUANT8_ASYMM: {
+                    success = spaceToDepthGeneric(
+                            reinterpret_cast<const uint8_t*>(input_tmp.buffer), input_tmp.shape(),
+                            blockSize, reinterpret_cast<uint8_t*>(output_tmp.buffer), outShape);
+                    break;
+                }
+                default: {
+                    LOG(ERROR) << "Unsupported data type";
+                    success = false;
+                }
+            }
             if (data_layout) {
                 output_tmp_guard.reset(output_tmp.buffer);
             }
@@ -1577,14 +1717,39 @@
             output_tmp.lifetime = OperandLifeTime::TEMPORARY_VARIABLE;
             output_tmp.buffer = data_layout ? nullptr : output.buffer;
 
-            success = batchToSpacePrepare(input_tmp.shape(),
-                                          reinterpret_cast<const int32_t*>(blockSize.buffer),
-                                          blockSize.shape(), &outShape) &&
-                      setInfoAndAllocateIfNeeded(&output_tmp, outShape) &&
-                      batchToSpaceGeneric(input_tmp.buffer, input_tmp.shape(),
-                                          reinterpret_cast<const int32_t*>(blockSize.buffer),
-                                          output_tmp.buffer, outShape);
-
+            if (!batchToSpacePrepare(input_tmp.shape(),
+                                     reinterpret_cast<const int32_t*>(blockSize.buffer),
+                                     blockSize.shape(), &outShape) ||
+                !setInfoAndAllocateIfNeeded(&output_tmp, outShape)) {
+                break;
+            }
+            switch (input_tmp.type) {
+                case OperandType::TENSOR_FLOAT32: {
+                    success = batchToSpaceGeneric(
+                            reinterpret_cast<const float*>(input_tmp.buffer), input_tmp.shape(),
+                            reinterpret_cast<const int32_t*>(blockSize.buffer),
+                            reinterpret_cast<float*>(output_tmp.buffer), outShape);
+                    break;
+                }
+                case OperandType::TENSOR_FLOAT16: {
+                    success = batchToSpaceGeneric(
+                            reinterpret_cast<const _Float16*>(input_tmp.buffer), input_tmp.shape(),
+                            reinterpret_cast<const int32_t*>(blockSize.buffer),
+                            reinterpret_cast<_Float16*>(output_tmp.buffer), outShape);
+                    break;
+                }
+                case OperandType::TENSOR_QUANT8_ASYMM: {
+                    success = batchToSpaceGeneric(
+                            reinterpret_cast<const uint8_t*>(input_tmp.buffer), input_tmp.shape(),
+                            reinterpret_cast<const int32_t*>(blockSize.buffer),
+                            reinterpret_cast<uint8_t*>(output_tmp.buffer), outShape);
+                    break;
+                }
+                default: {
+                    LOG(ERROR) << "Unsupported data type";
+                    success = false;
+                }
+            }
             if (data_layout) {
                 output_tmp_guard.reset(output_tmp.buffer);
             }
@@ -1615,16 +1780,43 @@
             output_tmp.lifetime = OperandLifeTime::TEMPORARY_VARIABLE;
             output_tmp.buffer = data_layout ? nullptr : output.buffer;
 
-            success = spaceToBatchPrepare(
-                              input_tmp.shape(), reinterpret_cast<const int32_t*>(blockSize.buffer),
-                              blockSize.shape(), reinterpret_cast<const int32_t*>(paddings.buffer),
-                              paddings.shape(), &outShape) &&
-                      setInfoAndAllocateIfNeeded(&output_tmp, outShape) &&
-                      spaceToBatchGeneric(input_tmp.buffer, input_tmp.shape(),
-                                          reinterpret_cast<const int32_t*>(blockSize.buffer),
-                                          reinterpret_cast<const int32_t*>(paddings.buffer),
-                                          paddings.shape(), output_tmp.buffer, outShape);
-
+            if (!spaceToBatchPrepare(
+                        input_tmp.shape(), reinterpret_cast<const int32_t*>(blockSize.buffer),
+                        blockSize.shape(), reinterpret_cast<const int32_t*>(paddings.buffer),
+                        paddings.shape(), &outShape) ||
+                !setInfoAndAllocateIfNeeded(&output_tmp, outShape)) {
+                break;
+            }
+            switch (input_tmp.type) {
+                case OperandType::TENSOR_FLOAT32: {
+                    success = spaceToBatchGeneric(
+                            reinterpret_cast<const float*>(input_tmp.buffer), input_tmp.shape(),
+                            reinterpret_cast<const int32_t*>(blockSize.buffer),
+                            reinterpret_cast<const int32_t*>(paddings.buffer), paddings.shape(),
+                            reinterpret_cast<float*>(output_tmp.buffer), outShape);
+                    break;
+                }
+                case OperandType::TENSOR_FLOAT16: {
+                    success = spaceToBatchGeneric(
+                            reinterpret_cast<const _Float16*>(input_tmp.buffer), input_tmp.shape(),
+                            reinterpret_cast<const int32_t*>(blockSize.buffer),
+                            reinterpret_cast<const int32_t*>(paddings.buffer), paddings.shape(),
+                            reinterpret_cast<_Float16*>(output_tmp.buffer), outShape);
+                    break;
+                }
+                case OperandType::TENSOR_QUANT8_ASYMM: {
+                    success = spaceToBatchGeneric(
+                            reinterpret_cast<const uint8_t*>(input_tmp.buffer), input_tmp.shape(),
+                            reinterpret_cast<const int32_t*>(blockSize.buffer),
+                            reinterpret_cast<const int32_t*>(paddings.buffer), paddings.shape(),
+                            reinterpret_cast<uint8_t*>(output_tmp.buffer), outShape);
+                    break;
+                }
+                default: {
+                    LOG(ERROR) << "Unsupported data type";
+                    success = false;
+                }
+            }
             if (data_layout) {
                 output_tmp_guard.reset(output_tmp.buffer);
             }
@@ -1652,14 +1844,20 @@
             }
             if (input.type == OperandType::TENSOR_FLOAT32) {
                 float pad_value = isV2 ? getScalarData<float>(mOperands[ins[2]]) : 0;
-                success = padFloat32(reinterpret_cast<const float*>(input.buffer), input.shape(),
+                success = padGeneric(reinterpret_cast<const float*>(input.buffer), input.shape(),
                                      reinterpret_cast<const int32_t*>(paddings.buffer), pad_value,
                                      reinterpret_cast<float*>(output.buffer), outShape);
+            } else if (input.type == OperandType::TENSOR_FLOAT16) {
+                float pad_value = isV2 ? getScalarData<float>(mOperands[ins[2]]) : 0;
+                success = padGeneric(reinterpret_cast<const _Float16*>(input.buffer), input.shape(),
+                                     reinterpret_cast<const int32_t*>(paddings.buffer),
+                                     static_cast<_Float16>(pad_value),
+                                     reinterpret_cast<_Float16*>(output.buffer), outShape);
             } else if (input.type == OperandType::TENSOR_QUANT8_ASYMM) {
                 uint8_t pad_value = isV2 ? getScalarData<uint8_t>(mOperands[ins[2]]) : 0;
-                success = padQuant8(input.buffer, input.shape(),
-                                    reinterpret_cast<const int32_t*>(paddings.buffer), pad_value,
-                                    output.buffer, outShape);
+                success = padGeneric(input.buffer, input.shape(),
+                                     reinterpret_cast<const int32_t*>(paddings.buffer), pad_value,
+                                     output.buffer, outShape);
             }
         } break;
         case OperationType::CAST: {
@@ -1687,13 +1885,9 @@
 
             success = squeezePrepare(input.shape(),
                                      reinterpret_cast<const int32_t*>(squeezeDims.buffer),
-                                     squeezeDims.shape(),
-                                     &outShape) &&
+                                     squeezeDims.shape(), &outShape) &&
                       setInfoAndAllocateIfNeeded(&output, outShape) &&
-                      squeezeGeneric(input.buffer,
-                                     input.shape(),
-                                     output.buffer,
-                                     outShape);
+                      copyData(input.buffer, input.shape(), output.buffer, outShape);
         } break;
         case OperationType::TRANSPOSE: {
             if (ins.size() != 2 || outs.size() != 1 ||
@@ -1708,17 +1902,39 @@
             RunTimeOperandInfo& output = mOperands[outs[0]];
             Shape outShape = output.shape();
 
-            success = transposePrepare(input.shape(),
-                                       reinterpret_cast<const int32_t*>(perms.buffer),
-                                       perms.shape(),
-                                       &outShape) &&
-                      setInfoAndAllocateIfNeeded(&output, outShape) &&
-                      transposeGeneric(input.buffer,
-                                       input.shape(),
-                                       reinterpret_cast<const int32_t*>(perms.buffer),
-                                       perms.shape(),
-                                       output.buffer,
-                                       outShape);
+            if (!transposePrepare(input.shape(), reinterpret_cast<const int32_t*>(perms.buffer),
+                                  perms.shape(), &outShape) ||
+                !setInfoAndAllocateIfNeeded(&output, outShape)) {
+                break;
+            }
+            switch (input.type) {
+                case OperandType::TENSOR_FLOAT32: {
+                    success = transposeGeneric(
+                            reinterpret_cast<const float*>(input.buffer), input.shape(),
+                            reinterpret_cast<const int32_t*>(perms.buffer), perms.shape(),
+                            reinterpret_cast<float*>(output.buffer), outShape);
+                    break;
+                }
+                case OperandType::TENSOR_FLOAT16: {
+                    success = transposeGeneric(
+                            reinterpret_cast<const _Float16*>(input.buffer), input.shape(),
+                            reinterpret_cast<const int32_t*>(perms.buffer), perms.shape(),
+                            reinterpret_cast<_Float16*>(output.buffer), outShape);
+                    break;
+                }
+                case OperandType::TENSOR_QUANT8_ASYMM: {
+                    success = transposeGeneric(
+                            reinterpret_cast<const uint8_t*>(input.buffer), input.shape(),
+                            reinterpret_cast<const int32_t*>(perms.buffer), perms.shape(),
+                            reinterpret_cast<uint8_t*>(output.buffer), outShape);
+                    break;
+                }
+                default: {
+                    LOG(ERROR) << "Unsupported data type";
+                    success = false;
+                }
+            }
+
         } break;
         case OperationType::STRIDED_SLICE: {
             if (!allParametersPresent(7, 1)) {
@@ -1851,23 +2067,6 @@
                                      axis, isArgMin,
                                      output.buffer, outShape);
         } break;
-        case OperationType::GATHER: {
-            if (!allParametersPresent(3, 1)) {
-                return ANEURALNETWORKS_BAD_DATA;
-            }
-            const RunTimeOperandInfo& input = mOperands[ins[0]];
-            int32_t axis = getScalarData<int32_t>(mOperands[ins[1]]);
-            const RunTimeOperandInfo& indices = mOperands[ins[2]];
-
-            RunTimeOperandInfo& output = mOperands[outs[0]];
-            Shape outShape = output.shape();
-
-            success = gather::prepare(input.shape(), axis, indices.shape(), &outShape) &&
-                      setInfoAndAllocateIfNeeded(&output, outShape) &&
-                      gather::compute(input.buffer, input.shape(), axis,
-                                      reinterpret_cast<const int32_t*>(indices.buffer),
-                                      indices.shape(), output.buffer, outShape);
-        } break;
         case OperationType::EXPAND_DIMS: {
             if (!allParametersPresent(2, 1)) {
                 return ANEURALNETWORKS_BAD_DATA;
@@ -1997,6 +2196,47 @@
                 break;
             }
         } break;
+        case OperationType::ROI_POOLING: {
+            if (!allParametersPresent(5, 1)) {
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            const RunTimeOperandInfo& input = mOperands[ins[0]];
+            const RunTimeOperandInfo& roi = mOperands[ins[1]];
+            const RunTimeOperandInfo& outputShape = mOperands[ins[2]];
+            const float spatialScale = getScalarData<float>(mOperands[ins[3]]);
+            const bool data_layout = getScalarData<bool>(mOperands[ins[4]]);
+
+            RunTimeOperandInfo& out = mOperands[outs[0]];
+            Shape outShape = out.shape();
+
+            RunTimeOperandInfo input_tmp, out_tmp;
+            std::unique_ptr<uint8_t[]> input_tmp_guard, out_tmp_guard;
+            if (!convertToNhwc(input_tmp, input, input_tmp_guard, data_layout)) {
+                success = false;
+                break;
+            }
+            out_tmp.lifetime = OperandLifeTime::TEMPORARY_VARIABLE;
+            out_tmp.buffer = data_layout ? nullptr : out.buffer;
+
+            if (!roiAlignPrepare(input_tmp.shape(), reinterpret_cast<const float*>(roi.buffer),
+                                 roi.shape(), reinterpret_cast<const int32_t*>(outputShape.buffer),
+                                 outputShape.shape(), spatialScale, &outShape) ||
+                !setInfoAndAllocateIfNeeded(&out_tmp, outShape)) {
+                success = false;
+                break;
+            }
+
+            success = roiPoolingGeneric(input_tmp.buffer, input_tmp.shape(), roi.buffer,
+                                        roi.shape(), spatialScale, out_tmp.buffer, outShape);
+
+            if (data_layout) {
+                out_tmp_guard.reset(out_tmp.buffer);
+            }
+            if (!success || !convertFromNhwc(out, out_tmp, data_layout)) {
+                success = false;
+                break;
+            }
+        } break;
         case OperationType::HEATMAP_MAX_KEYPOINT: {
             if (!allParametersPresent(3, 1)) {
                 return ANEURALNETWORKS_BAD_DATA;
@@ -2412,9 +2652,41 @@
                       topk_v2::eval(input.buffer, input.shape(), k, values.buffer, valuesShape,
                                     indices.buffer, indicesShape);
         } break;
-        default:
-            nnAssert(false);
-            break;
+        case OperationType::SLICE: {
+            if (!allParametersPresent(3, 1)) {
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            const RunTimeOperandInfo& input = mOperands[ins[0]];
+            const RunTimeOperandInfo& begin = mOperands[ins[1]];
+            const RunTimeOperandInfo& size = mOperands[ins[2]];
+
+            RunTimeOperandInfo& output = mOperands[outs[0]];
+            Shape outputShape = output.shape();
+
+            const int32_t* beginBuffer = reinterpret_cast<int32_t*>(begin.buffer);
+            const int32_t* sizeBuffer = reinterpret_cast<int32_t*>(size.buffer);
+
+            success = slice::prepare(input.shape(), beginBuffer, begin.shape(), sizeBuffer,
+                                     size.shape(), &outputShape) &&
+                      setInfoAndAllocateIfNeeded(&output, outputShape) &&
+                      slice::eval(input.buffer, input.shape(), beginBuffer, begin.shape(),
+                                  sizeBuffer, size.shape(), output.buffer, output.shape());
+        } break;
+        default: {
+            const OperationRegistration* operationRegistration =
+                    OperationResolver::get()->findOperation(operation.type);
+            if (operationRegistration == nullptr) {
+                LOG(ERROR) << getOperationName(operation.type) << " not registered";
+            } else if (operationRegistration->prepare == nullptr ||
+                       operationRegistration->execute == nullptr) {
+                LOG(ERROR) << "Incomplete operation registration: "
+                           << getOperationName(operation.type);
+            } else {
+                OperationExecutionContext context(&operation, mOperands.data());
+                success = operationRegistration->prepare(&context) &&
+                          operationRegistration->execute(&context);
+            }
+        }
     }
     if (!success) {
         LOG(ERROR) << getOperationName(operation.type) << " failed.";
diff --git a/nn/common/OperationResolver.cpp b/nn/common/OperationResolver.cpp
new file mode 100644
index 0000000..3c7d2ee
--- /dev/null
+++ b/nn/common/OperationResolver.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "OperationResolver"
+
+#include "OperationResolver.h"
+
+#include "NeuralNetworks.h"
+
+namespace android {
+namespace nn {
+
+// Forward declarations of the per-operation registration functions. Each
+// operations/*.cpp file defines one of these, returning a pointer to a
+// statically allocated OperationRegistration describing that operation.
+// TODO(b/119608412): Find a way to not reference every operation here.
+const OperationRegistration* register_ABS();
+const OperationRegistration* register_GATHER();
+
+OperationResolver::OperationResolver() {
+    registerOperation(register_ABS());
+    registerOperation(register_GATHER());
+}
+
+// Returns the registration for |operationType|, or nullptr when the type is
+// outside the non-OEM operation range or has no registered implementation.
+const OperationRegistration* OperationResolver::findOperation(OperationType operationType) const {
+    auto index = static_cast<int32_t>(operationType);
+    if (index < 0 || index >= kNumberOfOperationTypes) {
+        return nullptr;
+    }
+    return mRegistrations[index];
+}
+
+// Installs |operationRegistration| in the slot indexed by its operation type.
+// CHECK-fails on a null, out-of-range, or duplicate registration.
+void OperationResolver::registerOperation(const OperationRegistration* operationRegistration) {
+    CHECK(operationRegistration != nullptr);
+    auto index = static_cast<int32_t>(operationRegistration->type);
+    CHECK_LE(0, index);
+    CHECK_LT(index, kNumberOfOperationTypes);
+    CHECK(mRegistrations[index] == nullptr);
+    mRegistrations[index] = operationRegistration;
+}
+
+}  // namespace nn
+}  // namespace android
diff --git a/nn/common/OperationsUtils.cpp b/nn/common/OperationsUtils.cpp
index f121842..877b7e3 100644
--- a/nn/common/OperationsUtils.cpp
+++ b/nn/common/OperationsUtils.cpp
@@ -25,6 +25,53 @@
 namespace android {
 namespace nn {
 
+namespace {
+
+// Checks that there are exactly expectedTypes.size() operands and that each
+// one has the expected type. |tag| ("input" or "output") and the operand
+// index are included in the failure message. Returns false (with a log) on
+// any mismatch.
+bool validateOperandTypes(const std::vector<OperandType>& expectedTypes, const char* tag,
+                          uint32_t operandCount,
+                          std::function<OperandType(uint32_t)> getOperandType) {
+    NN_RET_CHECK_EQ(operandCount, expectedTypes.size());
+    for (uint32_t i = 0; i < operandCount; ++i) {
+        OperandType type = getOperandType(i);
+        NN_RET_CHECK(type == expectedTypes[i])
+                << "Invalid " << tag << " tensor type " << toString(type) << " for " << tag << " "
+                << i << ", expected " << toString(expectedTypes[i]);
+    }
+    return true;
+}
+
+}  // namespace
+
+// Validates that the context's inputs exactly match |expectedTypes|.
+bool validateInputTypes(const IOperationValidationContext* context,
+                        const std::vector<OperandType>& expectedTypes) {
+    return validateOperandTypes(expectedTypes, "input", context->getNumInputs(),
+                                [context](uint32_t index) { return context->getInputType(index); });
+}
+
+// Validates that the context's outputs exactly match |expectedTypes|.
+bool validateOutputTypes(const IOperationValidationContext* context,
+                         const std::vector<OperandType>& expectedTypes) {
+    return validateOperandTypes(
+            expectedTypes, "output", context->getNumOutputs(),
+            [context](uint32_t index) { return context->getOutputType(index); });
+}
+
+// Fails (with a log message) when the context's HAL version is older than
+// |minSupportedHalVersion|; succeeds otherwise.
+bool validateHalVersion(const IOperationValidationContext* context,
+                        HalVersion minSupportedHalVersion) {
+    if (context->getHalVersion() < minSupportedHalVersion) {
+        NN_RET_CHECK_FAIL() << "The given inputs and outputs are only supported in "
+                            << toString(minSupportedHalVersion) << " and later (validating using "
+                            << toString(context->getHalVersion()) << ")";
+    }
+    return true;
+}
+
 bool SameShape(const Shape& in1, const Shape& in2) {
     if (in1.type != in2.type || in1.dimensions.size() != in2.dimensions.size()) {
         return false;
@@ -407,31 +447,24 @@
     return true;
 }
 
-bool concatenationPrepare(const std::vector<Shape>& inputShapes,
-                          int32_t axis,
-                          Shape* output) {
-
+bool concatenationPrepare(const std::vector<Shape>& inputShapes, int32_t axis, Shape* output) {
     int num_inputs = inputShapes.size();
     OperandType input_type = inputShapes[0].type;
     uint32_t num_dimensions = getNumberOfDimensions(inputShapes[0]);
 
-    NN_OPS_CHECK(axis >= 0);
-    NN_OPS_CHECK(axis < (int32_t)num_dimensions);
+    NN_RET_CHECK(axis >= 0);
+    NN_RET_CHECK(axis < (int32_t)num_dimensions);
 
     int sumAxis = getSizeOfDimension(inputShapes[0], axis);
     for (int i = 1; i < num_inputs; ++i) {
-        NN_OPS_CHECK(getNumberOfDimensions(inputShapes[i]) == num_dimensions);
-        NN_OPS_CHECK(inputShapes[i].type == inputShapes[0].type);
-        if (input_type == OperandType::TENSOR_QUANT8_ASYMM) {
-            NN_OPS_CHECK(inputShapes[0].offset == inputShapes[i].offset);
-            NN_OPS_CHECK(inputShapes[0].scale == inputShapes[i].scale);
-        }
+        NN_RET_CHECK(getNumberOfDimensions(inputShapes[i]) == num_dimensions);
+        NN_RET_CHECK(inputShapes[i].type == inputShapes[0].type);
         for (int d = 0; d < (int32_t)num_dimensions; ++d) {
             if (d == axis) {
                 sumAxis += getSizeOfDimension(inputShapes[i], axis);
             } else {
-                NN_OPS_CHECK(getSizeOfDimension(inputShapes[0], d) ==
-                           getSizeOfDimension(inputShapes[i], d));
+                NN_RET_CHECK_EQ(getSizeOfDimension(inputShapes[0], d),
+                                getSizeOfDimension(inputShapes[i], d));
             }
         }
     }
@@ -440,15 +473,9 @@
     output->dimensions = inputShapes[0].dimensions;
     output->dimensions[axis] = sumAxis;
 
-    if (input_type == OperandType::TENSOR_QUANT8_ASYMM) {
-        NN_OPS_CHECK(inputShapes[0].offset == output->offset);
-        NN_OPS_CHECK(inputShapes[0].scale == output->scale);
-    }
-
     return true;
 }
 
-
 bool genericNormalizationPrepare(const Shape& input, Shape* output) {
     return SetShape(input, output);
 }
diff --git a/nn/common/Utils.cpp b/nn/common/Utils.cpp
index 998f031..96f90d1 100644
--- a/nn/common/Utils.cpp
+++ b/nn/common/Utils.cpp
@@ -16,9 +16,11 @@
 
 #define LOG_TAG "Utils"
 
+#include "Utils.h"
+
 #include "NeuralNetworks.h"
 #include "NeuralNetworksOEM.h"
-#include "Utils.h"
+#include "OperationResolver.h"
 #include "ValidateHal.h"
 
 #include <android-base/logging.h>
@@ -96,6 +98,88 @@
     }
 }
 
+// Adapts the raw operand/index arrays passed to validation code to the
+// IOperationValidationContext interface consumed by helpers such as
+// validateInputTypes()/validateOutputTypes() in OperationsUtils.
+class OperationValidationContext : public IOperationValidationContext {
+    DISALLOW_IMPLICIT_CONSTRUCTORS(OperationValidationContext);
+
+   public:
+    OperationValidationContext(uint32_t inputCount, const uint32_t* inputIndexes,
+                               uint32_t outputCount, const uint32_t* outputIndexes,
+                               const Operand* operands, HalVersion halVersion)
+        : inputCount(inputCount),
+          inputIndexes(inputIndexes),
+          outputCount(outputCount),
+          outputIndexes(outputIndexes),
+          operands(operands),
+          halVersion(halVersion) {}
+
+    HalVersion getHalVersion() const override;
+
+    uint32_t getNumInputs() const override;
+    OperandType getInputType(uint32_t index) const override;
+    Shape getInputShape(uint32_t index) const override;
+
+    uint32_t getNumOutputs() const override;
+    OperandType getOutputType(uint32_t index) const override;
+    Shape getOutputShape(uint32_t index) const override;
+
+   private:
+    const Operand* getInputOperand(uint32_t index) const;
+    const Operand* getOutputOperand(uint32_t index) const;
+
+    // Non-owning pointers into the caller's data; they must outlive this context.
+    uint32_t inputCount;
+    const uint32_t* inputIndexes;
+    uint32_t outputCount;
+    const uint32_t* outputIndexes;
+    const Operand* operands;
+    HalVersion halVersion;
+};
+
+HalVersion OperationValidationContext::getHalVersion() const {
+    return halVersion;
+}
+
+// CHECK-fails when |index| is out of range.
+const Operand* OperationValidationContext::getInputOperand(uint32_t index) const {
+    CHECK(index < static_cast<uint32_t>(inputCount));
+    return &operands[inputIndexes[index]];
+}
+
+// CHECK-fails when |index| is out of range.
+const Operand* OperationValidationContext::getOutputOperand(uint32_t index) const {
+    CHECK(index < static_cast<uint32_t>(outputCount));
+    return &operands[outputIndexes[index]];
+}
+
+uint32_t OperationValidationContext::getNumInputs() const {
+    return inputCount;
+}
+
+uint32_t OperationValidationContext::getNumOutputs() const {
+    return outputCount;
+}
+
+OperandType OperationValidationContext::getInputType(uint32_t index) const {
+    return getInputOperand(index)->type;
+}
+
+Shape OperationValidationContext::getInputShape(uint32_t index) const {
+    const Operand* operand = getInputOperand(index);
+    return Shape{operand->type, operand->dimensions, operand->scale, operand->zeroPoint};
+}
+
+OperandType OperationValidationContext::getOutputType(uint32_t index) const {
+    return getOutputOperand(index)->type;
+}
+
+Shape OperationValidationContext::getOutputShape(uint32_t index) const {
+    const Operand* operand = getOutputOperand(index);
+    return Shape{operand->type, operand->dimensions, operand->scale, operand->zeroPoint};
+}
+
 };  // anonymous namespace
 
 #define COUNT(X) (sizeof(X) / sizeof(X[0]))
@@ -228,9 +306,14 @@
 static_assert(COUNT(kOperationNamesOEM) == kNumberOfOperationTypesOEM,
               "kOperationNamesOEM is incorrect");
 
+// File-local overload taking a raw operation code; looks the name up in both
+// the base (kOperationNames) and OEM (kOperationNamesOEM) tables.
+static const char* getOperationName(uint32_t code) {
+    return tableLookup(kOperationNames, kOperationNamesOEM, code);
+}
+
 const char* getOperationName(OperationType type) {
-    uint32_t n = static_cast<uint32_t>(type);
-    return tableLookup(kOperationNames, kOperationNamesOEM, n);
+    return getOperationName(static_cast<uint32_t>(type));
 }
 
 const uint32_t kSizeOfDataType[]{
@@ -438,7 +519,7 @@
 static int validateHalVersion(ANeuralNetworksOperationType opType, HalVersion halVersion,
                               HalVersion minSupportedHalVersion) {
     if (halVersion < minSupportedHalVersion) {
-        LOG(ERROR) << "The given inputs and outputs for operation " << kOperationNames[opType]
+        LOG(ERROR) << "The given inputs and outputs for operation " << getOperationName(opType)
                    << " are only supported in " << toString(minSupportedHalVersion)
                    << " and later (validating using " << toString(halVersion) << ")";
         return ANEURALNETWORKS_BAD_DATA;
@@ -462,10 +543,9 @@
     }
 
     auto logInvalidInOutNumber = [opType, inputCount, outputCount](int expIn, int expOut) {
-        LOG(ERROR) << "Invalid number of input operands ("
-                   << inputCount << ", expected " << expIn << ") or output operands ("
-                   << outputCount << ", expected " << expOut << ") for operation "
-                   << kOperationNames[opType];
+        LOG(ERROR) << "Invalid number of input operands (" << inputCount << ", expected " << expIn
+                   << ") or output operands (" << outputCount << ", expected " << expOut
+                   << ") for operation " << getOperationName(opType);
     };
 
     switch (opType) {
@@ -499,7 +579,7 @@
                 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             return validateOperationOperandTypes(operands,
@@ -536,7 +616,7 @@
                 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
@@ -559,7 +639,7 @@
                 outExpectedTypes = {OperandType::TENSOR_FLOAT32};
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
@@ -582,7 +662,7 @@
                 outExpectedTypes = {OperandType::TENSOR_FLOAT32};
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
@@ -605,7 +685,7 @@
                 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
@@ -618,35 +698,46 @@
                 outputCount != 1) {
                 LOG(ERROR) << "Invalid number of input operands (" << inputCount
                            << ", expected 12, 11, 9 or 8) or output operands (" << outputCount
-                           << ", expected 1) for operation " << kOperationNames[opType];
+                           << ", expected 1) for operation " << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             auto inputType = operands[inputIndexes[0]].type;
             std::vector<OperandType> inExpectedTypes;
             std::vector<OperandType> outExpectedTypes;
             if (inputType == OperandType::TENSOR_FLOAT32) {
-                inExpectedTypes = {OperandType::TENSOR_FLOAT32,
-                                   OperandType::TENSOR_FLOAT32,
-                                   OperandType::TENSOR_FLOAT32,
-                                   OperandType::INT32,
-                                   OperandType::INT32,
-                                   OperandType::INT32,
-                                   OperandType::INT32,
-                                   OperandType::INT32};
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
+                inExpectedTypes = {
+                        OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
+                        OperandType::TENSOR_FLOAT32, OperandType::INT32,
+                        OperandType::INT32,          OperandType::INT32,
+                        OperandType::INT32,          OperandType::INT32,
+                };
                 outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+            } else if (inputType == OperandType::TENSOR_FLOAT16) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+                inExpectedTypes = {
+                        OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16,
+                        OperandType::TENSOR_FLOAT16, OperandType::INT32,
+                        OperandType::INT32,          OperandType::INT32,
+                        OperandType::INT32,          OperandType::INT32,
+                };
+                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
             } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
-                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
-                                   OperandType::TENSOR_QUANT8_ASYMM,
-                                   OperandType::TENSOR_INT32,
-                                   OperandType::INT32,
-                                   OperandType::INT32,
-                                   OperandType::INT32,
-                                   OperandType::INT32,
-                                   OperandType::INT32};
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
+                inExpectedTypes = {
+                        OperandType::TENSOR_QUANT8_ASYMM,
+                        OperandType::TENSOR_QUANT8_ASYMM,
+                        OperandType::TENSOR_INT32,
+                        OperandType::INT32,
+                        OperandType::INT32,
+                        OperandType::INT32,
+                        OperandType::INT32,
+                        OperandType::INT32,
+                };
                 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
 
@@ -673,7 +764,7 @@
                 outputCount != 1) {
                 LOG(ERROR) << "Invalid number of input operands (" << inputCount
                            << ", expected 11, 10, 8 or 7) or output operands (" << outputCount
-                           << ", expected 1) for operation " << kOperationNames[opType];
+                           << ", expected 1) for operation " << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             auto inputType = operands[inputIndexes[0]].type;
@@ -699,7 +790,7 @@
                 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
 
@@ -726,7 +817,7 @@
                 outputCount != 1) {
                 LOG(ERROR) << "Invalid number of input operands (" << inputCount
                            << ", expected 11, 10, 8 or 7) or output operands (" << outputCount
-                           << ", expected 1) for operation " << kOperationNames[opType];
+                           << ", expected 1) for operation " << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             auto inputType = operands[inputIndexes[0]].type;
@@ -752,7 +843,7 @@
                 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
 
@@ -779,7 +870,7 @@
                 outputCount != 1) {
                 LOG(ERROR) << "Invalid number of input operands (" << inputCount
                            << ", expected 11, 10, 8 or 7) or output operands (" << outputCount
-                           << ", expected 1) for operation " << kOperationNames[opType];
+                           << ", expected 1) for operation " << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             auto inputType = operands[inputIndexes[0]].type;
@@ -796,7 +887,7 @@
                 outExpectedTypes = {OperandType::TENSOR_FLOAT32};
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
 
@@ -823,7 +914,7 @@
                 outputCount != 1) {
                 LOG(ERROR) << "Invalid number of input operands (" << inputCount
                            << ", expected 11, 10, 8 or 7) or output operands (" << outputCount
-                           << ", expected 1) for operation " << kOperationNames[opType];
+                           << ", expected 1) for operation " << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             auto inputType = operands[inputIndexes[0]].type;
@@ -849,7 +940,7 @@
                 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
 
@@ -893,7 +984,7 @@
                 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
@@ -922,7 +1013,7 @@
                 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             return validateOperationOperandTypes(operands,
@@ -951,7 +1042,7 @@
                 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
@@ -965,7 +1056,7 @@
             if ((inputCount != 3 && inputCount != 2) || outputCount != 1) {
                 LOG(ERROR) << "Invalid number of input operands (" << inputCount
                            << ", expected 3 or 2) or output operands (" << outputCount
-                           << ", expected 1) for operation " << kOperationNames[opType];
+                           << ", expected 1) for operation " << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             auto inputType = operands[inputIndexes[0]].type;
@@ -985,7 +1076,7 @@
                 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             if (inputCount == 3) {
@@ -1025,7 +1116,7 @@
                 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
@@ -1037,10 +1128,9 @@
         }
         case ANEURALNETWORKS_CONCATENATION: {
             if (inputCount < 2 || outputCount != 1) {
-                LOG(ERROR) << "Invalid number of input operands ("
-                           << inputCount << ", expected at least 2) or output operands ("
-                           << outputCount << ", expected 1) for operation "
-                           << kOperationNames[opType];
+                LOG(ERROR) << "Invalid number of input operands (" << inputCount
+                           << ", expected at least 2) or output operands (" << outputCount
+                           << ", expected 1) for operation " << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             auto inputType = operands[inputIndexes[0]].type;
@@ -1058,9 +1148,19 @@
                 outExpectedTypes = {OperandType::TENSOR_FLOAT16};
             } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
+
+                const Operand& output = operands[outputIndexes[0]];
+                for (uint32_t i = 0; i < inputCount; ++i) {
+                    const Operand& input = operands[inputIndexes[i]];
+                    if (input.scale != output.scale || input.zeroPoint != output.zeroPoint) {
+                        NN_RETURN_IF_ERROR(
+                                validateHalVersion(opType, halVersion, HalVersion::V1_2));
+                        break;
+                    }
+                }
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             return validateOperationOperandTypes(operands, inputCount, inputIndexes,
@@ -1071,7 +1171,7 @@
             if ((inputCount != 2 && inputCount != 1) || outputCount != 1) {
                 LOG(ERROR) << "Invalid number of input operands (" << inputCount
                            << ", expected 2 or 1) or output operands (" << outputCount
-                           << ", expected 1) for operation " << kOperationNames[opType];
+                           << ", expected 1) for operation " << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             auto inputType = operands[inputIndexes[0]].type;
@@ -1082,7 +1182,7 @@
                 outExpectedTypes = {OperandType::TENSOR_FLOAT32};
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             if (inputCount == 2) {
@@ -1105,7 +1205,7 @@
             if ((inputCount != 6 && inputCount != 5) || outputCount != 1) {
                 LOG(ERROR) << "Invalid number of input operands (" << inputCount
                            << ", expected 6 or 5) or output operands (" << outputCount
-                           << ", expected 1) for operation " << kOperationNames[opType];
+                           << ", expected 1) for operation " << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             auto inputType = operands[inputIndexes[0]].type;
@@ -1120,7 +1220,7 @@
                 outExpectedTypes = {OperandType::TENSOR_FLOAT32};
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             if (inputCount == 6) {
@@ -1148,19 +1248,24 @@
             std::vector<OperandType> inExpectedTypes;
             std::vector<OperandType> outExpectedTypes;
             if (inputType == OperandType::TENSOR_FLOAT32) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                 inExpectedTypes = {OperandType::TENSOR_FLOAT32,
                                    OperandType::TENSOR_INT32};
                 outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+            } else if (inputType == OperandType::TENSOR_FLOAT16) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+                inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_INT32};
+                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
             } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                 inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
                                    OperandType::TENSOR_INT32};
                 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
-            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
             return validateOperationOperandTypes(operands,
                                                  inputCount, inputIndexes,
                                                  inExpectedTypes,
@@ -1171,20 +1276,31 @@
             if ((inputCount != 4 && inputCount != 3) || outputCount != 1) {
                 LOG(ERROR) << "Invalid number of input operands (" << inputCount
                            << ", expected 4 or 3) or output operands (" << outputCount
-                           << ", expected 1) for operation " << kOperationNames[opType];
+                           << ", expected 1) for operation " << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             auto inputType = operands[inputIndexes[0]].type;
             std::vector<OperandType> inExpectedTypes;
             std::vector<OperandType> outExpectedTypes;
             if (inputType == OperandType::TENSOR_FLOAT32) {
-                inExpectedTypes = {OperandType::TENSOR_FLOAT32,
-                                   OperandType::INT32,
-                                   OperandType::INT32};
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
+                inExpectedTypes = {
+                        OperandType::TENSOR_FLOAT32,
+                        OperandType::INT32,
+                        OperandType::INT32,
+                };
                 outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+            } else if (inputType == OperandType::TENSOR_FLOAT16) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+                inExpectedTypes = {
+                        OperandType::TENSOR_FLOAT16,
+                        OperandType::INT32,
+                        OperandType::INT32,
+                };
+                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             if (inputCount == 4) {
@@ -1203,23 +1319,29 @@
             if ((inputCount != 3 && inputCount != 2) || outputCount != 1) {
                 LOG(ERROR) << "Invalid number of input operands (" << inputCount
                            << ", expected 3 or 2) or output operands (" << outputCount
-                           << ", expected 1) for operation " << kOperationNames[opType];
+                           << ", expected 1) for operation " << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             auto inputType = operands[inputIndexes[0]].type;
             std::vector<OperandType> inExpectedTypes;
             std::vector<OperandType> outExpectedTypes;
             if (inputType == OperandType::TENSOR_FLOAT32) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                 inExpectedTypes = {OperandType::TENSOR_FLOAT32,
                                    OperandType::INT32};
                 outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+            } else if (inputType == OperandType::TENSOR_FLOAT16) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+                inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::INT32};
+                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
             } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                 inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
                                    OperandType::INT32};
                 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             if (inputCount == 3) {
@@ -1238,23 +1360,29 @@
             if ((inputCount != 3 && inputCount != 2) || outputCount != 1) {
                 LOG(ERROR) << "Invalid number of input operands (" << inputCount
                            << ", expected 3 or 2) or output operands (" << outputCount
-                           << ", expected 1) for operation " << kOperationNames[opType];
+                           << ", expected 1) for operation " << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             auto inputType = operands[inputIndexes[0]].type;
             std::vector<OperandType> inExpectedTypes;
             std::vector<OperandType> outExpectedTypes;
             if (inputType == OperandType::TENSOR_FLOAT32) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                 inExpectedTypes = {OperandType::TENSOR_FLOAT32,
                                    OperandType::INT32};
                 outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+            } else if (inputType == OperandType::TENSOR_FLOAT16) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+                inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::INT32};
+                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
             } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                 inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
                                    OperandType::INT32};
                 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             if (inputCount == 3) {
@@ -1279,7 +1407,7 @@
                 inputType != OperandType::TENSOR_INT32 &&
                 inputType != OperandType::TENSOR_QUANT8_ASYMM) {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             std::vector<OperandType> inExpectedTypes = {OperandType::TENSOR_INT32,
@@ -1302,7 +1430,7 @@
                 inputType != OperandType::TENSOR_INT32 &&
                 inputType != OperandType::TENSOR_QUANT8_ASYMM) {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             std::vector<OperandType> inExpectedTypes = {OperandType::TENSOR_INT32,
@@ -1327,7 +1455,7 @@
                 inputType != OperandType::TENSOR_INT32 &&
                 inputType != OperandType::TENSOR_QUANT8_ASYMM) {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             std::vector<OperandType> inExpectedTypes = {OperandType::TENSOR_FLOAT32,
@@ -1383,7 +1511,7 @@
             } else {
                 LOG(ERROR) << "Invalid number of input operands (" << inputCount
                            << ", expected 23 or 27) or output operands (" << outputCount
-                           << ", expected 4) for operation " << kOperationNames[opType];
+                           << ", expected 4) for operation " << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             return validateOperationOperandTypes(operands,
@@ -1471,23 +1599,34 @@
             if ((inputCount != 3 && inputCount != 2) || outputCount != 1) {
                 LOG(ERROR) << "Invalid number of input operands (" << inputCount
                            << ", expected 3 or 2) or output operands (" << outputCount
-                           << ", expected 1) for operation " << kOperationNames[opType];
+                           << ", expected 1) for operation " << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             auto inputType = operands[inputIndexes[0]].type;
             std::vector<OperandType> inExpectedTypes;
             std::vector<OperandType> outExpectedTypes;
             if (inputType == OperandType::TENSOR_FLOAT32) {
-                inExpectedTypes = {OperandType::TENSOR_FLOAT32,
-                                   OperandType::TENSOR_INT32};
+                inExpectedTypes = {
+                        OperandType::TENSOR_FLOAT32,
+                        OperandType::TENSOR_INT32,
+                };
                 outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+            } else if (inputType == OperandType::TENSOR_FLOAT16) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+                inExpectedTypes = {
+                        OperandType::TENSOR_FLOAT16,
+                        OperandType::TENSOR_INT32,
+                };
+                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
             } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
-                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
-                                   OperandType::TENSOR_INT32};
+                inExpectedTypes = {
+                        OperandType::TENSOR_QUANT8_ASYMM,
+                        OperandType::TENSOR_INT32,
+                };
                 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             if (inputCount == 3) {
@@ -1506,25 +1645,37 @@
             if ((inputCount != 4 && inputCount != 3) || outputCount != 1) {
                 LOG(ERROR) << "Invalid number of input operands (" << inputCount
                            << ", expected 4 or 3) or output operands (" << outputCount
-                           << ", expected 1) for operation " << kOperationNames[opType];
+                           << ", expected 1) for operation " << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             auto inputType = operands[inputIndexes[0]].type;
             std::vector<OperandType> inExpectedTypes;
             std::vector<OperandType> outExpectedTypes;
             if (inputType == OperandType::TENSOR_FLOAT32) {
-                inExpectedTypes = {OperandType::TENSOR_FLOAT32,
-                                   OperandType::TENSOR_INT32,
-                                   OperandType::TENSOR_INT32};
+                inExpectedTypes = {
+                        OperandType::TENSOR_FLOAT32,
+                        OperandType::TENSOR_INT32,
+                        OperandType::TENSOR_INT32,
+                };
                 outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+            } else if (inputType == OperandType::TENSOR_FLOAT16) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+                inExpectedTypes = {
+                        OperandType::TENSOR_FLOAT16,
+                        OperandType::TENSOR_INT32,
+                        OperandType::TENSOR_INT32,
+                };
+                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
             } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
-                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
-                                   OperandType::TENSOR_INT32,
-                                   OperandType::TENSOR_INT32};
+                inExpectedTypes = {
+                        OperandType::TENSOR_QUANT8_ASYMM,
+                        OperandType::TENSOR_INT32,
+                        OperandType::TENSOR_INT32,
+                };
                 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             if (inputCount == 4) {
@@ -1548,19 +1699,31 @@
             std::vector<OperandType> inExpectedTypes;
             std::vector<OperandType> outExpectedTypes;
             if (inputType == OperandType::TENSOR_FLOAT32) {
-                inExpectedTypes = {OperandType::TENSOR_FLOAT32,
-                                   OperandType::TENSOR_INT32};
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
+                inExpectedTypes = {
+                        OperandType::TENSOR_FLOAT32,
+                        OperandType::TENSOR_INT32,
+                };
                 outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+            } else if (inputType == OperandType::TENSOR_FLOAT16) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+                inExpectedTypes = {
+                        OperandType::TENSOR_FLOAT16,
+                        OperandType::TENSOR_INT32,
+                };
+                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
             } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
-                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
-                                   OperandType::TENSOR_INT32};
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
+                inExpectedTypes = {
+                        OperandType::TENSOR_QUANT8_ASYMM,
+                        OperandType::TENSOR_INT32,
+                };
                 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
-            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
             return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                  inExpectedTypes, outputCount, outputIndexes,
                                                  outExpectedTypes);
@@ -1574,23 +1737,36 @@
             std::vector<OperandType> inExpectedTypes;
             std::vector<OperandType> outExpectedTypes;
             if (inputType == OperandType::TENSOR_FLOAT32) {
-                inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_INT32,
-                                   OperandType::FLOAT32};
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+                inExpectedTypes = {
+                        OperandType::TENSOR_FLOAT32,
+                        OperandType::TENSOR_INT32,
+                        OperandType::FLOAT32,
+                };
                 outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+            } else if (inputType == OperandType::TENSOR_FLOAT16) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+                inExpectedTypes = {
+                        OperandType::TENSOR_FLOAT16,
+                        OperandType::TENSOR_INT32,
+                        OperandType::FLOAT32,
+                };
+                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
             } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
-                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_INT32,
-                                   OperandType::INT32};  // TODO(b/116699425): Make it UINT8.
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+                inExpectedTypes = {
+                        OperandType::TENSOR_QUANT8_ASYMM,
+                        OperandType::TENSOR_INT32,
+                        OperandType::INT32,
+                };  // TODO(b/116699425): Make it UINT8.
                 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
-            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
-            return validateOperationOperandTypes(operands,
-                                                 inputCount, inputIndexes,
-                                                 inExpectedTypes,
-                                                 outputCount, outputIndexes,
+            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
+                                                 inExpectedTypes, outputCount, outputIndexes,
                                                  outExpectedTypes);
         }
         case ANEURALNETWORKS_CAST: {
@@ -1608,7 +1784,7 @@
                 inExpectedTypes = {inputType};
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             std::vector<OperandType> outExpectedTypes;
@@ -1619,7 +1795,7 @@
                 outExpectedTypes = {outputType};
             } else {
                 LOG(ERROR) << "Unsupported output tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
@@ -1636,19 +1812,24 @@
             std::vector<OperandType> inExpectedTypes;
             std::vector<OperandType> outExpectedTypes;
             if (inputType == OperandType::TENSOR_FLOAT32) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
                 inExpectedTypes = {OperandType::TENSOR_FLOAT32,
                                    OperandType::TENSOR_INT32};
                 outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+            } else if (inputType == OperandType::TENSOR_FLOAT16) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+                inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_INT32};
+                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
             } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
                 inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
                                    OperandType::TENSOR_INT32};
                 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
-            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
             return validateOperationOperandTypes(operands,
                                                  inputCount, inputIndexes,
                                                  inExpectedTypes,
@@ -1664,19 +1845,22 @@
             std::vector<OperandType> inExpectedTypes;
             std::vector<OperandType> outExpectedTypes;
             if (inputType == OperandType::TENSOR_FLOAT32) {
-                inExpectedTypes = {OperandType::TENSOR_FLOAT32,
-                                   OperandType::TENSOR_INT32};
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
+                inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_INT32};
                 outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+            } else if (inputType == OperandType::TENSOR_FLOAT16) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+                inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_INT32};
+                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
             } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
-                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
-                                   OperandType::TENSOR_INT32};
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
+                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_INT32};
                 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
-            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
             return validateOperationOperandTypes(operands,
                                                  inputCount, inputIndexes,
                                                  inExpectedTypes,
@@ -1692,33 +1876,42 @@
             std::vector<OperandType> inExpectedTypes;
             std::vector<OperandType> outExpectedTypes;
             if (inputType == OperandType::TENSOR_FLOAT32) {
-                inExpectedTypes = {OperandType::TENSOR_FLOAT32,
-                                   OperandType::TENSOR_INT32,
-                                   OperandType::TENSOR_INT32,
-                                   OperandType::TENSOR_INT32,
-                                   OperandType::INT32,
-                                   OperandType::INT32,
-                                   OperandType::INT32};
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
+                inExpectedTypes = {
+                        OperandType::TENSOR_FLOAT32, OperandType::TENSOR_INT32,
+                        OperandType::TENSOR_INT32,   OperandType::TENSOR_INT32,
+                        OperandType::INT32,          OperandType::INT32,
+                        OperandType::INT32,
+                };
                 outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+            } else if (inputType == OperandType::TENSOR_FLOAT16) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+                inExpectedTypes = {
+                        OperandType::TENSOR_FLOAT16, OperandType::TENSOR_INT32,
+                        OperandType::TENSOR_INT32,   OperandType::TENSOR_INT32,
+                        OperandType::INT32,          OperandType::INT32,
+                        OperandType::INT32,
+                };
+                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
             } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
-                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
-                                   OperandType::TENSOR_INT32,
-                                   OperandType::TENSOR_INT32,
-                                   OperandType::TENSOR_INT32,
-                                   OperandType::INT32,
-                                   OperandType::INT32,
-                                   OperandType::INT32};
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
+                inExpectedTypes = {
+                        OperandType::TENSOR_QUANT8_ASYMM,
+                        OperandType::TENSOR_INT32,
+                        OperandType::TENSOR_INT32,
+                        OperandType::TENSOR_INT32,
+                        OperandType::INT32,
+                        OperandType::INT32,
+                        OperandType::INT32,
+                };
                 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
-            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
-            return validateOperationOperandTypes(operands,
-                                                 inputCount, inputIndexes,
-                                                 inExpectedTypes,
-                                                 outputCount, outputIndexes,
+            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
+                                                 inExpectedTypes, outputCount, outputIndexes,
                                                  outExpectedTypes);
         }
         case ANEURALNETWORKS_DIV: {
@@ -1744,7 +1937,7 @@
                 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
@@ -1774,7 +1967,7 @@
                 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             return validateOperationOperandTypes(operands,
@@ -1803,7 +1996,7 @@
                 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
@@ -1822,14 +2015,15 @@
             auto inputType = operands[inputIndexes[0]].type;
             std::vector<OperandType> inExpectedTypes;
             std::vector<OperandType> outExpectedTypes;
-            if (inputType == OperandType::TENSOR_FLOAT32 ||
+            if (inputType == OperandType::TENSOR_FLOAT16 ||
+                inputType == OperandType::TENSOR_FLOAT32 ||
                 inputType == OperandType::TENSOR_INT32 ||
                 inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                 inExpectedTypes = {inputType, OperandType::INT32};
                 outExpectedTypes = {OperandType::TENSOR_INT32};
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
@@ -1852,7 +2046,7 @@
                 outExpectedTypes = {inputType};
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
@@ -1860,36 +2054,10 @@
                                                  inExpectedTypes, outputCount, outputIndexes,
                                                  outExpectedTypes);
         }
-        case ANEURALNETWORKS_GATHER: {
-            if (inputCount != 3 || outputCount != 1) {
-                logInvalidInOutNumber(3, 1);
-                return ANEURALNETWORKS_BAD_DATA;
-            }
-            auto inputType = operands[inputIndexes[0]].type;
-            std::vector<OperandType> inExpectedTypes;
-            std::vector<OperandType> outExpectedTypes;
-            if (inputType == OperandType::TENSOR_FLOAT16 ||
-                inputType == OperandType::TENSOR_FLOAT32 ||
-                inputType == OperandType::TENSOR_INT32 ||
-                inputType == OperandType::TENSOR_QUANT8_ASYMM) {
-                inExpectedTypes = {inputType, OperandType::INT32, OperandType::TENSOR_INT32};
-                outExpectedTypes = {inputType};
-            } else {
-                LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
-                return ANEURALNETWORKS_BAD_DATA;
-            }
-            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
-            return validateOperationOperandTypes(operands,
-                                                 inputCount, inputIndexes,
-                                                 inExpectedTypes,
-                                                 outputCount, outputIndexes,
-                                                 outExpectedTypes);
-        }
         case ANEURALNETWORKS_SPLIT: {
             if (inputCount != 3) {
                 LOG(ERROR) << "Invalid number of input operands (" << inputCount << ", expected 3)"
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             auto inputType = operands[inputIndexes[0]].type;
@@ -1898,7 +2066,7 @@
                 inputType != OperandType::TENSOR_INT32 &&
                 inputType != OperandType::TENSOR_QUANT8_ASYMM) {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             std::vector<OperandType> inExpectedTypes = {inputType, OperandType::INT32,
@@ -1928,6 +2096,30 @@
                 outExpectedTypes = {inputType};
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
+                           << getOperationName(opType);
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
+                                                 inExpectedTypes, outputCount, outputIndexes,
+                                                 outExpectedTypes);
+        }
+        case ANEURALNETWORKS_ROI_POOLING: {
+            if (inputCount != 5 || outputCount != 1) {
+                logInvalidInOutNumber(5, 1);
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            std::vector<OperandType> inExpectedTypes;
+            std::vector<OperandType> outExpectedTypes;
+            auto inputType = operands[inputIndexes[0]].type;
+            if (inputType == OperandType::TENSOR_FLOAT32 ||
+                inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+                inExpectedTypes = {inputType, OperandType::TENSOR_FLOAT32,
+                                   OperandType::TENSOR_INT32, OperandType::FLOAT32,
+                                   OperandType::BOOL};
+                outExpectedTypes = {inputType};
+            } else {
+                LOG(ERROR) << "Unsupported input tensor type for operation "
                            << kOperationNames[opType];
                 return ANEURALNETWORKS_BAD_DATA;
             }
@@ -1968,7 +2160,7 @@
                 outExpectedTypes = {inputType};
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
@@ -1980,7 +2172,7 @@
             if ((inputCount != 12 && inputCount != 9) || outputCount != 1) {
                 LOG(ERROR) << "Invalid number of input operands (" << inputCount
                            << ", expected 12 or 9) or output operands (" << outputCount
-                           << ", expected 1) for operation " << kOperationNames[opType];
+                           << ", expected 1) for operation " << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             auto inputType = operands[inputIndexes[0]].type;
@@ -2004,7 +2196,7 @@
                 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
 
@@ -2037,7 +2229,7 @@
                 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
@@ -2049,7 +2241,7 @@
             if ((inputCount != 11 && inputCount != 9) || outputCount != 1) {
                 LOG(ERROR) << "Invalid number of input operands (" << inputCount
                            << ", expected 11 or 9) or output operands (" << outputCount
-                           << ", expected 1) for operation " << kOperationNames[opType];
+                           << ", expected 1) for operation " << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             auto inputType = operands[inputIndexes[0]].type;
@@ -2065,7 +2257,7 @@
                 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
 
@@ -2103,7 +2295,7 @@
                 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
@@ -2127,7 +2319,7 @@
                 outExpectedTypes = {inputType};
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << kOperationNames[opType];
+                           << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
             NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
@@ -2199,6 +2391,30 @@
                 outExpectedTypes = {inputType, OperandType::TENSOR_INT32};
             } else {
                 LOG(ERROR) << "Unsupported input tensor type for operation "
+                           << getOperationName(opType);
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
+                                                 inExpectedTypes, outputCount, outputIndexes,
+                                                 outExpectedTypes);
+        }
+        case ANEURALNETWORKS_SLICE: {
+            if (inputCount != 3 || outputCount != 1) {
+                logInvalidInOutNumber(3, 1);
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            std::vector<OperandType> inExpectedTypes;
+            std::vector<OperandType> outExpectedTypes;
+            OperandType inputType = operands[inputIndexes[0]].type;
+            if (inputType == OperandType::TENSOR_FLOAT16 ||
+                inputType == OperandType::TENSOR_FLOAT32 ||
+                inputType == OperandType::TENSOR_INT32 ||
+                inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+                inExpectedTypes = {inputType, OperandType::TENSOR_INT32, OperandType::TENSOR_INT32};
+                outExpectedTypes = {inputType};
+            } else {
+                LOG(ERROR) << "Unsupported input tensor type for operation "
                            << kOperationNames[opType];
                 return ANEURALNETWORKS_BAD_DATA;
             }
@@ -2208,7 +2424,24 @@
                                                  outExpectedTypes);
         }
         default: {
-            return ANEURALNETWORKS_BAD_DATA;
+            const OperationRegistration* operationRegistration =
+                    OperationResolver::get()->findOperation(static_cast<OperationType>(opType));
+            if (operationRegistration == nullptr) {
+                if (opType >= 0 && opType < kNumberOfOperationTypes) {
+                    LOG(ERROR) << getOperationName(opType) << " not registered";
+                } else {
+                    LOG(ERROR) << "Operation type " << opType << " not registered";
+                }
+                return ANEURALNETWORKS_UNEXPECTED_NULL;
+            }
+            if (operationRegistration->validate == nullptr) {
+                LOG(ERROR) << "Incomplete operation registration: " << getOperationName(opType);
+                return ANEURALNETWORKS_UNEXPECTED_NULL;
+            }
+            OperationValidationContext context(inputCount, inputIndexes, outputCount, outputIndexes,
+                                               operands.data(), halVersion);
+            return operationRegistration->validate(&context) ? ANEURALNETWORKS_NO_ERROR
+                                                             : ANEURALNETWORKS_BAD_DATA;
         }
     }
 }
diff --git a/nn/common/include/CpuExecutor.h b/nn/common/include/CpuExecutor.h
index 809b59c..ba20e53 100644
--- a/nn/common/include/CpuExecutor.h
+++ b/nn/common/include/CpuExecutor.h
@@ -178,9 +178,9 @@
 
 template <typename T>
 T getScalarData(const RunTimeOperandInfo& info) {
-  // TODO: Check buffer is at least as long as size of data.
-  T* data = reinterpret_cast<T*>(info.buffer);
-  return data[0];
+    // TODO: Check buffer is at least as long as size of data.
+    T* data = reinterpret_cast<T*>(info.buffer);
+    return data[0];
 }
 
 inline bool IsNullInput(const RunTimeOperandInfo *input) {
diff --git a/nn/common/include/OperationResolver.h b/nn/common/include/OperationResolver.h
new file mode 100644
index 0000000..6fac780
--- /dev/null
+++ b/nn/common/include/OperationResolver.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_ML_NN_COMMON_OPERATION_RESOLVER_H
+#define ANDROID_ML_NN_COMMON_OPERATION_RESOLVER_H
+
+#include "HalInterfaces.h"
+#include "OperationsUtils.h"
+
+namespace android {
+namespace nn {
+
+// Encapsulates an operation implementation.
+struct OperationRegistration {
+    OperationType type;
+    const char* name;
+
+    // Validates operand types, shapes, and any values known during graph creation.
+    std::function<bool(const IOperationValidationContext*)> validate;
+
+    // prepare is called when the inputs this operation depends on have been
+    // computed. Typically, prepare does any remaining validation and requests
+    // output tensors to be resized via context->resizeOutputTensor(...).
+    std::function<bool(IOperationExecutionContext*)> prepare;
+
+    // Executes the operation, reading from context->getInputBuffer(...)
+    // and writing to context->getOutputBuffer(...).
+    std::function<bool(IOperationExecutionContext*)> execute;
+};
+
+// A global singleton used to register operation implementations.
+//
+// Usage:
+//   const OperationRegistration* operationRegistration =
+//           OperationResolver::get()->findOperation(operationType);
+//   NN_RET_CHECK(operationRegistration != nullptr);
+//   NN_RET_CHECK(operationRegistration->validate != nullptr);
+//   NN_RET_CHECK(operationRegistration->validate(&context));
+//
+class OperationResolver {
+    DISALLOW_COPY_AND_ASSIGN(OperationResolver);
+
+   public:
+    static const OperationResolver* get() {
+        static OperationResolver instance;
+        return &instance;
+    }
+
+    const OperationRegistration* findOperation(OperationType operationType) const;
+
+   private:
+    OperationResolver();
+
+    void registerOperation(const OperationRegistration* operationRegistration);
+
+    const OperationRegistration* mRegistrations[kNumberOfOperationTypes] = {};
+};
+
+// NN_REGISTER_OPERATION creates an OperationRegistration for consumption by
+// OperationResolver.
+//
+// Usage:
+//   namespace android {
+//   namespace nn {
+//   namespace gather {
+//   ...
+//   }  // namespace gather
+//
+//   NN_REGISTER_OPERATION(GATHER, gather::kOperationName, gather::validate,
+//                         gather::prepare, gather::execute);
+//   }  // namespace nn
+//   }  // namespace android
+//
+#ifdef NN_INCLUDE_CPU_IMPLEMENTATION
+#define NN_REGISTER_OPERATION(identifier, operationName, validate, prepare, execute)           \
+    const OperationRegistration* register_##identifier() {                                     \
+        static OperationRegistration registration = {OperationType::identifier, operationName, \
+                                                     validate, prepare, execute};              \
+        return &registration;                                                                  \
+    }
+#else
+// This version ignores CPU execution logic (prepare and execute).
+// The compiler is supposed to omit that code so that only validation logic
+// makes it into libneuralnetworks_utils.
+#define NN_REGISTER_OPERATION(identifier, operationName, validate, unused_prepare, unused_execute) \
+    const OperationRegistration* register_##identifier() {                                         \
+        static OperationRegistration registration = {OperationType::identifier, operationName,     \
+                                                     validate, nullptr, nullptr};                  \
+        return &registration;                                                                      \
+    }
+#endif
+
+}  // namespace nn
+}  // namespace android
+
+#endif  // ANDROID_ML_NN_COMMON_OPERATION_RESOLVER_H
diff --git a/nn/common/include/Operations.h b/nn/common/include/Operations.h
index e9259b6..feed2b7 100644
--- a/nn/common/include/Operations.h
+++ b/nn/common/include/Operations.h
@@ -20,7 +20,6 @@
 #include "operations/Cast.h"
 #include "operations/EmbeddingLookup.h"
 #include "operations/ExpandDims.h"
-#include "operations/Gather.h"
 #include "operations/HashtableLookup.h"
 #include "operations/LSHProjection.h"
 #include "operations/LSTM.h"
@@ -30,6 +29,7 @@
 #include "operations/QuantizedLSTM.h"
 #include "operations/RNN.h"
 #include "operations/SVDF.h"
+#include "operations/Slice.h"
 #include "operations/Tile.h"
 #include "operations/TopK_V2.h"
 
@@ -63,17 +63,23 @@
 
 bool quantizeFloat32ToQuant8(const float* inputData, uint8_t* outputData, const Shape& outputShape);
 
+bool depthwiseConvFloat16(const _Float16* inputData, const Shape& inputShape,
+                          const _Float16* filterData, const Shape& filterShape,
+                          const _Float16* biasData, const Shape& biasShape, int32_t paddingLeft,
+                          int32_t paddingRight, int32_t paddingTop, int32_t paddingBottom,
+                          int32_t strideWidth, int32_t strideHeight, int32_t depthMultiplier,
+                          int32_t activation, _Float16* outputData, const Shape& outputShape);
 bool depthwiseConvFloat32(const float* inputData, const Shape& inputShape, const float* filterData,
                           const Shape& filterShape, const float* biasData, const Shape& biasShape,
-                          int32_t padding_left, int32_t padding_right, int32_t padding_top,
-                          int32_t padding_bottom, int32_t stride_width, int32_t stride_height,
-                          int32_t depth_multiplier, int32_t activation, float* outputData,
+                          int32_t paddingLeft, int32_t paddingRight, int32_t paddingTop,
+                          int32_t paddingBottom, int32_t strideWidth, int32_t strideHeight,
+                          int32_t depthMultiplier, int32_t activation, float* outputData,
                           const Shape& outputShape);
 bool depthwiseConvQuant8(const uint8_t* inputData, const Shape& inputShape,
                          const uint8_t* filterData, const Shape& filterShape,
-                         const int32_t* biasData, const Shape& biasShape, int32_t padding_left,
-                         int32_t padding_right, int32_t padding_top, int32_t padding_bottom,
-                         int32_t stride_width, int32_t stride_height, int32_t depth_multiplier,
+                         const int32_t* biasData, const Shape& biasShape, int32_t paddingLeft,
+                         int32_t paddingRight, int32_t paddingTop, int32_t paddingBottom,
+                         int32_t strideWidth, int32_t strideHeight, int32_t depthMultiplier,
                          int32_t activation, uint8_t* outputData, const Shape& outputShape);
 
 bool convFloat32(const float* inputData, const Shape& inputShape, const float* filterData,
@@ -169,30 +175,33 @@
                               float bias, float alpha, float beta, int32_t axis, float* outputData,
                               const Shape& outputShape);
 
-bool reshapeGeneric(const void* inputData, const Shape& inputShape, void* outputData,
-                    const Shape& outputShape);
+bool copyData(const void* inputData, const Shape& inputShape, void* outputData,
+              const Shape& outputShape);
 
+bool resizeBilinearFloat16(const _Float16* inputData, const Shape& inputShape, _Float16* outputData,
+                           const Shape& outputShape);
 bool resizeBilinearFloat32(const float* inputData, const Shape& inputShape, float* outputData,
                            const Shape& outputShape);
 
-bool depthToSpaceGeneric(const uint8_t* inputData, const Shape& inputShape, int32_t blockSize,
-                         uint8_t* outputData, const Shape& outputShape);
+template <typename T>
+bool depthToSpaceGeneric(const T* inputData, const Shape& inputShape, int32_t blockSize,
+                         T* outputData, const Shape& outputShape);
+template <typename T>
+bool spaceToDepthGeneric(const T* inputData, const Shape& inputShape, int32_t blockSize,
+                         T* outputData, const Shape& outputShape);
 
-bool spaceToDepthGeneric(const uint8_t* inputData, const Shape& inputShape, int32_t blockSize,
-                         uint8_t* outputData, const Shape& outputShape);
+template <typename T>
+bool padGeneric(const T* inputData, const Shape& inputShape, const int32_t* paddings, T pad_value,
+                T* outputData, const Shape& outputShape);
 
-bool padFloat32(const float* inputData, const Shape& inputShape, const int32_t* paddings,
-                float pad_value, float* outputData, const Shape& outputShape);
+template <typename T>
+bool batchToSpaceGeneric(const T* inputData, const Shape& inputShape, const int32_t* blockSize,
+                         T* outputData, const Shape& outputShape);
 
-bool padQuant8(const uint8_t* inputData, const Shape& inputShape, const int32_t* paddings,
-               uint8_t pad_value, uint8_t* outputData, const Shape& outputShape);
-
-bool batchToSpaceGeneric(const uint8_t* inputData, const Shape& inputShape,
-                         const int32_t* blockSize, uint8_t* outputData, const Shape& outputShape);
-
-bool spaceToBatchGeneric(const uint8_t* inputData, const Shape& inputShape,
-                         const int32_t* blockSize, const int32_t* padding,
-                         const Shape& paddingShape, uint8_t* outputData, const Shape& outputShape);
+template <typename T>
+bool spaceToBatchGeneric(const T* inputData, const Shape& inputShape, const int32_t* blockSize,
+                         const int32_t* padding, const Shape& paddingShape, T* outputData,
+                         const Shape& outputShape);
 
 bool subFloat16(const _Float16* in1, const Shape& shape1, const _Float16* in2, const Shape& shape2,
                 int32_t activation, _Float16* out, const Shape& shapeOut);
@@ -203,16 +212,14 @@
 bool subQuant8(const uint8_t* in1, const Shape& shape1, const uint8_t* in2, const Shape& shape2,
                int32_t activation, uint8_t* out, const Shape& shapeOut);
 
-bool squeezeGeneric(const void* inputData, const Shape& inputShape, void* outputData,
-                    const Shape& outputShape);
-
 bool divFloat16(const _Float16* in1, const Shape& shape1, const _Float16* in2, const Shape& shape2,
                 int32_t activation, _Float16* out, const Shape& shapeOut);
 bool divFloat32(const float* in1, const Shape& shape1, const float* in2, const Shape& shape2,
                 int32_t activation, float* out, const Shape& shapeOut);
 
-bool transposeGeneric(const uint8_t* inputData, const Shape& inputShape, const int32_t* perm,
-                      const Shape& permShape, uint8_t* outputData, const Shape& outputShape);
+template <typename T>
+bool transposeGeneric(const T* inputData, const Shape& inputShape, const int32_t* perm,
+                      const Shape& permShape, T* outputData, const Shape& outputShape);
 
 bool meanGeneric(const uint8_t* inputData, const Shape& inputShape, const int32_t* axis,
                  const Shape& axisShape, bool keepDims, uint8_t* outputData,
@@ -250,6 +257,10 @@
                     const Shape& roiShape, float spatialScale, int32_t samplingRatio,
                     uint8_t* outputData, const Shape& outputShape);
 
+bool roiPoolingGeneric(const uint8_t* inputData, const Shape& inputShape, const uint8_t* roiData,
+                       const Shape& roiShape, float spatialScale, uint8_t* outputData,
+                       const Shape& outputShape);
+
 bool heatmapMaxKeypoint(const float* heatmap, const Shape& heatmapShape, const float* boxes,
                         const Shape& boxesShape, float* outputData, const Shape& outputShape);
 
diff --git a/nn/common/include/OperationsUtils.h b/nn/common/include/OperationsUtils.h
index b77a0d0..52c6b2c 100644
--- a/nn/common/include/OperationsUtils.h
+++ b/nn/common/include/OperationsUtils.h
@@ -113,6 +113,72 @@
     int32_t offset;
 };
 
+// Provides information available during graph creation to validate an operation.
+class IOperationValidationContext {
+   public:
+    virtual ~IOperationValidationContext() {}
+
+    // The HAL version used to validate the operation.
+    // If getHalVersion() returns HalVersion::V1_0 and the operation
+    // is only supported since HalVersion::V1_1, validation will fail.
+    virtual HalVersion getHalVersion() const = 0;
+
+    virtual uint32_t getNumInputs() const = 0;
+    virtual OperandType getInputType(uint32_t index) const = 0;
+    virtual Shape getInputShape(uint32_t index) const = 0;
+
+    virtual uint32_t getNumOutputs() const = 0;
+    virtual OperandType getOutputType(uint32_t index) const = 0;
+    virtual Shape getOutputShape(uint32_t index) const = 0;
+};
+
+// Provides inputs and outputs during operation execution.
+class IOperationExecutionContext {
+   public:
+    virtual ~IOperationExecutionContext() {}
+
+    virtual uint32_t getNumInputs() const = 0;
+    virtual OperandType getInputType(uint32_t index) const = 0;
+    virtual Shape getInputShape(uint32_t index) const = 0;
+    virtual const void* getInputBuffer(uint32_t index) const = 0;
+
+    virtual uint32_t getNumOutputs() const = 0;
+    virtual OperandType getOutputType(uint32_t index) const = 0;
+    virtual Shape getOutputShape(uint32_t index) const = 0;
+    virtual void* getOutputBuffer(uint32_t index) = 0;
+
+    // Requests the output buffer to be resized. Updates the output shape.
+    virtual bool resizeOutputTensor(uint32_t index, const Shape& shape) = 0;
+
+    template <typename T>
+    const T* getInputBuffer(uint32_t index) const {
+        return reinterpret_cast<const T*>(getInputBuffer(index));
+    }
+
+    template <typename T>
+    T* getOutputBuffer(uint32_t index) {
+        return reinterpret_cast<T*>(getOutputBuffer(index));
+    }
+
+    template <typename T>
+    T getInputValue(uint32_t index) const {
+        return getInputBuffer<T>(index)[0];
+    }
+};
+
+// Verifies that the number and types of operation inputs are as expected.
+bool validateInputTypes(const IOperationValidationContext* context,
+                        const std::vector<OperandType>& expectedTypes);
+
+// Verifies that the number and types of operation outputs are as expected.
+bool validateOutputTypes(const IOperationValidationContext* context,
+                         const std::vector<OperandType>& expectedTypes);
+
+// Verifies that the HAL version specified in the context is greater than or
+// equal to the minimal supported HAL version.
+bool validateHalVersion(const IOperationValidationContext* context,
+                        HalVersion minSupportedHalVersion);
+
 // Verifies that the two shapes are the same.
 bool SameShape(const Shape& in1, const Shape& in2);
 
diff --git a/nn/common/operations/Abs.cpp b/nn/common/operations/Abs.cpp
new file mode 100644
index 0000000..522e460
--- /dev/null
+++ b/nn/common/operations/Abs.cpp
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Operations"
+
+#include "HalInterfaces.h"
+#include "OperationResolver.h"
+#include "OperationsUtils.h"
+#include "Tracing.h"
+
+#include <cmath>
+
+namespace android {
+namespace nn {
+namespace abs {
+
+constexpr char kOperationName[] = "ABS";
+
+constexpr uint32_t kNumInputs = 1;
+constexpr uint32_t kInputTensor = 0;
+
+constexpr uint32_t kNumOutputs = 1;
+constexpr uint32_t kOutputTensor = 0;
+
+namespace {
+
+template <typename T>
+inline bool compute(const T* input, const Shape& shape, T* output) {
+    const auto size = getNumberOfElements(shape);
+    for (uint32_t i = 0; i < size; ++i) {
+        output[i] = static_cast<T>(std::abs(static_cast<float>(input[i])));
+    }
+    return true;
+}
+
+}  // namespace
+
+bool validate(const IOperationValidationContext* context) {
+    NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
+    NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs);
+    OperandType inputType = context->getInputType(kInputTensor);
+    NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||
+                 inputType == OperandType::TENSOR_FLOAT32)
+            << "Unsupported tensor type for operation " << kOperationName;
+    NN_RET_CHECK(validateInputTypes(context, {inputType}));
+    NN_RET_CHECK(validateOutputTypes(context, {inputType}));
+    return validateHalVersion(context, HalVersion::V1_2);
+}
+
+bool prepare(IOperationExecutionContext* context) {
+    Shape input = context->getInputShape(kInputTensor);
+    Shape output = context->getOutputShape(kOutputTensor);
+    NN_RET_CHECK(SetShape(input, &output));
+    return context->resizeOutputTensor(kOutputTensor, output);
+}
+
+bool execute(IOperationExecutionContext* context) {
+    switch (context->getInputType(kInputTensor)) {
+        case OperandType::TENSOR_FLOAT16:
+            return compute(context->getInputBuffer<_Float16>(kInputTensor),
+                           context->getInputShape(kInputTensor),
+                           context->getOutputBuffer<_Float16>(kOutputTensor));
+        case OperandType::TENSOR_FLOAT32:
+            return compute(context->getInputBuffer<float>(kInputTensor),
+                           context->getInputShape(kInputTensor),
+                           context->getOutputBuffer<float>(kOutputTensor));
+        default:
+            NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
+    }
+}
+
+}  // namespace abs
+
+NN_REGISTER_OPERATION(ABS, abs::kOperationName, abs::validate, abs::prepare, abs::execute);
+
+}  // namespace nn
+}  // namespace android
diff --git a/nn/common/operations/ArgMinMax.cpp b/nn/common/operations/ArgMinMax.cpp
index 87e6d3c..b323d51 100644
--- a/nn/common/operations/ArgMinMax.cpp
+++ b/nn/common/operations/ArgMinMax.cpp
@@ -71,6 +71,7 @@
         return true;                                                           \
     }
 
+    NNAPI_IMPL_ARG_MIN_MAX(OperandType::TENSOR_FLOAT16, _Float16);
     NNAPI_IMPL_ARG_MIN_MAX(OperandType::TENSOR_FLOAT32, float);
     NNAPI_IMPL_ARG_MIN_MAX(OperandType::TENSOR_INT32, int32_t);
     NNAPI_IMPL_ARG_MIN_MAX(OperandType::TENSOR_QUANT8_ASYMM, uint8_t);
diff --git a/nn/common/operations/Concatenation.cpp b/nn/common/operations/Concatenation.cpp
index 2362f49..d6fecbd 100644
--- a/nn/common/operations/Concatenation.cpp
+++ b/nn/common/operations/Concatenation.cpp
@@ -14,10 +14,11 @@
  * limitations under the License.
  */
 
-#include "Operations.h"
 #include "CpuOperationUtils.h"
+#include "Operations.h"
 
 #include "tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h"
+#include "tensorflow/contrib/lite/kernels/internal/reference/legacy_reference_ops.h"
 
 #include "Tracing.h"
 
@@ -50,9 +51,32 @@
 template bool concatenation<_Float16>(const std::vector<const _Float16*>& inputDataPtrs,
                                       const std::vector<Shape>& inputShapes, int32_t axis,
                                       _Float16* outputData, const Shape& outputShape);
-template bool concatenation<uint8_t>(const std::vector<const uint8_t*>& inputDataPtrs,
-                                     const std::vector<Shape>& inputShapes, int32_t axis,
-                                     uint8_t* outputData, const Shape& outputShape);
+
+template <>
+bool concatenation<uint8_t>(const std::vector<const uint8_t*>& inputDataPtrs,
+                            const std::vector<Shape>& inputShapes, int32_t axis,
+                            uint8_t* outputData, const Shape& outputShape) {
+    NNTRACE_TRANS("concatenationQuant8");
+    int num_inputs = inputShapes.size();
+    std::vector<float> inputScales(num_inputs);
+    std::vector<int32> inputOffsets(num_inputs);
+    std::vector<tflite::Dims<4>*> inputDimsPtr(num_inputs);
+    std::vector<tflite::Dims<4> > inputDims(num_inputs);
+    for (int i = 0; i < num_inputs; i++) {
+        inputScales[i] = inputShapes[i].scale;
+        inputOffsets[i] = inputShapes[i].offset;
+        inputDims[i] = convertShapeToDims(inputShapes[i]);
+        inputDimsPtr[i] = &inputDims[i];
+    }
+
+    NNTRACE_COMP_SWITCH("reference_ops::Concatenation");
+    tflite::reference_ops::Concatenation(
+            getNumberOfDimensions(outputShape) - axis - 1, inputDataPtrs.data(),
+            inputDimsPtr.data(), inputOffsets.data(), inputScales.data(), num_inputs, outputData,
+            convertShapeToDims(outputShape), outputShape.offset, outputShape.scale);
+
+    return true;
+}
 
 }  // namespace nn
 }  // namespace android
diff --git a/nn/common/operations/DepthwiseConv2D.cpp b/nn/common/operations/DepthwiseConv2D.cpp
index 60c5e41..9728cf1 100644
--- a/nn/common/operations/DepthwiseConv2D.cpp
+++ b/nn/common/operations/DepthwiseConv2D.cpp
@@ -25,55 +25,70 @@
 namespace android {
 namespace nn {
 
-#define ANDROID_NN_DEPTHWISE_CONV_PARAMETERS                                    \
-    uint32_t height       = getSizeOfDimension(inputShape, 1);                  \
-    uint32_t width        = getSizeOfDimension(inputShape, 2);                  \
-    uint32_t filterHeight = getSizeOfDimension(filterShape, 1);                 \
-    uint32_t filterWidth  = getSizeOfDimension(filterShape, 2);                 \
-    uint32_t outHeight    = getSizeOfDimension(outputShape, 1);                 \
-    uint32_t outWidth     = getSizeOfDimension(outputShape, 2);                 \
-                                                                                \
-    uint32_t paddingHeight = (uint32_t)padding_top;                             \
-    uint32_t paddingWidth = (uint32_t)padding_left;
+bool depthwiseConvFloat16(const _Float16* inputData, const Shape& inputShape,
+                          const _Float16* filterData, const Shape& filterShape,
+                          const _Float16* biasData, const Shape& biasShape, int32_t paddingLeft,
+                          int32_t paddingRight, int32_t paddingTop, int32_t paddingBottom,
+                          int32_t strideWidth, int32_t strideHeight, int32_t depthMultiplier,
+                          int32_t activation, _Float16* outputData, const Shape& outputShape) {
+    NNTRACE_TRANS("depthwiseConvFloat16");
+    std::vector<float> inputDataFloat32(getNumberOfElements(inputShape));
+    convertFloat16ToFloat32(inputData, &inputDataFloat32);
+    std::vector<float> filterDataFloat32(getNumberOfElements(filterShape));
+    convertFloat16ToFloat32(filterData, &filterDataFloat32);
+    std::vector<float> biasDataFloat32(getNumberOfElements(biasShape));
+    convertFloat16ToFloat32(biasData, &biasDataFloat32);
 
-bool depthwiseConvFloat32(const float* inputData, const Shape& inputShape,
-                          const float* filterData, const Shape& filterShape,
-                          const float* biasData, const Shape& biasShape,
-                          int32_t padding_left, int32_t padding_right,
-                          int32_t padding_top, int32_t padding_bottom,
-                          int32_t stride_width, int32_t stride_height,
-                          int32_t depth_multiplier, int32_t activation,
-                          float* outputData, const Shape& outputShape) {
+    std::vector<float> outputDataFloat32(getNumberOfElements(outputShape));
+    depthwiseConvFloat32(inputDataFloat32.data(), inputShape, filterDataFloat32.data(), filterShape,
+                         biasDataFloat32.data(), biasShape, paddingLeft, paddingRight, paddingTop,
+                         paddingBottom, strideWidth, strideHeight, depthMultiplier, activation,
+                         outputDataFloat32.data(), outputShape);
+
+    convertFloat32ToFloat16(outputDataFloat32, outputData);
+    return true;
+}
+
+#define ANDROID_NN_DEPTHWISE_CONV_PARAMETERS                    \
+    uint32_t height = getSizeOfDimension(inputShape, 1);        \
+    uint32_t width = getSizeOfDimension(inputShape, 2);         \
+    uint32_t filterHeight = getSizeOfDimension(filterShape, 1); \
+    uint32_t filterWidth = getSizeOfDimension(filterShape, 2);  \
+    uint32_t outHeight = getSizeOfDimension(outputShape, 1);    \
+    uint32_t outWidth = getSizeOfDimension(outputShape, 2);     \
+                                                                \
+    uint32_t paddingHeight = (uint32_t)paddingTop;              \
+    uint32_t paddingWidth = (uint32_t)paddingLeft;
+
+bool depthwiseConvFloat32(const float* inputData, const Shape& inputShape, const float* filterData,
+                          const Shape& filterShape, const float* biasData, const Shape& biasShape,
+                          int32_t paddingLeft, int32_t paddingRight, int32_t paddingTop,
+                          int32_t paddingBottom, int32_t strideWidth, int32_t strideHeight,
+                          int32_t depthMultiplier, int32_t activation, float* outputData,
+                          const Shape& outputShape) {
     NNTRACE_TRANS("depthwiseConvFloat32");
 
     ANDROID_NN_DEPTHWISE_CONV_PARAMETERS
 
     float output_activation_min, output_activation_max;
-    CalculateActivationRangeFloat(activation, &output_activation_min,
-                                  &output_activation_max);
+    CalculateActivationRangeFloat(activation, &output_activation_min, &output_activation_max);
 
     NNTRACE_COMP_SWITCH("optimized_ops::DepthwiseConv");
     tflite::optimized_ops::DepthwiseConv(
-            inputData, convertShapeToDims(inputShape),
-            filterData, convertShapeToDims(filterShape),
-            biasData, convertShapeToDims(biasShape),
-            stride_width, stride_height,
-            paddingWidth, paddingHeight, depth_multiplier,
-            output_activation_min, output_activation_max,
+            inputData, convertShapeToDims(inputShape), filterData, convertShapeToDims(filterShape),
+            biasData, convertShapeToDims(biasShape), strideWidth, strideHeight, paddingWidth,
+            paddingHeight, depthMultiplier, output_activation_min, output_activation_max,
             outputData, convertShapeToDims(outputShape));
 
     return true;
 }
 
-
 bool depthwiseConvQuant8(const uint8_t* inputData, const Shape& inputShape,
                          const uint8_t* filterData, const Shape& filterShape,
-                         const int32_t* biasData, const Shape& biasShape,
-                         int32_t padding_left, int32_t padding_right,
-                         int32_t padding_top, int32_t padding_bottom,
-                         int32_t stride_width, int32_t stride_height,
-                         int32_t depth_multiplier, int32_t activation,
-                         uint8_t* outputData, const Shape& outputShape) {
+                         const int32_t* biasData, const Shape& biasShape, int32_t paddingLeft,
+                         int32_t paddingRight, int32_t paddingTop, int32_t paddingBottom,
+                         int32_t strideWidth, int32_t strideHeight, int32_t depthMultiplier,
+                         int32_t activation, uint8_t* outputData, const Shape& outputShape) {
     NNTRACE_TRANS("depthwiseConvQuant8");
 
     ANDROID_NN_DEPTHWISE_CONV_PARAMETERS
@@ -101,13 +116,10 @@
 
     NNTRACE_COMP_SWITCH("optimized_ops::DepthwiseConv");
     tflite::optimized_ops::DepthwiseConv(
-            inputData, convertShapeToDims(inputShape), inputOffset,
-            filterData, convertShapeToDims(filterShape), filterOffset,
-            biasData, convertShapeToDims(biasShape),
-            stride_width, stride_height,
-            paddingWidth, paddingHeight, depth_multiplier,
-            outputOffset, output_multiplier, output_shift,
-            output_activation_min, output_activation_max,
+            inputData, convertShapeToDims(inputShape), inputOffset, filterData,
+            convertShapeToDims(filterShape), filterOffset, biasData, convertShapeToDims(biasShape),
+            strideWidth, strideHeight, paddingWidth, paddingHeight, depthMultiplier, outputOffset,
+            output_multiplier, output_shift, output_activation_min, output_activation_max,
             outputData, convertShapeToDims(outputShape));
 
     return true;
diff --git a/nn/common/operations/Gather.cpp b/nn/common/operations/Gather.cpp
index 0c8f7d6..82eec67 100644
--- a/nn/common/operations/Gather.cpp
+++ b/nn/common/operations/Gather.cpp
@@ -16,29 +16,42 @@
 
 #define LOG_TAG "Operations"
 
-#include "Gather.h"
+#include "HalInterfaces.h"
+#include "OperationResolver.h"
+#include "OperationsUtils.h"
 #include "Tracing.h"
 
 namespace android {
 namespace nn {
 namespace gather {
 
+constexpr char kOperationName[] = "GATHER";
+
+constexpr uint32_t kNumInputs = 3;
+constexpr uint32_t kInputTensor = 0;
+constexpr uint32_t kInputAxis = 1;
+constexpr uint32_t kInputIndices = 2;
+
+constexpr uint32_t kNumOutputs = 1;
+constexpr uint32_t kOutputTensor = 0;
+
 namespace {
 
 template <typename T>
-inline bool gatherImpl(const T* inputData, const Shape& inputShape, int32_t axis,
-                       const int32_t* indicesData, const Shape& indicesShape, T* outputData,
-                       const Shape& outputShape) {
+inline bool eval(const T* inputData, const Shape& inputShape, int32_t axis,
+                 const int32_t* indicesData, const Shape& indicesShape, T* outputData) {
     const auto outerSize = getNumberOfElements(inputShape, 0, axis);
     const auto axisSize = getSizeOfDimension(inputShape, axis);
     const auto innerSize =
             getNumberOfElements(inputShape, axis + 1, getNumberOfDimensions(inputShape));
     const auto indicesCount = getNumberOfElements(indicesShape);
-    for (int outer = 0; outer < outerSize; ++outer) {
-        for (int i = 0; i < indicesCount; ++i) {
-            NN_OPS_CHECK(0 <= indicesData[i] && indicesData[i] < axisSize);
-            std::memcpy(outputData + (outer * indicesCount + i) * innerSize,
-                        inputData + (outer * axisSize + indicesData[i]) * innerSize,
+    for (uint32_t outer = 0; outer < outerSize; ++outer) {
+        for (uint32_t outputIndex = 0; outputIndex < indicesCount; ++outputIndex) {
+            NN_RET_CHECK_GE(indicesData[outputIndex], 0);
+            const auto inputIndex = static_cast<uint32_t>(indicesData[outputIndex]);
+            NN_RET_CHECK_LT(inputIndex, axisSize);
+            std::memcpy(outputData + (outer * indicesCount + outputIndex) * innerSize,
+                        inputData + (outer * axisSize + inputIndex) * innerSize,
                         sizeof(T) * innerSize);
         }
     }
@@ -47,45 +60,76 @@
 
 }  // namespace
 
-bool prepare(const Shape& input, int32_t axis, const Shape& indices, Shape* output) {
-    NN_CHECK(handleNegativeAxis(input, &axis));
-    output->dimensions.clear();
-    output->dimensions.reserve(getNumberOfDimensions(input) + getNumberOfDimensions(indices) - 1);
-    output->dimensions.insert(output->dimensions.end(), input.dimensions.begin(),
-                              input.dimensions.begin() + axis);
-    output->dimensions.insert(output->dimensions.end(), indices.dimensions.begin(),
-                              indices.dimensions.end());
-    output->dimensions.insert(output->dimensions.end(), input.dimensions.begin() + axis + 1,
-                              input.dimensions.end());
-    return true;
+bool validate(const IOperationValidationContext* context) {
+    NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
+    NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs);
+    OperandType inputType = context->getInputType(kInputTensor);
+    NN_RET_CHECK(
+            inputType == OperandType::TENSOR_FLOAT16 || inputType == OperandType::TENSOR_FLOAT32 ||
+            inputType == OperandType::TENSOR_INT32 || inputType == OperandType::TENSOR_QUANT8_ASYMM)
+            << "Unsupported tensor type for operation " << kOperationName;
+    NN_RET_CHECK(validateInputTypes(context,
+                                    {inputType, OperandType::INT32, OperandType::TENSOR_INT32}));
+    NN_RET_CHECK(validateOutputTypes(context, {inputType}));
+    return validateHalVersion(context, HalVersion::V1_2);
 }
 
-bool compute(const uint8_t* inputData, const Shape& inputShape, int32_t axis,
-             const int32_t* indicesData, const Shape& indicesShape, uint8_t* outputData,
-             const Shape& outputShape) {
-    NNTRACE_TRANS("gather::compute");
-    NN_CHECK(handleNegativeAxis(inputShape, &axis));
+bool prepare(IOperationExecutionContext* context) {
+    Shape input = context->getInputShape(kInputTensor);
+    int32_t axis = context->getInputValue<int32_t>(kInputAxis);
+    NN_RET_CHECK(handleNegativeAxis(input, &axis));
+    Shape indices = context->getInputShape(kInputIndices);
+    Shape output = context->getOutputShape(kOutputTensor);
 
-#define ANDROID_NN_GATHER(operandType, dataType)                                                \
-    case operandType: {                                                                         \
-        NNTRACE_TRANS("gatherImpl::" #dataType);                                                \
-        gatherImpl(reinterpret_cast<const dataType*>(inputData), inputShape, axis, indicesData, \
-                   indicesShape, reinterpret_cast<dataType*>(outputData), outputShape);         \
-        return true;                                                                            \
-    }
+    output.dimensions.clear();
+    output.dimensions.reserve(getNumberOfDimensions(input) + getNumberOfDimensions(indices) - 1);
+    output.dimensions.insert(output.dimensions.end(), input.dimensions.begin(),
+                             input.dimensions.begin() + axis);
+    output.dimensions.insert(output.dimensions.end(), indices.dimensions.begin(),
+                             indices.dimensions.end());
+    output.dimensions.insert(output.dimensions.end(), input.dimensions.begin() + axis + 1,
+                             input.dimensions.end());
 
-    switch (inputShape.type) {
-        ANDROID_NN_GATHER(OperandType::TENSOR_FLOAT16, _Float16);
-        ANDROID_NN_GATHER(OperandType::TENSOR_FLOAT32, float);
-        ANDROID_NN_GATHER(OperandType::TENSOR_INT32, int32_t);
-        ANDROID_NN_GATHER(OperandType::TENSOR_QUANT8_ASYMM, uint8_t);
+    return context->resizeOutputTensor(kOutputTensor, output);
+}
+
+bool execute(IOperationExecutionContext* context) {
+    int32_t axis = context->getInputValue<int32_t>(kInputAxis);
+    NN_RET_CHECK(handleNegativeAxis(context->getInputShape(kInputTensor), &axis));
+    switch (context->getInputType(kInputTensor)) {
+        case OperandType::TENSOR_FLOAT16:
+            return eval(context->getInputBuffer<_Float16>(kInputTensor),
+                        context->getInputShape(kInputTensor), axis,
+                        context->getInputBuffer<int32_t>(kInputIndices),
+                        context->getInputShape(kInputIndices),
+                        context->getOutputBuffer<_Float16>(kOutputTensor));
+        case OperandType::TENSOR_FLOAT32:
+            return eval(context->getInputBuffer<float>(kInputTensor),
+                        context->getInputShape(kInputTensor), axis,
+                        context->getInputBuffer<int32_t>(kInputIndices),
+                        context->getInputShape(kInputIndices),
+                        context->getOutputBuffer<float>(kOutputTensor));
+        case OperandType::TENSOR_INT32:
+            return eval(context->getInputBuffer<int32_t>(kInputTensor),
+                        context->getInputShape(kInputTensor), axis,
+                        context->getInputBuffer<int32_t>(kInputIndices),
+                        context->getInputShape(kInputIndices),
+                        context->getOutputBuffer<int32_t>(kOutputTensor));
+        case OperandType::TENSOR_QUANT8_ASYMM:
+            return eval(context->getInputBuffer<uint8_t>(kInputTensor),
+                        context->getInputShape(kInputTensor), axis,
+                        context->getInputBuffer<int32_t>(kInputIndices),
+                        context->getInputShape(kInputIndices),
+                        context->getOutputBuffer<uint8_t>(kOutputTensor));
         default:
-            LOG(ERROR) << "Unsupported GATHER output type";
-            return false;
+            NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
-#undef ANDROID_NN_GATHER
 }
 
 }  // namespace gather
+
+NN_REGISTER_OPERATION(GATHER, gather::kOperationName, gather::validate, gather::prepare,
+                      gather::execute);
+
 }  // namespace nn
 }  // namespace android
diff --git a/nn/common/operations/Gather.h b/nn/common/operations/Gather.h
deleted file mode 100644
index 95d61a0..0000000
--- a/nn/common/operations/Gather.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef FRAMEWORKS_ML_NN_GATHER_H
-#define FRAMEWORKS_ML_NN_GATHER_H
-
-#include "CpuOperationUtils.h"
-
-namespace android {
-namespace nn {
-namespace gather {
-
-bool prepare(const Shape& input, int32_t axis, const Shape& indices, Shape* output);
-
-bool compute(const uint8_t* inputData, const Shape& inputShape, int32_t axis,
-             const int32_t* indicesData, const Shape& indicesShape, uint8_t* outputData,
-             const Shape& outputShape);
-
-}  // namespace gather
-}  // namespace nn
-}  // namespace android
-
-#endif  // FRAMEWORKS_ML_NN_GATHER_H
diff --git a/nn/common/operations/Reshape.cpp b/nn/common/operations/Reshape.cpp
index dc46bfb..ba65bb1 100644
--- a/nn/common/operations/Reshape.cpp
+++ b/nn/common/operations/Reshape.cpp
@@ -29,19 +29,32 @@
 namespace android {
 namespace nn {
 
-bool reshapeGeneric(const void* inputData, const Shape& inputShape,
-                    void* outputData, const Shape& outputShape) {
-    NNTRACE_COMP("reshapeGeneric");
+bool copyData(const void* inputData, const Shape& inputShape, void* outputData,
+              const Shape& outputShape) {
+    NNTRACE_COMP("copyData");
     size_t count = sizeOfData(inputShape.type, inputShape.dimensions);
     memcpy(outputData, inputData, count);
     return true;
 }
 
+bool resizeBilinearFloat16(const _Float16* inputData, const Shape& inputShape, _Float16* outputData,
+                           const Shape& outputShape) {
+    NNTRACE_TRANS("resizeBilinearFloat16");
+    std::vector<float> inputData_float32(getNumberOfElements(inputShape));
+    convertFloat16ToFloat32(inputData, &inputData_float32);
+    std::vector<float> outputData_float32(getNumberOfElements(outputShape));
+
+    resizeBilinearFloat32(inputData_float32.data(), inputShape, outputData_float32.data(),
+                          outputShape);
+    convertFloat32ToFloat16(outputData_float32, outputData);
+    return true;
+}
+
 bool resizeBilinearFloat32(const float* inputData, const Shape& inputShape,
                            float* outputData, const Shape& outputShape) {
     NNTRACE_TRANS("resizeBilinearFloat32");
-    int32_t height = (int32_t) getSizeOfDimension(outputShape, 1);
-    int32_t width  = (int32_t) getSizeOfDimension(outputShape, 2);
+    int32_t height = static_cast<int32_t>(getSizeOfDimension(outputShape, 1));
+    int32_t width = static_cast<int32_t>(getSizeOfDimension(outputShape, 2));
 
     int32_t outDimData[2] = {height, width};
     // We have to fake a tensor here, to satisfy ResizeBilinear().
@@ -56,63 +69,45 @@
     return true;
 }
 
-bool depthToSpaceGeneric(const uint8_t* inputData, const Shape& inputShape,
-                         int32_t blockSize,
-                         uint8_t* outputData, const Shape& outputShape) {
-    NNTRACE_TRANS("depthToSpaceGeneric");
-    if (inputShape.type == OperandType::TENSOR_FLOAT32) {
-        NNTRACE_COMP_SWITCH("optimized_ops::DepthToSpace::float");
-        tflite::optimized_ops::DepthToSpace(
-                 reinterpret_cast<const float*>(inputData),
-                 convertShapeToDims(inputShape),
-                 blockSize,
-                 reinterpret_cast<float*>(outputData),
-                 convertShapeToDims(outputShape));
-    } else if (inputShape.type == OperandType::TENSOR_QUANT8_ASYMM) {
-        NNTRACE_COMP_SWITCH("optimized_ops::DepthToSpace::uint8");
-        tflite::optimized_ops::DepthToSpace(
-                reinterpret_cast<const uint8_t*>(inputData),
-                convertShapeToDims(inputShape),
-                blockSize,
-                reinterpret_cast<uint8_t*>(outputData),
-                convertShapeToDims(outputShape));
-    } else {
-        LOG(ERROR) << "Unsupported data type";
-        return false;
-    }
+template <typename T>
+bool depthToSpaceGeneric(const T* inputData, const Shape& inputShape, int32_t blockSize,
+                         T* outputData, const Shape& outputShape) {
+    NNTRACE_COMP("optimized_ops::DepthToSpace");
+    tflite::optimized_ops::DepthToSpace(inputData, convertShapeToDims(inputShape), blockSize,
+                                        outputData, convertShapeToDims(outputShape));
     return true;
 }
-
-bool spaceToDepthGeneric(const uint8_t* inputData, const Shape& inputShape,
-                         int32_t blockSize,
-                         uint8_t* outputData, const Shape& outputShape) {
-    NNTRACE_TRANS("spaceToDepthGeneric");
-    if (inputShape.type == OperandType::TENSOR_FLOAT32) {
-        NNTRACE_COMP_SWITCH("optimized_ops::SpaceToDepth::float");
-        tflite::optimized_ops::SpaceToDepth(
-                reinterpret_cast<const float*>(inputData),
-                convertShapeToDims(inputShape),
-                blockSize,
-                reinterpret_cast<float*>(outputData),
-                convertShapeToDims(outputShape));
-    } else if (inputShape.type == OperandType::TENSOR_QUANT8_ASYMM) {
-        NNTRACE_COMP_SWITCH("optimized_ops::SpaceToDepth::uint8");
-        tflite::optimized_ops::SpaceToDepth(
-                reinterpret_cast<const uint8_t*>(inputData),
-                convertShapeToDims(inputShape),
-                blockSize,
-                reinterpret_cast<uint8_t*>(outputData),
-                convertShapeToDims(outputShape));
-    } else {
-        LOG(ERROR) << "Unsupported data type";
-        return false;
-    }
-    return true;
-}
+template bool depthToSpaceGeneric<float>(const float* inputData, const Shape& inputShape,
+                                         int32_t blockSize, float* outputData,
+                                         const Shape& outputShape);
+template bool depthToSpaceGeneric<_Float16>(const _Float16* inputData, const Shape& inputShape,
+                                            int32_t blockSize, _Float16* outputData,
+                                            const Shape& outputShape);
+template bool depthToSpaceGeneric<uint8_t>(const uint8_t* inputData, const Shape& inputShape,
+                                           int32_t blockSize, uint8_t* outputData,
+                                           const Shape& outputShape);
 
 template <typename T>
-static bool padGeneric(const T* inputData, const Shape& inputShape, const int32_t* paddings,
-                       T padValue, T* outputData, const Shape& outputShape) {
+bool spaceToDepthGeneric(const T* inputData, const Shape& inputShape, int32_t blockSize,
+                         T* outputData, const Shape& outputShape) {
+    NNTRACE_COMP("optimized_ops::SpaceToDepth");
+    tflite::optimized_ops::SpaceToDepth(inputData, convertShapeToDims(inputShape), blockSize,
+                                        outputData, convertShapeToDims(outputShape));
+    return true;
+}
+template bool spaceToDepthGeneric<float>(const float* inputData, const Shape& inputShape,
+                                         int32_t blockSize, float* outputData,
+                                         const Shape& outputShape);
+template bool spaceToDepthGeneric<_Float16>(const _Float16* inputData, const Shape& inputShape,
+                                            int32_t blockSize, _Float16* outputData,
+                                            const Shape& outputShape);
+template bool spaceToDepthGeneric<uint8_t>(const uint8_t* inputData, const Shape& inputShape,
+                                           int32_t blockSize, uint8_t* outputData,
+                                           const Shape& outputShape);
+
+template <typename T>
+bool padGeneric(const T* inputData, const Shape& inputShape, const int32_t* paddings, T padValue,
+                T* outputData, const Shape& outputShape) {
     NNTRACE_TRANS("padGeneric");
 
     // Based on
@@ -210,92 +205,66 @@
 
     return true;
 }
+template bool padGeneric<float>(const float* inputData, const Shape& inputShape,
+                                const int32_t* paddings, float padValue, float* outputData,
+                                const Shape& outputShape);
+template bool padGeneric<_Float16>(const _Float16* inputData, const Shape& inputShape,
+                                   const int32_t* paddings, _Float16 padValue, _Float16* outputData,
+                                   const Shape& outputShape);
+template bool padGeneric<uint8_t>(const uint8_t* inputData, const Shape& inputShape,
+                                  const int32_t* paddings, uint8_t padValue, uint8_t* outputData,
+                                  const Shape& outputShape);
 
-bool padFloat32(const float* inputData, const Shape& inputShape, const int32_t* paddings,
-                float padValue, float* outputData, const Shape& outputShape) {
-    return padGeneric(inputData, inputShape, paddings, padValue, outputData, outputShape);
-}
-
-bool padQuant8(const uint8_t* inputData, const Shape& inputShape, const int32_t* paddings,
-               uint8_t padValue, uint8_t* outputData, const Shape& outputShape) {
-    return padGeneric(inputData, inputShape, paddings, padValue, outputData, outputShape);
-}
-
-bool batchToSpaceGeneric(const uint8_t* inputData, const Shape& inputShape,
-                         const int32_t* blockSize,
-                         uint8_t* outputData, const Shape& outputShape) {
-    NNTRACE_TRANS("batchToSpaceGeneric");
+template <typename T>
+bool batchToSpaceGeneric(const T* inputData, const Shape& inputShape, const int32_t* blockSize,
+                         T* outputData, const Shape& outputShape) {
     // Needed by low level implementation, but not really used.
     tflite::Dims<4> blockSizeDim, cropsDim;
     const int32 crops[4] = {0, 0, 0, 0};
-    if (inputShape.type == OperandType::TENSOR_FLOAT32) {
-        NNTRACE_COMP_SWITCH("optimized_ops::BatchToSpaceND::float");
-        tflite::optimized_ops::BatchToSpaceND(
-                 reinterpret_cast<const float*>(inputData),
-                 convertShapeToDims(inputShape),
-                 blockSize, blockSizeDim,
-                 crops, cropsDim,
-                 reinterpret_cast<float*>(outputData),
-                 convertShapeToDims(outputShape));
-    } else if (inputShape.type == OperandType::TENSOR_QUANT8_ASYMM) {
-        NNTRACE_COMP_SWITCH("optimized_ops::BatchToSpaceND::uint8");
-        tflite::optimized_ops::BatchToSpaceND(
-                reinterpret_cast<const uint8_t*>(inputData),
-                convertShapeToDims(inputShape),
-                blockSize, blockSizeDim,
-                crops, cropsDim,
-                reinterpret_cast<uint8_t*>(outputData),
-                convertShapeToDims(outputShape));
-    } else {
-        LOG(ERROR) << "Unsupported data type";
-        return false;
-    }
+    NNTRACE_COMP("optimized_ops::BatchToSpaceND");
+    tflite::optimized_ops::BatchToSpaceND(inputData, convertShapeToDims(inputShape), blockSize,
+                                          blockSizeDim, crops, cropsDim, outputData,
+                                          convertShapeToDims(outputShape));
     return true;
 }
+template bool batchToSpaceGeneric<float>(const float* inputData, const Shape& inputShape,
+                                         const int32_t* blockSize, float* outputData,
+                                         const Shape& outputShape);
+template bool batchToSpaceGeneric<_Float16>(const _Float16* inputData, const Shape& inputShape,
+                                            const int32_t* blockSize, _Float16* outputData,
+                                            const Shape& outputShape);
+template bool batchToSpaceGeneric<uint8_t>(const uint8_t* inputData, const Shape& inputShape,
+                                           const int32_t* blockSize, uint8_t* outputData,
+                                           const Shape& outputShape);
 
-bool spaceToBatchGeneric(const uint8_t* inputData, const Shape& inputShape,
-                         const int32_t* blockSize,
-                         const int32_t* padding, const Shape& paddingShape,
-                         uint8_t* outputData, const Shape& outputShape) {
-    NNTRACE_TRANS("spaceToBatchGeneric");
+template <typename T>
+bool spaceToBatchGeneric(const T* inputData, const Shape& inputShape, const int32_t* blockSize,
+                         const int32_t* padding, const Shape& paddingShape, T* outputData,
+                         const Shape& outputShape) {
     // Needed by low level implementation, but not really used.
     tflite::Dims<4> blockSizeDim;
-    if (inputShape.type == OperandType::TENSOR_FLOAT32) {
-        NNTRACE_COMP_SWITCH("optimized_ops::SpaceToBatchND::float");
-        tflite::optimized_ops::SpaceToBatchND(
-                reinterpret_cast<const float*>(inputData),
-                convertShapeToDims(inputShape),
-                blockSize, blockSizeDim,
-                padding, convertShapeToDims(paddingShape),
-                reinterpret_cast<float*>(outputData),
-                convertShapeToDims(outputShape));
-    } else if (inputShape.type == OperandType::TENSOR_QUANT8_ASYMM) {
-        NNTRACE_COMP_SWITCH("optimized_ops::SpaceToBatchND::uint8");
-        tflite::optimized_ops::SpaceToBatchND(
-                reinterpret_cast<const uint8_t*>(inputData),
-                convertShapeToDims(inputShape),
-                blockSize, blockSizeDim,
-                padding, convertShapeToDims(paddingShape),
-                reinterpret_cast<uint8_t*>(outputData),
-                convertShapeToDims(outputShape));
-    } else {
-        LOG(ERROR) << "Unsupported data type";
-        return false;
-    }
+    NNTRACE_COMP("optimized_ops::SpaceToBatchND");
+    tflite::optimized_ops::SpaceToBatchND(inputData, convertShapeToDims(inputShape), blockSize,
+                                          blockSizeDim, padding, convertShapeToDims(paddingShape),
+                                          outputData, convertShapeToDims(outputShape));
     return true;
 }
+template bool spaceToBatchGeneric<float>(const float* inputData, const Shape& inputShape,
+                                         const int32_t* blockSize, const int32_t* padding,
+                                         const Shape& paddingShape, float* outputData,
+                                         const Shape& outputShape);
+template bool spaceToBatchGeneric<_Float16>(const _Float16* inputData, const Shape& inputShape,
+                                            const int32_t* blockSize, const int32_t* padding,
+                                            const Shape& paddingShape, _Float16* outputData,
+                                            const Shape& outputShape);
+template bool spaceToBatchGeneric<uint8_t>(const uint8_t* inputData, const Shape& inputShape,
+                                           const int32_t* blockSize, const int32_t* padding,
+                                           const Shape& paddingShape, uint8_t* outputData,
+                                           const Shape& outputShape);
 
-bool squeezeGeneric(const void* inputData, const Shape& inputShape,
-                    void* outputData, const Shape& outputShape) {
-    NNTRACE_COMP("squeezeGeneric");
-    size_t count = sizeOfData(inputShape.type, inputShape.dimensions);
-    memcpy(outputData, inputData, count);
-    return true;
-}
-
-bool transposeGeneric(const uint8_t* inputData, const Shape& inputShape,
-                      const int32_t* perm, const Shape& permShape,
-                      uint8_t* outputData, const Shape& outputShape) {
+template <typename T>
+bool transposeGeneric(const T* inputData, const Shape& inputShape, const int32_t* perm,
+                      const Shape& permShape, T* outputData, const Shape& outputShape) {
     NNTRACE_TRANS("transposeGeneric");
     // Reverse the permuted axes and convert to 4D due to the way Dims are
     // constructed.
@@ -308,34 +277,26 @@
         perm = perm_tmp;
     }
     int32_t reversed_perm[kOutputDimensionNum];
-    for (int32_t output_k = 0, input_k = permSize - 1; output_k < permSize;
-             ++output_k, --input_k) {
+    for (int32_t output_k = 0, input_k = permSize - 1; output_k < permSize; ++output_k, --input_k) {
         reversed_perm[output_k] = permSize - perm[input_k] - 1;
     }
     for (int32_t k = permSize; k < kOutputDimensionNum; ++k) {
         reversed_perm[k] = k;
     }
-    if (inputShape.type == OperandType::TENSOR_FLOAT32) {
-        NNTRACE_COMP_SWITCH("optimized_ops::Transpose::float");
-        tflite::reference_ops::Transpose(
-                reinterpret_cast<const float*>(inputData),
-                convertShapeToDims(inputShape),
-                reinterpret_cast<float*>(outputData),
-                convertShapeToDims(outputShape),
-                reversed_perm);
-    } else if (inputShape.type == OperandType::TENSOR_QUANT8_ASYMM) {
-        NNTRACE_COMP_SWITCH("optimized_ops::Transpose::uint8");
-        tflite::reference_ops::Transpose(
-                reinterpret_cast<const uint8_t*>(inputData),
-                convertShapeToDims(inputShape),
-                reinterpret_cast<uint8_t*>(outputData),
-                convertShapeToDims(outputShape),
-                reversed_perm);
-    } else {
-        LOG(ERROR) << "Unsupported data type";
-        return false;
-    }
+    NNTRACE_COMP_SWITCH("reference_ops::Transpose");
+    tflite::reference_ops::Transpose(inputData, convertShapeToDims(inputShape), outputData,
+                                     convertShapeToDims(outputShape), reversed_perm);
     return true;
 }
+template bool transposeGeneric<float>(const float* inputData, const Shape& inputShape,
+                                      const int32_t* perm, const Shape& permShape,
+                                      float* outputData, const Shape& outputShape);
+template bool transposeGeneric<_Float16>(const _Float16* inputData, const Shape& inputShape,
+                                         const int32_t* perm, const Shape& permShape,
+                                         _Float16* outputData, const Shape& outputShape);
+template bool transposeGeneric<uint8_t>(const uint8_t* inputData, const Shape& inputShape,
+                                        const int32_t* perm, const Shape& permShape,
+                                        uint8_t* outputData, const Shape& outputShape);
+
 } // namespace nn
 } // namespace android
diff --git a/nn/common/operations/RoiPooling.cpp b/nn/common/operations/RoiPooling.cpp
new file mode 100644
index 0000000..591617f
--- /dev/null
+++ b/nn/common/operations/RoiPooling.cpp
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "CpuOperationUtils.h"
+#include "Operations.h"
+
+#include <cfloat>
+#include <cmath>
+
+#include "Tracing.h"
+
+namespace android {
+namespace nn {
+
+template <typename T_Input>
+bool roiPoolingImpl(const T_Input* inputData, const Shape& inputShape, const float* roiData,
+                    const Shape& roiShape, float spatialScale, T_Input* outputData,
+                    const Shape& outputShape) {
+    NNTRACE_TRANS("RoiPooling");
+
+    const uint32_t kRoiDim = 4;
+
+    uint32_t inHeight = getSizeOfDimension(inputShape, 1);
+    uint32_t inWidth = getSizeOfDimension(inputShape, 2);
+    uint32_t inDepth = getSizeOfDimension(inputShape, 3);
+    uint32_t outHeight = getSizeOfDimension(outputShape, 1);
+    uint32_t outWidth = getSizeOfDimension(outputShape, 2);
+    uint32_t numRois = getSizeOfDimension(roiShape, 0);
+    uint32_t roiInfoLength = getSizeOfDimension(roiShape, 1);
+
+    T_Input* outPtr = outputData;
+    const float* roiDataEnd = roiData + numRois * roiInfoLength;
+    for (const float* roiInfo = roiData; roiInfo < roiDataEnd; roiInfo += kRoiDim) {
+        uint32_t batchId = 0;
+        // get optional batch id
+        if (roiInfoLength == kRoiDim + 1) {
+            batchId = std::round(roiInfo[0]);
+            roiInfo++;
+        }
+        const T_Input* batchBase = inputData + batchId * inHeight * inWidth * inDepth;
+
+        int32_t wRoiStart = std::round(roiInfo[0] * spatialScale);
+        int32_t hRoiStart = std::round(roiInfo[1] * spatialScale);
+        int32_t wRoiEnd = std::round(roiInfo[2] * spatialScale);
+        int32_t hRoiEnd = std::round(roiInfo[3] * spatialScale);
+
+        // Rois with width/height < 1 are considered malformed and are forced to be 1
+        float roiWidth = static_cast<float>(std::max(wRoiEnd - wRoiStart + 1, 1));
+        float roiHeight = static_cast<float>(std::max(hRoiEnd - hRoiStart + 1, 1));
+        float wStepSize = roiWidth / static_cast<float>(outWidth);
+        float hStepSize = roiHeight / static_cast<float>(outHeight);
+
+        for (uint32_t i = 0; i < outHeight; i++) {
+            for (uint32_t j = 0; j < outWidth; j++) {
+                // Take floor on start, ceil on end, start included, end excluded, i.e. [start, end)
+                // end is guaranteed to be larger than start by at least 1
+                uint32_t wStart = std::floor(wStepSize * j + wRoiStart);
+                uint32_t wEnd = std::ceil(wStepSize * (j + 1) + wRoiStart);
+                uint32_t hStart = std::floor(hStepSize * i + hRoiStart);
+                uint32_t hEnd = std::ceil(hStepSize * (i + 1) + hRoiStart);
+
+                wStart = std::min(wStart, inWidth);
+                wEnd = std::min(wEnd, inWidth);
+                hStart = std::min(hStart, inHeight);
+                hEnd = std::min(hEnd, inHeight);
+
+                for (uint32_t k = 0; k < inDepth; k++) {
+                    T_Input maxValue;
+                    bool first = true;
+                    for (uint32_t h = hStart; h < hEnd; h++) {
+                        for (uint32_t w = wStart; w < wEnd; w++) {
+                            T_Input inputValue = batchBase[h * inWidth * inDepth + w * inDepth + k];
+                            if (first || inputValue > maxValue) {
+                                maxValue = inputValue;
+                                first = false;
+                            }
+                        }
+                    }
+                    outPtr[k] = maxValue;
+                }
+                outPtr += inDepth;
+            }
+        }
+    }
+    return true;
+}
+
+bool roiPoolingGeneric(const uint8_t* inputData, const Shape& inputShape, const uint8_t* roiData,
+                       const Shape& roiShape, float spatialScale, uint8_t* outputData,
+                       const Shape& outputShape) {
+    NNTRACE_TRANS("roiPoolingGeneric");
+    if (inputShape.type == OperandType::TENSOR_FLOAT32) {
+        return roiPoolingImpl<float>(reinterpret_cast<const float*>(inputData), inputShape,
+                                     reinterpret_cast<const float*>(roiData), roiShape,
+                                     spatialScale, reinterpret_cast<float*>(outputData),
+                                     outputShape);
+    } else if (inputShape.type == OperandType::TENSOR_QUANT8_ASYMM) {
+        return roiPoolingImpl<uint8_t>(reinterpret_cast<const uint8_t*>(inputData), inputShape,
+                                       reinterpret_cast<const float*>(roiData), roiShape,
+                                       spatialScale, reinterpret_cast<uint8_t*>(outputData),
+                                       outputShape);
+    } else {
+        LOG(ERROR) << "Unsupported data type";
+        return false;
+    }
+}
+
+}  // namespace nn
+}  // namespace android
diff --git a/nn/common/operations/Slice.cpp b/nn/common/operations/Slice.cpp
new file mode 100644
index 0000000..9d31b60
--- /dev/null
+++ b/nn/common/operations/Slice.cpp
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Slice.h"
+
+#include "IndexedShapeWrapper.h"
+#include "OperationsUtils.h"
+
+#include <vector>
+
+namespace android {
+namespace nn {
+namespace slice {
+
+namespace {
+
+template <typename T>
+void addVectors(const std::vector<T>& a, const std::vector<T>& b, std::vector<T>* res) {
+    for (int i = 0; i < res->size(); ++i) {
+        res->at(i) = a[i] + b[i];
+    }
+}
+
+template <typename T>
+bool evalGeneric(const T* inputData, const Shape& inputShape, const int32_t* beginData,
+                 const Shape& beginShape, const int32_t* sizeData, const Shape& sizeShape,
+                 T* outputData, const Shape& outputShape) {
+    const int outputSize = getNumberOfElements(outputShape);
+    const IndexedShapeWrapper indexedOutput = IndexedShapeWrapper(outputShape);
+    const IndexedShapeWrapper indexedInput = IndexedShapeWrapper(inputShape);
+    std::vector<uint32_t> outputIndex(getNumberOfDimensions(outputShape), 0);
+    std::vector<uint32_t> beginIndex(getSizeOfDimension(beginShape, 0));
+    std::vector<uint32_t> inputIndex(getNumberOfDimensions(inputShape));
+
+    for (int i = 0; i < beginIndex.size(); ++i) {
+        beginIndex[i] = static_cast<uint32_t>(beginData[i]);
+    }
+
+    bool lastIndex = false;
+    uint32_t outputOffset;
+    uint32_t inputOffset;
+
+    do {
+        addVectors(outputIndex, beginIndex, &inputIndex);
+
+        NN_RET_CHECK(indexedOutput.indexToFlatIndex(outputIndex, &outputOffset));
+        NN_RET_CHECK(indexedInput.indexToFlatIndex(inputIndex, &inputOffset));
+
+        outputData[outputOffset] = inputData[inputOffset];
+        NN_RET_CHECK(indexedOutput.nextIndexInplace(&outputIndex, &lastIndex));
+    } while (!lastIndex);
+    return true;
+}
+
+}  // namespace
+
+bool prepare(const Shape& inputShape, const void* untypedBeginData, const Shape& beginShape,
+             const void* untypedSizeData, const Shape& sizeShape, Shape* outputShape) {
+    const int32_t n_dims = getNumberOfDimensions(inputShape);
+    NN_RET_CHECK(n_dims > 0);
+
+    NN_RET_CHECK_EQ(getNumberOfDimensions(beginShape), 1);
+    NN_RET_CHECK_EQ(getSizeOfDimension(beginShape, 0), n_dims);
+
+    NN_RET_CHECK_EQ(getNumberOfDimensions(sizeShape), 1);
+    NN_RET_CHECK_EQ(getSizeOfDimension(sizeShape, 0), n_dims);
+
+    const int32_t* beginData = reinterpret_cast<const int32_t*>(untypedBeginData);
+    const int32_t* sizeData = reinterpret_cast<const int32_t*>(untypedSizeData);
+
+    outputShape->dimensions.resize(n_dims);
+    for (int i = 0; i < n_dims; ++i) {
+        const int32_t sliceBegin = beginData[i];
+        int32_t sliceSize = sizeData[i];
+        NN_RET_CHECK_LT(beginData[i], getSizeOfDimension(inputShape, i));
+        NN_RET_CHECK(sliceSize > 0 || sliceSize == -1);
+        if (sliceSize == -1) {
+            sliceSize = getSizeOfDimension(inputShape, i) - sliceBegin;
+        }
+        NN_RET_CHECK_LE(sliceBegin + sliceSize, getSizeOfDimension(inputShape, i));
+        outputShape->dimensions[i] = sliceSize;
+    }
+    return true;
+}
+
+bool eval(const void* inputData, const Shape& inputShape, const void* untypedBeginData,
+          const Shape& beginShape, const void* untypedSizeData, const Shape& sizeShape,
+          void* outputData, const Shape& outputShape) {
+    const int32_t* beginData = reinterpret_cast<const int32_t*>(untypedBeginData);
+    const int32_t* sizeData = reinterpret_cast<const int32_t*>(untypedSizeData);
+    switch (inputShape.type) {
+        case OperandType::TENSOR_FLOAT16: {
+            return evalGeneric(reinterpret_cast<const _Float16*>(inputData), inputShape, beginData,
+                               beginShape, sizeData, sizeShape,
+                               reinterpret_cast<_Float16*>(outputData), outputShape);
+        } break;
+        case OperandType::TENSOR_FLOAT32: {
+            return evalGeneric(reinterpret_cast<const float*>(inputData), inputShape, beginData,
+                               beginShape, sizeData, sizeShape,
+                               reinterpret_cast<float*>(outputData), outputShape);
+        } break;
+        case OperandType::TENSOR_INT32: {
+            return evalGeneric(reinterpret_cast<const int32_t*>(inputData), inputShape, beginData,
+                               beginShape, sizeData, sizeShape,
+                               reinterpret_cast<int32_t*>(outputData), outputShape);
+        } break;
+        case OperandType::TENSOR_QUANT8_ASYMM: {
+            return evalGeneric(reinterpret_cast<const uint8_t*>(inputData), inputShape, beginData,
+                               beginShape, sizeData, sizeShape,
+                               reinterpret_cast<uint8_t*>(outputData), outputShape);
+        } break;
+        default: {
+            LOG(ERROR) << "Unsupported data type: " << toString(inputShape.type);
+            return false;
+        }
+    }
+}
+
+}  // namespace slice
+}  // namespace nn
+}  // namespace android
diff --git a/nn/common/operations/Slice.h b/nn/common/operations/Slice.h
new file mode 100644
index 0000000..e9e33cb
--- /dev/null
+++ b/nn/common/operations/Slice.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FRAMEWORKS_ML_NN_SLICE_H
+#define FRAMEWORKS_ML_NN_SLICE_H
+
+#include "CpuOperationUtils.h"
+
+namespace android {
+namespace nn {
+namespace slice {
+
+bool prepare(const Shape& input, const void* beginData, const Shape& beginShape,
+             const void* sizeData, const Shape& sizeShape, Shape* outputShape);
+
+bool eval(const void* inputData, const Shape& inputShape, const void* beginData,
+          const Shape& beginShape, const void* sizeData, const Shape& sizeShape, void* outputData,
+          const Shape& outputShape);
+
+}  // namespace slice
+}  // namespace nn
+}  // namespace android
+
+#endif  // FRAMEWORKS_ML_NN_SLICE_H
diff --git a/nn/common/operations/StridedSlice.cpp b/nn/common/operations/StridedSlice.cpp
index 71e724e..1e1493b 100644
--- a/nn/common/operations/StridedSlice.cpp
+++ b/nn/common/operations/StridedSlice.cpp
@@ -62,21 +62,21 @@
     if (inputShape.type == OperandType::TENSOR_FLOAT32) {
         NNTRACE_COMP_SWITCH("reference_ops::StridedSlice::float");
         tflite::reference_ops::StridedSlice(
-                reinterpret_cast<const float*>(inputData),
-                convertShapeToDims(inputShape),
-                beginMask, endMask, shrinkAxisMask,
-                starts, stops, strides,
-                reinterpret_cast<float*>(outputData),
-                convertShapeToDims(outputShape));
+                reinterpret_cast<const float*>(inputData), convertShapeToDims(inputShape),
+                beginMask, endMask, shrinkAxisMask, starts, stops, strides,
+                reinterpret_cast<float*>(outputData), convertShapeToDims(outputShape));
+    } else if (inputShape.type == OperandType::TENSOR_FLOAT16) {
+        NNTRACE_COMP_SWITCH("reference_ops::StridedSlice::float16");
+        tflite::reference_ops::StridedSlice(
+                reinterpret_cast<const _Float16*>(inputData), convertShapeToDims(inputShape),
+                beginMask, endMask, shrinkAxisMask, starts, stops, strides,
+                reinterpret_cast<_Float16*>(outputData), convertShapeToDims(outputShape));
     } else if (inputShape.type == OperandType::TENSOR_QUANT8_ASYMM) {
         NNTRACE_COMP_SWITCH("reference_ops::StridedSlice::uint8");
         tflite::reference_ops::StridedSlice(
-                reinterpret_cast<const uint8_t*>(inputData),
-                convertShapeToDims(inputShape),
-                beginMask, endMask, shrinkAxisMask,
-                starts, stops, strides,
-                reinterpret_cast<uint8_t*>(outputData),
-                convertShapeToDims(outputShape));
+                reinterpret_cast<const uint8_t*>(inputData), convertShapeToDims(inputShape),
+                beginMask, endMask, shrinkAxisMask, starts, stops, strides,
+                reinterpret_cast<uint8_t*>(outputData), convertShapeToDims(outputShape));
     } else {
         LOG(ERROR) << "Unsupported data type";
         return false;
diff --git a/nn/runtime/include/NeuralNetworks.h b/nn/runtime/include/NeuralNetworks.h
index c9f772e..5574fd3 100644
--- a/nn/runtime/include/NeuralNetworks.h
+++ b/nn/runtime/include/NeuralNetworks.h
@@ -244,16 +244,19 @@
      * dimensions except the dimension along the concatenation axis.
      *
      * Supported tensor {@link OperandCode}:
+     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
-     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (full support since API
+     *   level 29, see the input section)
      *
      * Supported tensor rank: up to 4
      *
      * Inputs:
      * * 0 ~ n-1: The list of n input tensors, of shape
-     *            [D0, D1, ..., Daxis(i), ..., Dm]. For inputs of
-     *            {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, all input tensors
-     *            must have the same scale and zeroPoint.
+     *            [D0, D1, ..., Daxis(i), ..., Dm].
+     *            Before API level 29, all input tensors of
+     *            {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+     *            must have the same scale and zeroPoint as the output tensor.
      * * n: An {@link ANEURALNETWORKS_INT32} scalar, specifying the
      *      concatenation axis.
      *
@@ -479,6 +482,7 @@
      * be divisible by block_size * block_size
      *
      * Supported tensor {@link OperandCode}:
+     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
      *
@@ -1283,6 +1287,7 @@
      *     output = max(0, input)
      *
      * Supported tensor {@link OperandCode}:
+     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
      *
@@ -1306,6 +1311,7 @@
      *     output = min(1.f, max(-1.f, input))
      *
      * Supported tensor {@link OperandCode}:
+     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
      *
@@ -1329,6 +1335,7 @@
      *     output = min(6, max(0, input))
      *
      * Supported tensor {@link OperandCode}:
+     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
      *
@@ -1351,6 +1358,7 @@
      * tensor, but with a newly specified shape.
      *
      * Supported tensor {@link OperandCode}:
+     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
      *
@@ -1377,6 +1385,7 @@
      * same as corner pixels of input.
      *
      * Supported tensor {@link OperandCode}:
+     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
      *
      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
@@ -1515,6 +1524,7 @@
      * The input tensor's height and width must be divisible by block_size.
      *
      * Supported tensor {@link OperandCode}:
+     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
      *
@@ -1661,6 +1671,7 @@
      * This is the reverse of SpaceToBatch.
      *
      * Supported tensor {@link OperandCode}:
+     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
      *
@@ -1764,6 +1775,7 @@
      * This operation pads a tensor according to the specified paddings.
      *
      * Supported tensor {@link OperandCode}:
+     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
      *
@@ -1805,6 +1817,7 @@
      * dimensions of the input are optionally zero padded according to paddings.
      *
      * Supported tensor {@link OperandCode}:
+     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
      *
@@ -1845,6 +1858,7 @@
      * dimensions by specifying the axes (input1).
      *
      * Supported tensor {@link OperandCode}:
+     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
      *
@@ -1877,6 +1891,7 @@
      * reverse slice.
      *
      * Supported tensor {@link OperandCode}:
+     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
      *
@@ -1965,6 +1980,7 @@
      * regular matrix transpose on 2-D input Tensors.
      *
      * Supported tensor {@link OperandCode}:
+     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
      *
@@ -1988,6 +2004,7 @@
      * Returns the index of the largest element along an axis.
      *
      * Supported tensor {@link OperandCode}:
+     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
      * * {@link ANEURALNETWORKS_TENSOR_INT32}
      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
@@ -2013,6 +2030,7 @@
      * Returns the index of the smallest element along an axis.
      *
      * Supported tensor {@link OperandCode}:
+     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
      * * {@link ANEURALNETWORKS_TENSOR_INT32}
      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
@@ -2037,6 +2055,7 @@
      * paddings.
      *
      * Supported tensor {@link OperandCode}:
+     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
      *
@@ -2671,6 +2690,39 @@
     ANEURALNETWORKS_RSQRT = 75,
     ANEURALNETWORKS_SELECT = 76,
     ANEURALNETWORKS_SIN = 77,
+    /**
+     * Extracts a slice of specified size from the input tensor starting at a
+     * specified location.
+     *
+     * The starting location is specified as a 1-D tensor containing offsets
+     * for each dimension. The size is specified as a 1-D tensor that, for each
+     * dimension, contains either the size of the slice along that dimension
+     * or -1. In the latter case, all the remaining elements of the dimension
+     * are included in the slice. The slice size in a dimension cannot be zero.
+     *
+     * The sum of the begin offset and the size of a slice must not exceed the
+     * size of the corresponding dimension.
+     *
+     * Supported tensor {@link OperandCode}:
+     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+     * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+     * * {@link ANEURALNETWORKS_TENSOR_INT32}
+     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: from 1
+     *
+     * Inputs:
+     * * 0: An n-D tensor to take slice from.
+     * * 1: A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} specifying
+     *      the beginning indices of the slice in each dimension.
+     * * 2: A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} specifying
+     *      the size of the slice in each dimension.
+     *
+     * Outputs:
+     * * 0: An n-D tensor of the same type as the input containing the slice.
+     *
+     * Available since API level 29.
+     */
     ANEURALNETWORKS_SLICE = 78,
     ANEURALNETWORKS_SPARSE_TO_DENSE = 79,
 
@@ -2911,7 +2963,61 @@
      */
     ANEURALNETWORKS_ROTATED_BBOX_TRANSFORM = 87,
 
+    /**
+     * Computes the absolute value of a tensor, element-wise.
+     *
+     * Supported tensor {@link OperandCode}:
+     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+     * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+     *
+     * Supported tensor rank: from 1.
+     *
+     * Inputs:
+     * * 0: A tensor.
+     *
+     * Outputs:
+     * * 0: The output tensor of same shape as input0.
+     *
+     * Available since API level 29.
+     */
     ANEURALNETWORKS_ABS = 88,
+
+    /**
+     * Select and scale the feature map of each region of interest to a unified
+     * output size by max-pooling.
+     *
+     * The region of interest is represented by its upper-left corner coordinate
+     * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
+     * A spatial scaling factor is applied to map them into feature map coordinates.
+     * A valid region of interest should satisfy x1 < x2 and y1 < y2.
+     *
+     * Rounding is applied in this operation to ensure integer boundary for
+     * regions of interest and pooling bins.
+     *
+     * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+     * With the default data layout NHWC, the data is stored in the order of:
+     * [batch, height, width, channels]. Alternatively, the data layout could
+     * be NCHW, the data storage order of: [batch, channels, height, width].
+     *
+     * Inputs:
+     * * 0: A 4-D tensor, specifying the feature map.
+     * * 1: A 2-D Tensor of shape [num_rois, 5 or 4], specifying the locations
+     *      of the regions of interest, each line with format
+     *      [<optional batch_id>, x1, y1, x2, y2]. The batch_id is optional if
+     *      there is only one batch.
+     * * 2: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32},
+     *      specifying the size of the output tensor [out_height, out_width].
+     * * 3: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the spatial
+     *      scaling factor from original image to feature map.
+     * * 4: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
+     *      NCHW data layout for input0 and output0. Set to false for NHWC.
+     *
+     * Outputs:
+     * * 0: A tensor of the same {@link OperandCode} as input0. The output
+     *      shape is [num_rois, out_height, out_width, depth].
+     *
+     * Available since API level 29.
+     */
     ANEURALNETWORKS_ROI_POOLING = 89,
 } OperationCode;
 
diff --git a/nn/runtime/test/TestValidateOperations.cpp b/nn/runtime/test/TestValidateOperations.cpp
index eae6a9b..119cfa8 100644
--- a/nn/runtime/test/TestValidateOperations.cpp
+++ b/nn/runtime/test/TestValidateOperations.cpp
@@ -256,6 +256,14 @@
     EXPECT_TRUE(activationTest.testMutatingOutputOperandCounts());
 }
 
+TEST(OperationValidationTest, ABS_float16) {
+    activationOpTest(ANEURALNETWORKS_ABS, ANEURALNETWORKS_TENSOR_FLOAT16);
+}
+
+TEST(OperationValidationTest, ABS_float32) {
+    activationOpTest(ANEURALNETWORKS_ABS, ANEURALNETWORKS_TENSOR_FLOAT32);
+}
+
 TEST(OperationValidationTest, FLOOR_float32) {
     activationOpTest(ANEURALNETWORKS_FLOOR, ANEURALNETWORKS_TENSOR_FLOAT32);
 }
@@ -801,6 +809,10 @@
     depthwiseConvOpTest(ANEURALNETWORKS_TENSOR_FLOAT32);
 }
 
+TEST(OperationValidationTest, DEPTHWISE_CONV_2D_float16) {
+    depthwiseConvOpTest(ANEURALNETWORKS_TENSOR_FLOAT16);
+}
+
 TEST(OperationValidationTest, DEPTHWISE_CONV_2D_quant8) {
     depthwiseConvOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
 }
@@ -1363,20 +1375,23 @@
     stridedSliceOpTest(ANEURALNETWORKS_TENSOR_FLOAT32);
 }
 
+TEST(OperationValidationTest, STRIDED_SLICE_float16) {
+    stridedSliceOpTest(ANEURALNETWORKS_TENSOR_FLOAT16);
+}
+
 TEST(OperationValidationTest, STRIDED_SLICE_quant8) {
     stridedSliceOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
 }
 
-TEST(OperationValidationTest, ROI_ALIGN_float32) {
+void roiAlignOpTest(int32_t operandCode) {
     uint32_t inDim[] = {1, 4, 4, 1}, roiDim[] = {4, 4}, outShapeDim[] = {2};
     uint32_t outDim[] = {4, 2, 2, 1};
     OperationTestBase roiAlignTest(
             ANEURALNETWORKS_ROI_ALIGN,
-            {getOpType(ANEURALNETWORKS_TENSOR_FLOAT32, 4, inDim),
-             getOpType(ANEURALNETWORKS_TENSOR_FLOAT32, 2, roiDim),
+            {getOpType(operandCode, 4, inDim), getOpType(ANEURALNETWORKS_TENSOR_FLOAT32, 2, roiDim),
              getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, outShapeDim),
              getOpType(ANEURALNETWORKS_FLOAT32), getOpType(ANEURALNETWORKS_INT32)},
-            {getOpType(ANEURALNETWORKS_TENSOR_FLOAT32, 4, outDim)});
+            {getOpType(operandCode, 4, outDim)});
 
     EXPECT_TRUE(roiAlignTest.testMutatingInputOperandCode());
     EXPECT_TRUE(roiAlignTest.testMutatingInputOperandCounts());
@@ -1384,6 +1399,38 @@
     EXPECT_TRUE(roiAlignTest.testMutatingOutputOperandCounts());
 }
 
+TEST(OperationValidationTest, ROI_ALIGN_float32) {
+    roiAlignOpTest(ANEURALNETWORKS_TENSOR_FLOAT32);
+}
+
+TEST(OperationValidationTest, ROI_ALIGN_quant8) {
+    roiAlignOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
+}
+
+void roiPoolingOpTest(int32_t operandCode) {
+    uint32_t inDim[] = {1, 4, 4, 1}, roiDim[] = {4, 4}, outShapeDim[] = {2};
+    uint32_t outDim[] = {4, 2, 2, 1};
+    OperationTestBase roiPoolingTest(
+            ANEURALNETWORKS_ROI_POOLING,
+            {getOpType(operandCode, 4, inDim), getOpType(ANEURALNETWORKS_TENSOR_FLOAT32, 2, roiDim),
+             getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, outShapeDim),
+             getOpType(ANEURALNETWORKS_FLOAT32)},
+            {getOpType(operandCode, 4, outDim)});
+
+    EXPECT_TRUE(roiPoolingTest.testMutatingInputOperandCode());
+    EXPECT_TRUE(roiPoolingTest.testMutatingInputOperandCounts());
+    EXPECT_TRUE(roiPoolingTest.testMutatingOutputOperandCode());
+    EXPECT_TRUE(roiPoolingTest.testMutatingOutputOperandCounts());
+}
+
+TEST(OperationValidationTest, ROI_POOLING_float32) {
+    roiPoolingOpTest(ANEURALNETWORKS_TENSOR_FLOAT32);
+}
+
+TEST(OperationValidationTest, ROI_POOLING_quant8) {
+    roiPoolingOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
+}
+
 TEST(OperationValidationTest, HEATMAP_MAX_KEYPOINT_float32) {
     uint32_t heatmapDim[] = {6, 4, 4, 1}, boxDim[] = {6, 4}, outDim[] = {6, 3, 1};
     OperationTestBase heatmapMaxKeypointTest(
@@ -1612,4 +1659,36 @@
     EXPECT_TRUE(rotatedBBoxTransformTest.testMutatingOutputOperandCode());
     EXPECT_TRUE(rotatedBBoxTransformTest.testMutatingOutputOperandCounts());
 }
+
+void sliceTest(int32_t operandCode) {
+    uint32_t inputDim[] = {3, 3, 3};
+    uint32_t startDim[] = {3};
+    uint32_t sizeDim[] = {3};
+    uint32_t outputDim[] = {1, 2, 3};
+
+    OperationTestBase sliceTest(ANEURALNETWORKS_SLICE,
+                                {getOpType(operandCode, 3, inputDim),
+                                 getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, startDim),
+                                 getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, sizeDim)},
+                                {getOpType(operandCode, 3, outputDim)});
+
+    EXPECT_TRUE(sliceTest.testMutatingInputOperandCode());
+    EXPECT_TRUE(sliceTest.testMutatingInputOperandCounts());
+    EXPECT_TRUE(sliceTest.testMutatingOutputOperandCode());
+    EXPECT_TRUE(sliceTest.testMutatingOutputOperandCounts());
+}
+
+TEST(OperationValidationTest, SLICE_float32) {
+    sliceTest(ANEURALNETWORKS_TENSOR_FLOAT32);
+}
+TEST(OperationValidationTest, SLICE_int32) {
+    sliceTest(ANEURALNETWORKS_TENSOR_INT32);
+}
+TEST(OperationValidationTest, SLICE_uint8) {
+    sliceTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
+}
+TEST(OperationValidationTest, SLICE_float16) {
+    sliceTest(ANEURALNETWORKS_TENSOR_FLOAT16);
+}
+
 }  // end namespace
diff --git a/nn/runtime/test/for-cts/TestGeneratedOneFile.cpp b/nn/runtime/test/for-cts/TestGeneratedOneFile.cpp
index a9bacb6..0ae2513 100644
--- a/nn/runtime/test/for-cts/TestGeneratedOneFile.cpp
+++ b/nn/runtime/test/for-cts/TestGeneratedOneFile.cpp
@@ -325,31 +325,24 @@
 #include "../generated/tests/transpose_float_1_relaxed.mod.py.cpp"
 #include "../generated/tests/transpose_quant8_1.mod.py.cpp"
 #include "../generated/tests/transpose_relaxed.mod.py.cpp"
+#include "../generated/tests/abs.mod.py.cpp"
 #include "../generated/tests/add_broadcast_float16.mod.py.cpp"
 #include "../generated/tests/add_float16.mod.py.cpp"
-#include "../generated/tests/argmax_1_float.mod.py.cpp"
-#include "../generated/tests/argmax_1_float_relaxed.mod.py.cpp"
-#include "../generated/tests/argmax_1_int32.mod.py.cpp"
-#include "../generated/tests/argmax_1_quant8.mod.py.cpp"
-#include "../generated/tests/argmax_2_float.mod.py.cpp"
-#include "../generated/tests/argmax_2_float_relaxed.mod.py.cpp"
-#include "../generated/tests/argmax_2_int32.mod.py.cpp"
-#include "../generated/tests/argmax_2_quant8.mod.py.cpp"
-#include "../generated/tests/argmax_3_float.mod.py.cpp"
-#include "../generated/tests/argmin_1_float.mod.py.cpp"
-#include "../generated/tests/argmin_1_float_relaxed.mod.py.cpp"
-#include "../generated/tests/argmin_1_int32.mod.py.cpp"
-#include "../generated/tests/argmin_1_quant8.mod.py.cpp"
-#include "../generated/tests/argmin_2_float.mod.py.cpp"
-#include "../generated/tests/argmin_2_float_relaxed.mod.py.cpp"
-#include "../generated/tests/argmin_2_int32.mod.py.cpp"
-#include "../generated/tests/argmin_2_quant8.mod.py.cpp"
-#include "../generated/tests/argmin_3_float.mod.py.cpp"
+#include "../generated/tests/argmax_1.mod.py.cpp"
+#include "../generated/tests/argmax_2.mod.py.cpp"
+#include "../generated/tests/argmax_3.mod.py.cpp"
+#include "../generated/tests/argmin_1.mod.py.cpp"
+#include "../generated/tests/argmin_2.mod.py.cpp"
+#include "../generated/tests/argmin_3.mod.py.cpp"
 #include "../generated/tests/avg_pool_v1_2.mod.py.cpp"
 #include "../generated/tests/axis_aligned_bbox_transform.mod.py.cpp"
 #include "../generated/tests/batch_to_space_v1_2.mod.py.cpp"
 #include "../generated/tests/cast.mod.py.cpp"
 #include "../generated/tests/channel_shuffle.mod.py.cpp"
+#include "../generated/tests/concat_float16_1.mod.py.cpp"
+#include "../generated/tests/concat_float16_2.mod.py.cpp"
+#include "../generated/tests/concat_float16_3.mod.py.cpp"
+#include "../generated/tests/concat_mixed_quant.mod.py.cpp"
 #include "../generated/tests/conv2d_v1_2.mod.py.cpp"
 #include "../generated/tests/depth_to_space_v1_2.mod.py.cpp"
 #include "../generated/tests/depthwise_conv2d_v1_2.mod.py.cpp"
@@ -374,6 +367,7 @@
 #include "../generated/tests/minimum.mod.py.cpp"
 #include "../generated/tests/mul_broadcast_float16.mod.py.cpp"
 #include "../generated/tests/mul_float16.mod.py.cpp"
+#include "../generated/tests/pad_float16.mod.py.cpp"
 #include "../generated/tests/pad_v2_1_float.mod.py.cpp"
 #include "../generated/tests/pad_v2_1_float_relaxed.mod.py.cpp"
 #include "../generated/tests/pad_v2_1_quant8.mod.py.cpp"
@@ -388,9 +382,12 @@
 #include "../generated/tests/relu6_float16_2.mod.py.cpp"
 #include "../generated/tests/relu_float16_1.mod.py.cpp"
 #include "../generated/tests/relu_float16_2.mod.py.cpp"
+#include "../generated/tests/reshape_float16.mod.py.cpp"
 #include "../generated/tests/resize_bilinear_v1_2.mod.py.cpp"
 #include "../generated/tests/roi_align.mod.py.cpp"
+#include "../generated/tests/roi_pooling.mod.py.cpp"
 #include "../generated/tests/rotated_bbox_transform.mod.py.cpp"
+#include "../generated/tests/slice.mod.py.cpp"
 #include "../generated/tests/softmax_v1_2.mod.py.cpp"
 #include "../generated/tests/space_to_batch_v1_2.mod.py.cpp"
 #include "../generated/tests/space_to_depth_v1_2.mod.py.cpp"
@@ -407,6 +404,8 @@
 #include "../generated/tests/split_quant8_2.mod.py.cpp"
 #include "../generated/tests/split_quant8_3.mod.py.cpp"
 #include "../generated/tests/split_quant8_4.mod.py.cpp"
+#include "../generated/tests/squeeze_float16.mod.py.cpp"
+#include "../generated/tests/strided_slice_float16.mod.py.cpp"
 #include "../generated/tests/sub_float16.mod.py.cpp"
 #include "../generated/tests/sub_float16_broadcast.mod.py.cpp"
 #include "../generated/tests/sub_quantized.mod.py.cpp"
@@ -419,4 +418,5 @@
 #include "../generated/tests/tile_3.mod.py.cpp"
 #include "../generated/tests/topk_v2.mod.py.cpp"
 #include "../generated/tests/transpose_conv2d.mod.py.cpp"
+#include "../generated/tests/transpose_float16.mod.py.cpp"
 #include "../generated/tests/transpose_v1_2.mod.py.cpp"
diff --git a/nn/runtime/test/generated/all_generated_V1_0_vts_tests.cpp b/nn/runtime/test/generated/all_generated_V1_0_vts_tests.cpp
index 3fdf391..2d75889 100644
--- a/nn/runtime/test/generated/all_generated_V1_0_vts_tests.cpp
+++ b/nn/runtime/test/generated/all_generated_V1_0_vts_tests.cpp
@@ -13,12 +13,12 @@
   generated_tests::Execute(device,
                            add::createTestModel,
                            add::is_ignored,
-                           add::examples);
+                           add::get_examples());
 }
 
 TEST_F(ValidationTest, add) {
   const Model model = add::createTestModel();
-  const std::vector<Request> requests = createRequests(add::examples);
+  const std::vector<Request> requests = createRequests(add::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -36,12 +36,12 @@
   generated_tests::Execute(device,
                            add_broadcast_quant8::createTestModel,
                            add_broadcast_quant8::is_ignored,
-                           add_broadcast_quant8::examples);
+                           add_broadcast_quant8::get_examples());
 }
 
 TEST_F(ValidationTest, add_broadcast_quant8) {
   const Model model = add_broadcast_quant8::createTestModel();
-  const std::vector<Request> requests = createRequests(add_broadcast_quant8::examples);
+  const std::vector<Request> requests = createRequests(add_broadcast_quant8::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -59,12 +59,12 @@
   generated_tests::Execute(device,
                            add_quant8::createTestModel,
                            add_quant8::is_ignored,
-                           add_quant8::examples);
+                           add_quant8::get_examples());
 }
 
 TEST_F(ValidationTest, add_quant8) {
   const Model model = add_quant8::createTestModel();
-  const std::vector<Request> requests = createRequests(add_quant8::examples);
+  const std::vector<Request> requests = createRequests(add_quant8::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -82,12 +82,12 @@
   generated_tests::Execute(device,
                            avg_pool_float_1::createTestModel,
                            avg_pool_float_1::is_ignored,
-                           avg_pool_float_1::examples);
+                           avg_pool_float_1::get_examples());
 }
 
 TEST_F(ValidationTest, avg_pool_float_1) {
   const Model model = avg_pool_float_1::createTestModel();
-  const std::vector<Request> requests = createRequests(avg_pool_float_1::examples);
+  const std::vector<Request> requests = createRequests(avg_pool_float_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -105,12 +105,12 @@
   generated_tests::Execute(device,
                            avg_pool_float_2::createTestModel,
                            avg_pool_float_2::is_ignored,
-                           avg_pool_float_2::examples);
+                           avg_pool_float_2::get_examples());
 }
 
 TEST_F(ValidationTest, avg_pool_float_2) {
   const Model model = avg_pool_float_2::createTestModel();
-  const std::vector<Request> requests = createRequests(avg_pool_float_2::examples);
+  const std::vector<Request> requests = createRequests(avg_pool_float_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -128,12 +128,12 @@
   generated_tests::Execute(device,
                            avg_pool_float_3::createTestModel,
                            avg_pool_float_3::is_ignored,
-                           avg_pool_float_3::examples);
+                           avg_pool_float_3::get_examples());
 }
 
 TEST_F(ValidationTest, avg_pool_float_3) {
   const Model model = avg_pool_float_3::createTestModel();
-  const std::vector<Request> requests = createRequests(avg_pool_float_3::examples);
+  const std::vector<Request> requests = createRequests(avg_pool_float_3::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -151,12 +151,12 @@
   generated_tests::Execute(device,
                            avg_pool_float_4::createTestModel,
                            avg_pool_float_4::is_ignored,
-                           avg_pool_float_4::examples);
+                           avg_pool_float_4::get_examples());
 }
 
 TEST_F(ValidationTest, avg_pool_float_4) {
   const Model model = avg_pool_float_4::createTestModel();
-  const std::vector<Request> requests = createRequests(avg_pool_float_4::examples);
+  const std::vector<Request> requests = createRequests(avg_pool_float_4::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -174,12 +174,12 @@
   generated_tests::Execute(device,
                            avg_pool_float_5::createTestModel,
                            avg_pool_float_5::is_ignored,
-                           avg_pool_float_5::examples);
+                           avg_pool_float_5::get_examples());
 }
 
 TEST_F(ValidationTest, avg_pool_float_5) {
   const Model model = avg_pool_float_5::createTestModel();
-  const std::vector<Request> requests = createRequests(avg_pool_float_5::examples);
+  const std::vector<Request> requests = createRequests(avg_pool_float_5::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -197,12 +197,12 @@
   generated_tests::Execute(device,
                            avg_pool_quant8_1::createTestModel,
                            avg_pool_quant8_1::is_ignored,
-                           avg_pool_quant8_1::examples);
+                           avg_pool_quant8_1::get_examples());
 }
 
 TEST_F(ValidationTest, avg_pool_quant8_1) {
   const Model model = avg_pool_quant8_1::createTestModel();
-  const std::vector<Request> requests = createRequests(avg_pool_quant8_1::examples);
+  const std::vector<Request> requests = createRequests(avg_pool_quant8_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -220,12 +220,12 @@
   generated_tests::Execute(device,
                            avg_pool_quant8_2::createTestModel,
                            avg_pool_quant8_2::is_ignored,
-                           avg_pool_quant8_2::examples);
+                           avg_pool_quant8_2::get_examples());
 }
 
 TEST_F(ValidationTest, avg_pool_quant8_2) {
   const Model model = avg_pool_quant8_2::createTestModel();
-  const std::vector<Request> requests = createRequests(avg_pool_quant8_2::examples);
+  const std::vector<Request> requests = createRequests(avg_pool_quant8_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -243,12 +243,12 @@
   generated_tests::Execute(device,
                            avg_pool_quant8_3::createTestModel,
                            avg_pool_quant8_3::is_ignored,
-                           avg_pool_quant8_3::examples);
+                           avg_pool_quant8_3::get_examples());
 }
 
 TEST_F(ValidationTest, avg_pool_quant8_3) {
   const Model model = avg_pool_quant8_3::createTestModel();
-  const std::vector<Request> requests = createRequests(avg_pool_quant8_3::examples);
+  const std::vector<Request> requests = createRequests(avg_pool_quant8_3::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -266,12 +266,12 @@
   generated_tests::Execute(device,
                            avg_pool_quant8_4::createTestModel,
                            avg_pool_quant8_4::is_ignored,
-                           avg_pool_quant8_4::examples);
+                           avg_pool_quant8_4::get_examples());
 }
 
 TEST_F(ValidationTest, avg_pool_quant8_4) {
   const Model model = avg_pool_quant8_4::createTestModel();
-  const std::vector<Request> requests = createRequests(avg_pool_quant8_4::examples);
+  const std::vector<Request> requests = createRequests(avg_pool_quant8_4::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -289,12 +289,12 @@
   generated_tests::Execute(device,
                            avg_pool_quant8_5::createTestModel,
                            avg_pool_quant8_5::is_ignored,
-                           avg_pool_quant8_5::examples);
+                           avg_pool_quant8_5::get_examples());
 }
 
 TEST_F(ValidationTest, avg_pool_quant8_5) {
   const Model model = avg_pool_quant8_5::createTestModel();
-  const std::vector<Request> requests = createRequests(avg_pool_quant8_5::examples);
+  const std::vector<Request> requests = createRequests(avg_pool_quant8_5::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -312,12 +312,12 @@
   generated_tests::Execute(device,
                            concat_float_1::createTestModel,
                            concat_float_1::is_ignored,
-                           concat_float_1::examples);
+                           concat_float_1::get_examples());
 }
 
 TEST_F(ValidationTest, concat_float_1) {
   const Model model = concat_float_1::createTestModel();
-  const std::vector<Request> requests = createRequests(concat_float_1::examples);
+  const std::vector<Request> requests = createRequests(concat_float_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -335,12 +335,12 @@
   generated_tests::Execute(device,
                            concat_float_2::createTestModel,
                            concat_float_2::is_ignored,
-                           concat_float_2::examples);
+                           concat_float_2::get_examples());
 }
 
 TEST_F(ValidationTest, concat_float_2) {
   const Model model = concat_float_2::createTestModel();
-  const std::vector<Request> requests = createRequests(concat_float_2::examples);
+  const std::vector<Request> requests = createRequests(concat_float_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -358,12 +358,12 @@
   generated_tests::Execute(device,
                            concat_float_3::createTestModel,
                            concat_float_3::is_ignored,
-                           concat_float_3::examples);
+                           concat_float_3::get_examples());
 }
 
 TEST_F(ValidationTest, concat_float_3) {
   const Model model = concat_float_3::createTestModel();
-  const std::vector<Request> requests = createRequests(concat_float_3::examples);
+  const std::vector<Request> requests = createRequests(concat_float_3::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -381,12 +381,12 @@
   generated_tests::Execute(device,
                            concat_quant8_1::createTestModel,
                            concat_quant8_1::is_ignored,
-                           concat_quant8_1::examples);
+                           concat_quant8_1::get_examples());
 }
 
 TEST_F(ValidationTest, concat_quant8_1) {
   const Model model = concat_quant8_1::createTestModel();
-  const std::vector<Request> requests = createRequests(concat_quant8_1::examples);
+  const std::vector<Request> requests = createRequests(concat_quant8_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -404,12 +404,12 @@
   generated_tests::Execute(device,
                            concat_quant8_2::createTestModel,
                            concat_quant8_2::is_ignored,
-                           concat_quant8_2::examples);
+                           concat_quant8_2::get_examples());
 }
 
 TEST_F(ValidationTest, concat_quant8_2) {
   const Model model = concat_quant8_2::createTestModel();
-  const std::vector<Request> requests = createRequests(concat_quant8_2::examples);
+  const std::vector<Request> requests = createRequests(concat_quant8_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -427,12 +427,12 @@
   generated_tests::Execute(device,
                            concat_quant8_3::createTestModel,
                            concat_quant8_3::is_ignored,
-                           concat_quant8_3::examples);
+                           concat_quant8_3::get_examples());
 }
 
 TEST_F(ValidationTest, concat_quant8_3) {
   const Model model = concat_quant8_3::createTestModel();
-  const std::vector<Request> requests = createRequests(concat_quant8_3::examples);
+  const std::vector<Request> requests = createRequests(concat_quant8_3::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -450,12 +450,12 @@
   generated_tests::Execute(device,
                            conv_1_h3_w2_SAME::createTestModel,
                            conv_1_h3_w2_SAME::is_ignored,
-                           conv_1_h3_w2_SAME::examples);
+                           conv_1_h3_w2_SAME::get_examples());
 }
 
 TEST_F(ValidationTest, conv_1_h3_w2_SAME) {
   const Model model = conv_1_h3_w2_SAME::createTestModel();
-  const std::vector<Request> requests = createRequests(conv_1_h3_w2_SAME::examples);
+  const std::vector<Request> requests = createRequests(conv_1_h3_w2_SAME::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -473,12 +473,12 @@
   generated_tests::Execute(device,
                            conv_1_h3_w2_VALID::createTestModel,
                            conv_1_h3_w2_VALID::is_ignored,
-                           conv_1_h3_w2_VALID::examples);
+                           conv_1_h3_w2_VALID::get_examples());
 }
 
 TEST_F(ValidationTest, conv_1_h3_w2_VALID) {
   const Model model = conv_1_h3_w2_VALID::createTestModel();
-  const std::vector<Request> requests = createRequests(conv_1_h3_w2_VALID::examples);
+  const std::vector<Request> requests = createRequests(conv_1_h3_w2_VALID::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -496,12 +496,12 @@
   generated_tests::Execute(device,
                            conv_3_h3_w2_SAME::createTestModel,
                            conv_3_h3_w2_SAME::is_ignored,
-                           conv_3_h3_w2_SAME::examples);
+                           conv_3_h3_w2_SAME::get_examples());
 }
 
 TEST_F(ValidationTest, conv_3_h3_w2_SAME) {
   const Model model = conv_3_h3_w2_SAME::createTestModel();
-  const std::vector<Request> requests = createRequests(conv_3_h3_w2_SAME::examples);
+  const std::vector<Request> requests = createRequests(conv_3_h3_w2_SAME::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -519,12 +519,12 @@
   generated_tests::Execute(device,
                            conv_3_h3_w2_VALID::createTestModel,
                            conv_3_h3_w2_VALID::is_ignored,
-                           conv_3_h3_w2_VALID::examples);
+                           conv_3_h3_w2_VALID::get_examples());
 }
 
 TEST_F(ValidationTest, conv_3_h3_w2_VALID) {
   const Model model = conv_3_h3_w2_VALID::createTestModel();
-  const std::vector<Request> requests = createRequests(conv_3_h3_w2_VALID::examples);
+  const std::vector<Request> requests = createRequests(conv_3_h3_w2_VALID::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -542,12 +542,12 @@
   generated_tests::Execute(device,
                            conv_float::createTestModel,
                            conv_float::is_ignored,
-                           conv_float::examples);
+                           conv_float::get_examples());
 }
 
 TEST_F(ValidationTest, conv_float) {
   const Model model = conv_float::createTestModel();
-  const std::vector<Request> requests = createRequests(conv_float::examples);
+  const std::vector<Request> requests = createRequests(conv_float::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -565,12 +565,12 @@
   generated_tests::Execute(device,
                            conv_float_2::createTestModel,
                            conv_float_2::is_ignored,
-                           conv_float_2::examples);
+                           conv_float_2::get_examples());
 }
 
 TEST_F(ValidationTest, conv_float_2) {
   const Model model = conv_float_2::createTestModel();
-  const std::vector<Request> requests = createRequests(conv_float_2::examples);
+  const std::vector<Request> requests = createRequests(conv_float_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -588,12 +588,12 @@
   generated_tests::Execute(device,
                            conv_float_channels::createTestModel,
                            conv_float_channels::is_ignored,
-                           conv_float_channels::examples);
+                           conv_float_channels::get_examples());
 }
 
 TEST_F(ValidationTest, conv_float_channels) {
   const Model model = conv_float_channels::createTestModel();
-  const std::vector<Request> requests = createRequests(conv_float_channels::examples);
+  const std::vector<Request> requests = createRequests(conv_float_channels::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -611,12 +611,12 @@
   generated_tests::Execute(device,
                            conv_float_channels_weights_as_inputs::createTestModel,
                            conv_float_channels_weights_as_inputs::is_ignored,
-                           conv_float_channels_weights_as_inputs::examples);
+                           conv_float_channels_weights_as_inputs::get_examples());
 }
 
 TEST_F(ValidationTest, conv_float_channels_weights_as_inputs) {
   const Model model = conv_float_channels_weights_as_inputs::createTestModel();
-  const std::vector<Request> requests = createRequests(conv_float_channels_weights_as_inputs::examples);
+  const std::vector<Request> requests = createRequests(conv_float_channels_weights_as_inputs::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -634,12 +634,12 @@
   generated_tests::Execute(device,
                            conv_float_large::createTestModel,
                            conv_float_large::is_ignored,
-                           conv_float_large::examples);
+                           conv_float_large::get_examples());
 }
 
 TEST_F(ValidationTest, conv_float_large) {
   const Model model = conv_float_large::createTestModel();
-  const std::vector<Request> requests = createRequests(conv_float_large::examples);
+  const std::vector<Request> requests = createRequests(conv_float_large::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -657,12 +657,12 @@
   generated_tests::Execute(device,
                            conv_float_large_weights_as_inputs::createTestModel,
                            conv_float_large_weights_as_inputs::is_ignored,
-                           conv_float_large_weights_as_inputs::examples);
+                           conv_float_large_weights_as_inputs::get_examples());
 }
 
 TEST_F(ValidationTest, conv_float_large_weights_as_inputs) {
   const Model model = conv_float_large_weights_as_inputs::createTestModel();
-  const std::vector<Request> requests = createRequests(conv_float_large_weights_as_inputs::examples);
+  const std::vector<Request> requests = createRequests(conv_float_large_weights_as_inputs::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -680,12 +680,12 @@
   generated_tests::Execute(device,
                            conv_float_weights_as_inputs::createTestModel,
                            conv_float_weights_as_inputs::is_ignored,
-                           conv_float_weights_as_inputs::examples);
+                           conv_float_weights_as_inputs::get_examples());
 }
 
 TEST_F(ValidationTest, conv_float_weights_as_inputs) {
   const Model model = conv_float_weights_as_inputs::createTestModel();
-  const std::vector<Request> requests = createRequests(conv_float_weights_as_inputs::examples);
+  const std::vector<Request> requests = createRequests(conv_float_weights_as_inputs::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -703,12 +703,12 @@
   generated_tests::Execute(device,
                            conv_quant8::createTestModel,
                            conv_quant8::is_ignored,
-                           conv_quant8::examples);
+                           conv_quant8::get_examples());
 }
 
 TEST_F(ValidationTest, conv_quant8) {
   const Model model = conv_quant8::createTestModel();
-  const std::vector<Request> requests = createRequests(conv_quant8::examples);
+  const std::vector<Request> requests = createRequests(conv_quant8::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -726,12 +726,12 @@
   generated_tests::Execute(device,
                            conv_quant8_2::createTestModel,
                            conv_quant8_2::is_ignored,
-                           conv_quant8_2::examples);
+                           conv_quant8_2::get_examples());
 }
 
 TEST_F(ValidationTest, conv_quant8_2) {
   const Model model = conv_quant8_2::createTestModel();
-  const std::vector<Request> requests = createRequests(conv_quant8_2::examples);
+  const std::vector<Request> requests = createRequests(conv_quant8_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -749,12 +749,12 @@
   generated_tests::Execute(device,
                            conv_quant8_channels::createTestModel,
                            conv_quant8_channels::is_ignored,
-                           conv_quant8_channels::examples);
+                           conv_quant8_channels::get_examples());
 }
 
 TEST_F(ValidationTest, conv_quant8_channels) {
   const Model model = conv_quant8_channels::createTestModel();
-  const std::vector<Request> requests = createRequests(conv_quant8_channels::examples);
+  const std::vector<Request> requests = createRequests(conv_quant8_channels::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -772,12 +772,12 @@
   generated_tests::Execute(device,
                            conv_quant8_channels_weights_as_inputs::createTestModel,
                            conv_quant8_channels_weights_as_inputs::is_ignored,
-                           conv_quant8_channels_weights_as_inputs::examples);
+                           conv_quant8_channels_weights_as_inputs::get_examples());
 }
 
 TEST_F(ValidationTest, conv_quant8_channels_weights_as_inputs) {
   const Model model = conv_quant8_channels_weights_as_inputs::createTestModel();
-  const std::vector<Request> requests = createRequests(conv_quant8_channels_weights_as_inputs::examples);
+  const std::vector<Request> requests = createRequests(conv_quant8_channels_weights_as_inputs::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -795,12 +795,12 @@
   generated_tests::Execute(device,
                            conv_quant8_large::createTestModel,
                            conv_quant8_large::is_ignored,
-                           conv_quant8_large::examples);
+                           conv_quant8_large::get_examples());
 }
 
 TEST_F(ValidationTest, conv_quant8_large) {
   const Model model = conv_quant8_large::createTestModel();
-  const std::vector<Request> requests = createRequests(conv_quant8_large::examples);
+  const std::vector<Request> requests = createRequests(conv_quant8_large::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -818,12 +818,12 @@
   generated_tests::Execute(device,
                            conv_quant8_large_weights_as_inputs::createTestModel,
                            conv_quant8_large_weights_as_inputs::is_ignored,
-                           conv_quant8_large_weights_as_inputs::examples);
+                           conv_quant8_large_weights_as_inputs::get_examples());
 }
 
 TEST_F(ValidationTest, conv_quant8_large_weights_as_inputs) {
   const Model model = conv_quant8_large_weights_as_inputs::createTestModel();
-  const std::vector<Request> requests = createRequests(conv_quant8_large_weights_as_inputs::examples);
+  const std::vector<Request> requests = createRequests(conv_quant8_large_weights_as_inputs::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -841,12 +841,12 @@
   generated_tests::Execute(device,
                            conv_quant8_overflow::createTestModel,
                            conv_quant8_overflow::is_ignored,
-                           conv_quant8_overflow::examples);
+                           conv_quant8_overflow::get_examples());
 }
 
 TEST_F(ValidationTest, conv_quant8_overflow) {
   const Model model = conv_quant8_overflow::createTestModel();
-  const std::vector<Request> requests = createRequests(conv_quant8_overflow::examples);
+  const std::vector<Request> requests = createRequests(conv_quant8_overflow::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -864,12 +864,12 @@
   generated_tests::Execute(device,
                            conv_quant8_overflow_weights_as_inputs::createTestModel,
                            conv_quant8_overflow_weights_as_inputs::is_ignored,
-                           conv_quant8_overflow_weights_as_inputs::examples);
+                           conv_quant8_overflow_weights_as_inputs::get_examples());
 }
 
 TEST_F(ValidationTest, conv_quant8_overflow_weights_as_inputs) {
   const Model model = conv_quant8_overflow_weights_as_inputs::createTestModel();
-  const std::vector<Request> requests = createRequests(conv_quant8_overflow_weights_as_inputs::examples);
+  const std::vector<Request> requests = createRequests(conv_quant8_overflow_weights_as_inputs::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -887,12 +887,12 @@
   generated_tests::Execute(device,
                            conv_quant8_weights_as_inputs::createTestModel,
                            conv_quant8_weights_as_inputs::is_ignored,
-                           conv_quant8_weights_as_inputs::examples);
+                           conv_quant8_weights_as_inputs::get_examples());
 }
 
 TEST_F(ValidationTest, conv_quant8_weights_as_inputs) {
   const Model model = conv_quant8_weights_as_inputs::createTestModel();
-  const std::vector<Request> requests = createRequests(conv_quant8_weights_as_inputs::examples);
+  const std::vector<Request> requests = createRequests(conv_quant8_weights_as_inputs::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -910,12 +910,12 @@
   generated_tests::Execute(device,
                            depth_to_space_float_1::createTestModel,
                            depth_to_space_float_1::is_ignored,
-                           depth_to_space_float_1::examples);
+                           depth_to_space_float_1::get_examples());
 }
 
 TEST_F(ValidationTest, depth_to_space_float_1) {
   const Model model = depth_to_space_float_1::createTestModel();
-  const std::vector<Request> requests = createRequests(depth_to_space_float_1::examples);
+  const std::vector<Request> requests = createRequests(depth_to_space_float_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -933,12 +933,12 @@
   generated_tests::Execute(device,
                            depth_to_space_float_2::createTestModel,
                            depth_to_space_float_2::is_ignored,
-                           depth_to_space_float_2::examples);
+                           depth_to_space_float_2::get_examples());
 }
 
 TEST_F(ValidationTest, depth_to_space_float_2) {
   const Model model = depth_to_space_float_2::createTestModel();
-  const std::vector<Request> requests = createRequests(depth_to_space_float_2::examples);
+  const std::vector<Request> requests = createRequests(depth_to_space_float_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -956,12 +956,12 @@
   generated_tests::Execute(device,
                            depth_to_space_float_3::createTestModel,
                            depth_to_space_float_3::is_ignored,
-                           depth_to_space_float_3::examples);
+                           depth_to_space_float_3::get_examples());
 }
 
 TEST_F(ValidationTest, depth_to_space_float_3) {
   const Model model = depth_to_space_float_3::createTestModel();
-  const std::vector<Request> requests = createRequests(depth_to_space_float_3::examples);
+  const std::vector<Request> requests = createRequests(depth_to_space_float_3::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -979,12 +979,12 @@
   generated_tests::Execute(device,
                            depth_to_space_quant8_1::createTestModel,
                            depth_to_space_quant8_1::is_ignored,
-                           depth_to_space_quant8_1::examples);
+                           depth_to_space_quant8_1::get_examples());
 }
 
 TEST_F(ValidationTest, depth_to_space_quant8_1) {
   const Model model = depth_to_space_quant8_1::createTestModel();
-  const std::vector<Request> requests = createRequests(depth_to_space_quant8_1::examples);
+  const std::vector<Request> requests = createRequests(depth_to_space_quant8_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1002,12 +1002,12 @@
   generated_tests::Execute(device,
                            depth_to_space_quant8_2::createTestModel,
                            depth_to_space_quant8_2::is_ignored,
-                           depth_to_space_quant8_2::examples);
+                           depth_to_space_quant8_2::get_examples());
 }
 
 TEST_F(ValidationTest, depth_to_space_quant8_2) {
   const Model model = depth_to_space_quant8_2::createTestModel();
-  const std::vector<Request> requests = createRequests(depth_to_space_quant8_2::examples);
+  const std::vector<Request> requests = createRequests(depth_to_space_quant8_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1025,12 +1025,12 @@
   generated_tests::Execute(device,
                            depthwise_conv::createTestModel,
                            depthwise_conv::is_ignored,
-                           depthwise_conv::examples);
+                           depthwise_conv::get_examples());
 }
 
 TEST_F(ValidationTest, depthwise_conv) {
   const Model model = depthwise_conv::createTestModel();
-  const std::vector<Request> requests = createRequests(depthwise_conv::examples);
+  const std::vector<Request> requests = createRequests(depthwise_conv::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1048,12 +1048,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_float::createTestModel,
                            depthwise_conv2d_float::is_ignored,
-                           depthwise_conv2d_float::examples);
+                           depthwise_conv2d_float::get_examples());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_float) {
   const Model model = depthwise_conv2d_float::createTestModel();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_float::examples);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_float::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1071,12 +1071,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_float_2::createTestModel,
                            depthwise_conv2d_float_2::is_ignored,
-                           depthwise_conv2d_float_2::examples);
+                           depthwise_conv2d_float_2::get_examples());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_float_2) {
   const Model model = depthwise_conv2d_float_2::createTestModel();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_float_2::examples);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_float_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1094,12 +1094,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_float_large::createTestModel,
                            depthwise_conv2d_float_large::is_ignored,
-                           depthwise_conv2d_float_large::examples);
+                           depthwise_conv2d_float_large::get_examples());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_float_large) {
   const Model model = depthwise_conv2d_float_large::createTestModel();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_float_large::examples);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_float_large::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1117,12 +1117,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_float_large_2::createTestModel,
                            depthwise_conv2d_float_large_2::is_ignored,
-                           depthwise_conv2d_float_large_2::examples);
+                           depthwise_conv2d_float_large_2::get_examples());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_float_large_2) {
   const Model model = depthwise_conv2d_float_large_2::createTestModel();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_float_large_2::examples);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_float_large_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1140,12 +1140,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_float_large_2_weights_as_inputs::createTestModel,
                            depthwise_conv2d_float_large_2_weights_as_inputs::is_ignored,
-                           depthwise_conv2d_float_large_2_weights_as_inputs::examples);
+                           depthwise_conv2d_float_large_2_weights_as_inputs::get_examples());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_float_large_2_weights_as_inputs) {
   const Model model = depthwise_conv2d_float_large_2_weights_as_inputs::createTestModel();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_float_large_2_weights_as_inputs::examples);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_float_large_2_weights_as_inputs::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1163,12 +1163,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_float_large_weights_as_inputs::createTestModel,
                            depthwise_conv2d_float_large_weights_as_inputs::is_ignored,
-                           depthwise_conv2d_float_large_weights_as_inputs::examples);
+                           depthwise_conv2d_float_large_weights_as_inputs::get_examples());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_float_large_weights_as_inputs) {
   const Model model = depthwise_conv2d_float_large_weights_as_inputs::createTestModel();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_float_large_weights_as_inputs::examples);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_float_large_weights_as_inputs::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1186,12 +1186,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_float_weights_as_inputs::createTestModel,
                            depthwise_conv2d_float_weights_as_inputs::is_ignored,
-                           depthwise_conv2d_float_weights_as_inputs::examples);
+                           depthwise_conv2d_float_weights_as_inputs::get_examples());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_float_weights_as_inputs) {
   const Model model = depthwise_conv2d_float_weights_as_inputs::createTestModel();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_float_weights_as_inputs::examples);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_float_weights_as_inputs::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1209,12 +1209,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_quant8::createTestModel,
                            depthwise_conv2d_quant8::is_ignored,
-                           depthwise_conv2d_quant8::examples);
+                           depthwise_conv2d_quant8::get_examples());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_quant8) {
   const Model model = depthwise_conv2d_quant8::createTestModel();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_quant8::examples);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_quant8::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1232,12 +1232,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_quant8_2::createTestModel,
                            depthwise_conv2d_quant8_2::is_ignored,
-                           depthwise_conv2d_quant8_2::examples);
+                           depthwise_conv2d_quant8_2::get_examples());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_quant8_2) {
   const Model model = depthwise_conv2d_quant8_2::createTestModel();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_quant8_2::examples);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_quant8_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1255,12 +1255,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_quant8_large::createTestModel,
                            depthwise_conv2d_quant8_large::is_ignored,
-                           depthwise_conv2d_quant8_large::examples);
+                           depthwise_conv2d_quant8_large::get_examples());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_quant8_large) {
   const Model model = depthwise_conv2d_quant8_large::createTestModel();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_quant8_large::examples);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_quant8_large::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1278,12 +1278,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_quant8_large_weights_as_inputs::createTestModel,
                            depthwise_conv2d_quant8_large_weights_as_inputs::is_ignored,
-                           depthwise_conv2d_quant8_large_weights_as_inputs::examples);
+                           depthwise_conv2d_quant8_large_weights_as_inputs::get_examples());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_quant8_large_weights_as_inputs) {
   const Model model = depthwise_conv2d_quant8_large_weights_as_inputs::createTestModel();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_quant8_large_weights_as_inputs::examples);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_quant8_large_weights_as_inputs::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1301,12 +1301,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_quant8_weights_as_inputs::createTestModel,
                            depthwise_conv2d_quant8_weights_as_inputs::is_ignored,
-                           depthwise_conv2d_quant8_weights_as_inputs::examples);
+                           depthwise_conv2d_quant8_weights_as_inputs::get_examples());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_quant8_weights_as_inputs) {
   const Model model = depthwise_conv2d_quant8_weights_as_inputs::createTestModel();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_quant8_weights_as_inputs::examples);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_quant8_weights_as_inputs::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1324,12 +1324,12 @@
   generated_tests::Execute(device,
                            dequantize::createTestModel,
                            dequantize::is_ignored,
-                           dequantize::examples);
+                           dequantize::get_examples());
 }
 
 TEST_F(ValidationTest, dequantize) {
   const Model model = dequantize::createTestModel();
-  const std::vector<Request> requests = createRequests(dequantize::examples);
+  const std::vector<Request> requests = createRequests(dequantize::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1347,12 +1347,12 @@
   generated_tests::Execute(device,
                            embedding_lookup::createTestModel,
                            embedding_lookup::is_ignored,
-                           embedding_lookup::examples);
+                           embedding_lookup::get_examples());
 }
 
 TEST_F(ValidationTest, embedding_lookup) {
   const Model model = embedding_lookup::createTestModel();
-  const std::vector<Request> requests = createRequests(embedding_lookup::examples);
+  const std::vector<Request> requests = createRequests(embedding_lookup::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1370,12 +1370,12 @@
   generated_tests::Execute(device,
                            floor::createTestModel,
                            floor::is_ignored,
-                           floor::examples);
+                           floor::get_examples());
 }
 
 TEST_F(ValidationTest, floor) {
   const Model model = floor::createTestModel();
-  const std::vector<Request> requests = createRequests(floor::examples);
+  const std::vector<Request> requests = createRequests(floor::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1393,12 +1393,12 @@
   generated_tests::Execute(device,
                            fully_connected_float::createTestModel,
                            fully_connected_float::is_ignored,
-                           fully_connected_float::examples);
+                           fully_connected_float::get_examples());
 }
 
 TEST_F(ValidationTest, fully_connected_float) {
   const Model model = fully_connected_float::createTestModel();
-  const std::vector<Request> requests = createRequests(fully_connected_float::examples);
+  const std::vector<Request> requests = createRequests(fully_connected_float::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1416,12 +1416,12 @@
   generated_tests::Execute(device,
                            fully_connected_float_2::createTestModel,
                            fully_connected_float_2::is_ignored,
-                           fully_connected_float_2::examples);
+                           fully_connected_float_2::get_examples());
 }
 
 TEST_F(ValidationTest, fully_connected_float_2) {
   const Model model = fully_connected_float_2::createTestModel();
-  const std::vector<Request> requests = createRequests(fully_connected_float_2::examples);
+  const std::vector<Request> requests = createRequests(fully_connected_float_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1439,12 +1439,12 @@
   generated_tests::Execute(device,
                            fully_connected_float_3::createTestModel,
                            fully_connected_float_3::is_ignored,
-                           fully_connected_float_3::examples);
+                           fully_connected_float_3::get_examples());
 }
 
 TEST_F(ValidationTest, fully_connected_float_3) {
   const Model model = fully_connected_float_3::createTestModel();
-  const std::vector<Request> requests = createRequests(fully_connected_float_3::examples);
+  const std::vector<Request> requests = createRequests(fully_connected_float_3::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1462,12 +1462,12 @@
   generated_tests::Execute(device,
                            fully_connected_float_large::createTestModel,
                            fully_connected_float_large::is_ignored,
-                           fully_connected_float_large::examples);
+                           fully_connected_float_large::get_examples());
 }
 
 TEST_F(ValidationTest, fully_connected_float_large) {
   const Model model = fully_connected_float_large::createTestModel();
-  const std::vector<Request> requests = createRequests(fully_connected_float_large::examples);
+  const std::vector<Request> requests = createRequests(fully_connected_float_large::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1485,12 +1485,12 @@
   generated_tests::Execute(device,
                            fully_connected_float_large_weights_as_inputs::createTestModel,
                            fully_connected_float_large_weights_as_inputs::is_ignored,
-                           fully_connected_float_large_weights_as_inputs::examples);
+                           fully_connected_float_large_weights_as_inputs::get_examples());
 }
 
 TEST_F(ValidationTest, fully_connected_float_large_weights_as_inputs) {
   const Model model = fully_connected_float_large_weights_as_inputs::createTestModel();
-  const std::vector<Request> requests = createRequests(fully_connected_float_large_weights_as_inputs::examples);
+  const std::vector<Request> requests = createRequests(fully_connected_float_large_weights_as_inputs::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1508,12 +1508,12 @@
   generated_tests::Execute(device,
                            fully_connected_float_weights_as_inputs::createTestModel,
                            fully_connected_float_weights_as_inputs::is_ignored,
-                           fully_connected_float_weights_as_inputs::examples);
+                           fully_connected_float_weights_as_inputs::get_examples());
 }
 
 TEST_F(ValidationTest, fully_connected_float_weights_as_inputs) {
   const Model model = fully_connected_float_weights_as_inputs::createTestModel();
-  const std::vector<Request> requests = createRequests(fully_connected_float_weights_as_inputs::examples);
+  const std::vector<Request> requests = createRequests(fully_connected_float_weights_as_inputs::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1531,12 +1531,12 @@
   generated_tests::Execute(device,
                            fully_connected_quant8::createTestModel,
                            fully_connected_quant8::is_ignored,
-                           fully_connected_quant8::examples);
+                           fully_connected_quant8::get_examples());
 }
 
 TEST_F(ValidationTest, fully_connected_quant8) {
   const Model model = fully_connected_quant8::createTestModel();
-  const std::vector<Request> requests = createRequests(fully_connected_quant8::examples);
+  const std::vector<Request> requests = createRequests(fully_connected_quant8::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1554,12 +1554,12 @@
   generated_tests::Execute(device,
                            fully_connected_quant8_2::createTestModel,
                            fully_connected_quant8_2::is_ignored,
-                           fully_connected_quant8_2::examples);
+                           fully_connected_quant8_2::get_examples());
 }
 
 TEST_F(ValidationTest, fully_connected_quant8_2) {
   const Model model = fully_connected_quant8_2::createTestModel();
-  const std::vector<Request> requests = createRequests(fully_connected_quant8_2::examples);
+  const std::vector<Request> requests = createRequests(fully_connected_quant8_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1577,12 +1577,12 @@
   generated_tests::Execute(device,
                            fully_connected_quant8_large::createTestModel,
                            fully_connected_quant8_large::is_ignored,
-                           fully_connected_quant8_large::examples);
+                           fully_connected_quant8_large::get_examples());
 }
 
 TEST_F(ValidationTest, fully_connected_quant8_large) {
   const Model model = fully_connected_quant8_large::createTestModel();
-  const std::vector<Request> requests = createRequests(fully_connected_quant8_large::examples);
+  const std::vector<Request> requests = createRequests(fully_connected_quant8_large::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1600,12 +1600,12 @@
   generated_tests::Execute(device,
                            fully_connected_quant8_large_weights_as_inputs::createTestModel,
                            fully_connected_quant8_large_weights_as_inputs::is_ignored,
-                           fully_connected_quant8_large_weights_as_inputs::examples);
+                           fully_connected_quant8_large_weights_as_inputs::get_examples());
 }
 
 TEST_F(ValidationTest, fully_connected_quant8_large_weights_as_inputs) {
   const Model model = fully_connected_quant8_large_weights_as_inputs::createTestModel();
-  const std::vector<Request> requests = createRequests(fully_connected_quant8_large_weights_as_inputs::examples);
+  const std::vector<Request> requests = createRequests(fully_connected_quant8_large_weights_as_inputs::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1623,12 +1623,12 @@
   generated_tests::Execute(device,
                            fully_connected_quant8_weights_as_inputs::createTestModel,
                            fully_connected_quant8_weights_as_inputs::is_ignored,
-                           fully_connected_quant8_weights_as_inputs::examples);
+                           fully_connected_quant8_weights_as_inputs::get_examples());
 }
 
 TEST_F(ValidationTest, fully_connected_quant8_weights_as_inputs) {
   const Model model = fully_connected_quant8_weights_as_inputs::createTestModel();
-  const std::vector<Request> requests = createRequests(fully_connected_quant8_weights_as_inputs::examples);
+  const std::vector<Request> requests = createRequests(fully_connected_quant8_weights_as_inputs::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1646,12 +1646,12 @@
   generated_tests::Execute(device,
                            hashtable_lookup_float::createTestModel,
                            hashtable_lookup_float::is_ignored,
-                           hashtable_lookup_float::examples);
+                           hashtable_lookup_float::get_examples());
 }
 
 TEST_F(ValidationTest, hashtable_lookup_float) {
   const Model model = hashtable_lookup_float::createTestModel();
-  const std::vector<Request> requests = createRequests(hashtable_lookup_float::examples);
+  const std::vector<Request> requests = createRequests(hashtable_lookup_float::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1669,12 +1669,12 @@
   generated_tests::Execute(device,
                            hashtable_lookup_quant8::createTestModel,
                            hashtable_lookup_quant8::is_ignored,
-                           hashtable_lookup_quant8::examples);
+                           hashtable_lookup_quant8::get_examples());
 }
 
 TEST_F(ValidationTest, hashtable_lookup_quant8) {
   const Model model = hashtable_lookup_quant8::createTestModel();
-  const std::vector<Request> requests = createRequests(hashtable_lookup_quant8::examples);
+  const std::vector<Request> requests = createRequests(hashtable_lookup_quant8::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1692,12 +1692,12 @@
   generated_tests::Execute(device,
                            l2_normalization::createTestModel,
                            l2_normalization::is_ignored,
-                           l2_normalization::examples);
+                           l2_normalization::get_examples());
 }
 
 TEST_F(ValidationTest, l2_normalization) {
   const Model model = l2_normalization::createTestModel();
-  const std::vector<Request> requests = createRequests(l2_normalization::examples);
+  const std::vector<Request> requests = createRequests(l2_normalization::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1715,12 +1715,12 @@
   generated_tests::Execute(device,
                            l2_normalization_2::createTestModel,
                            l2_normalization_2::is_ignored,
-                           l2_normalization_2::examples);
+                           l2_normalization_2::get_examples());
 }
 
 TEST_F(ValidationTest, l2_normalization_2) {
   const Model model = l2_normalization_2::createTestModel();
-  const std::vector<Request> requests = createRequests(l2_normalization_2::examples);
+  const std::vector<Request> requests = createRequests(l2_normalization_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1738,12 +1738,12 @@
   generated_tests::Execute(device,
                            l2_normalization_large::createTestModel,
                            l2_normalization_large::is_ignored,
-                           l2_normalization_large::examples);
+                           l2_normalization_large::get_examples());
 }
 
 TEST_F(ValidationTest, l2_normalization_large) {
   const Model model = l2_normalization_large::createTestModel();
-  const std::vector<Request> requests = createRequests(l2_normalization_large::examples);
+  const std::vector<Request> requests = createRequests(l2_normalization_large::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1761,12 +1761,12 @@
   generated_tests::Execute(device,
                            l2_pool_float::createTestModel,
                            l2_pool_float::is_ignored,
-                           l2_pool_float::examples);
+                           l2_pool_float::get_examples());
 }
 
 TEST_F(ValidationTest, l2_pool_float) {
   const Model model = l2_pool_float::createTestModel();
-  const std::vector<Request> requests = createRequests(l2_pool_float::examples);
+  const std::vector<Request> requests = createRequests(l2_pool_float::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1784,12 +1784,12 @@
   generated_tests::Execute(device,
                            l2_pool_float_2::createTestModel,
                            l2_pool_float_2::is_ignored,
-                           l2_pool_float_2::examples);
+                           l2_pool_float_2::get_examples());
 }
 
 TEST_F(ValidationTest, l2_pool_float_2) {
   const Model model = l2_pool_float_2::createTestModel();
-  const std::vector<Request> requests = createRequests(l2_pool_float_2::examples);
+  const std::vector<Request> requests = createRequests(l2_pool_float_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1807,12 +1807,12 @@
   generated_tests::Execute(device,
                            l2_pool_float_large::createTestModel,
                            l2_pool_float_large::is_ignored,
-                           l2_pool_float_large::examples);
+                           l2_pool_float_large::get_examples());
 }
 
 TEST_F(ValidationTest, l2_pool_float_large) {
   const Model model = l2_pool_float_large::createTestModel();
-  const std::vector<Request> requests = createRequests(l2_pool_float_large::examples);
+  const std::vector<Request> requests = createRequests(l2_pool_float_large::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1830,12 +1830,12 @@
   generated_tests::Execute(device,
                            local_response_norm_float_1::createTestModel,
                            local_response_norm_float_1::is_ignored,
-                           local_response_norm_float_1::examples);
+                           local_response_norm_float_1::get_examples());
 }
 
 TEST_F(ValidationTest, local_response_norm_float_1) {
   const Model model = local_response_norm_float_1::createTestModel();
-  const std::vector<Request> requests = createRequests(local_response_norm_float_1::examples);
+  const std::vector<Request> requests = createRequests(local_response_norm_float_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1853,12 +1853,12 @@
   generated_tests::Execute(device,
                            local_response_norm_float_2::createTestModel,
                            local_response_norm_float_2::is_ignored,
-                           local_response_norm_float_2::examples);
+                           local_response_norm_float_2::get_examples());
 }
 
 TEST_F(ValidationTest, local_response_norm_float_2) {
   const Model model = local_response_norm_float_2::createTestModel();
-  const std::vector<Request> requests = createRequests(local_response_norm_float_2::examples);
+  const std::vector<Request> requests = createRequests(local_response_norm_float_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1876,12 +1876,12 @@
   generated_tests::Execute(device,
                            local_response_norm_float_3::createTestModel,
                            local_response_norm_float_3::is_ignored,
-                           local_response_norm_float_3::examples);
+                           local_response_norm_float_3::get_examples());
 }
 
 TEST_F(ValidationTest, local_response_norm_float_3) {
   const Model model = local_response_norm_float_3::createTestModel();
-  const std::vector<Request> requests = createRequests(local_response_norm_float_3::examples);
+  const std::vector<Request> requests = createRequests(local_response_norm_float_3::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1899,12 +1899,12 @@
   generated_tests::Execute(device,
                            local_response_norm_float_4::createTestModel,
                            local_response_norm_float_4::is_ignored,
-                           local_response_norm_float_4::examples);
+                           local_response_norm_float_4::get_examples());
 }
 
 TEST_F(ValidationTest, local_response_norm_float_4) {
   const Model model = local_response_norm_float_4::createTestModel();
-  const std::vector<Request> requests = createRequests(local_response_norm_float_4::examples);
+  const std::vector<Request> requests = createRequests(local_response_norm_float_4::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1922,12 +1922,12 @@
   generated_tests::Execute(device,
                            logistic_float_1::createTestModel,
                            logistic_float_1::is_ignored,
-                           logistic_float_1::examples);
+                           logistic_float_1::get_examples());
 }
 
 TEST_F(ValidationTest, logistic_float_1) {
   const Model model = logistic_float_1::createTestModel();
-  const std::vector<Request> requests = createRequests(logistic_float_1::examples);
+  const std::vector<Request> requests = createRequests(logistic_float_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1945,12 +1945,12 @@
   generated_tests::Execute(device,
                            logistic_float_2::createTestModel,
                            logistic_float_2::is_ignored,
-                           logistic_float_2::examples);
+                           logistic_float_2::get_examples());
 }
 
 TEST_F(ValidationTest, logistic_float_2) {
   const Model model = logistic_float_2::createTestModel();
-  const std::vector<Request> requests = createRequests(logistic_float_2::examples);
+  const std::vector<Request> requests = createRequests(logistic_float_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1968,12 +1968,12 @@
   generated_tests::Execute(device,
                            logistic_quant8_1::createTestModel,
                            logistic_quant8_1::is_ignored,
-                           logistic_quant8_1::examples);
+                           logistic_quant8_1::get_examples());
 }
 
 TEST_F(ValidationTest, logistic_quant8_1) {
   const Model model = logistic_quant8_1::createTestModel();
-  const std::vector<Request> requests = createRequests(logistic_quant8_1::examples);
+  const std::vector<Request> requests = createRequests(logistic_quant8_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1991,12 +1991,12 @@
   generated_tests::Execute(device,
                            logistic_quant8_2::createTestModel,
                            logistic_quant8_2::is_ignored,
-                           logistic_quant8_2::examples);
+                           logistic_quant8_2::get_examples());
 }
 
 TEST_F(ValidationTest, logistic_quant8_2) {
   const Model model = logistic_quant8_2::createTestModel();
-  const std::vector<Request> requests = createRequests(logistic_quant8_2::examples);
+  const std::vector<Request> requests = createRequests(logistic_quant8_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2014,12 +2014,12 @@
   generated_tests::Execute(device,
                            lsh_projection::createTestModel,
                            lsh_projection::is_ignored,
-                           lsh_projection::examples);
+                           lsh_projection::get_examples());
 }
 
 TEST_F(ValidationTest, lsh_projection) {
   const Model model = lsh_projection::createTestModel();
-  const std::vector<Request> requests = createRequests(lsh_projection::examples);
+  const std::vector<Request> requests = createRequests(lsh_projection::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2037,12 +2037,12 @@
   generated_tests::Execute(device,
                            lsh_projection_2::createTestModel,
                            lsh_projection_2::is_ignored,
-                           lsh_projection_2::examples);
+                           lsh_projection_2::get_examples());
 }
 
 TEST_F(ValidationTest, lsh_projection_2) {
   const Model model = lsh_projection_2::createTestModel();
-  const std::vector<Request> requests = createRequests(lsh_projection_2::examples);
+  const std::vector<Request> requests = createRequests(lsh_projection_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2060,12 +2060,12 @@
   generated_tests::Execute(device,
                            lsh_projection_weights_as_inputs::createTestModel,
                            lsh_projection_weights_as_inputs::is_ignored,
-                           lsh_projection_weights_as_inputs::examples);
+                           lsh_projection_weights_as_inputs::get_examples());
 }
 
 TEST_F(ValidationTest, lsh_projection_weights_as_inputs) {
   const Model model = lsh_projection_weights_as_inputs::createTestModel();
-  const std::vector<Request> requests = createRequests(lsh_projection_weights_as_inputs::examples);
+  const std::vector<Request> requests = createRequests(lsh_projection_weights_as_inputs::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2083,12 +2083,12 @@
   generated_tests::Execute(device,
                            lstm::createTestModel,
                            lstm::is_ignored,
-                           lstm::examples);
+                           lstm::get_examples());
 }
 
 TEST_F(ValidationTest, lstm) {
   const Model model = lstm::createTestModel();
-  const std::vector<Request> requests = createRequests(lstm::examples);
+  const std::vector<Request> requests = createRequests(lstm::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2106,12 +2106,12 @@
   generated_tests::Execute(device,
                            lstm2::createTestModel,
                            lstm2::is_ignored,
-                           lstm2::examples);
+                           lstm2::get_examples());
 }
 
 TEST_F(ValidationTest, lstm2) {
   const Model model = lstm2::createTestModel();
-  const std::vector<Request> requests = createRequests(lstm2::examples);
+  const std::vector<Request> requests = createRequests(lstm2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2129,12 +2129,12 @@
   generated_tests::Execute(device,
                            lstm2_state::createTestModel,
                            lstm2_state::is_ignored,
-                           lstm2_state::examples);
+                           lstm2_state::get_examples());
 }
 
 TEST_F(ValidationTest, lstm2_state) {
   const Model model = lstm2_state::createTestModel();
-  const std::vector<Request> requests = createRequests(lstm2_state::examples);
+  const std::vector<Request> requests = createRequests(lstm2_state::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2152,12 +2152,12 @@
   generated_tests::Execute(device,
                            lstm2_state2::createTestModel,
                            lstm2_state2::is_ignored,
-                           lstm2_state2::examples);
+                           lstm2_state2::get_examples());
 }
 
 TEST_F(ValidationTest, lstm2_state2) {
   const Model model = lstm2_state2::createTestModel();
-  const std::vector<Request> requests = createRequests(lstm2_state2::examples);
+  const std::vector<Request> requests = createRequests(lstm2_state2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2175,12 +2175,12 @@
   generated_tests::Execute(device,
                            lstm3::createTestModel,
                            lstm3::is_ignored,
-                           lstm3::examples);
+                           lstm3::get_examples());
 }
 
 TEST_F(ValidationTest, lstm3) {
   const Model model = lstm3::createTestModel();
-  const std::vector<Request> requests = createRequests(lstm3::examples);
+  const std::vector<Request> requests = createRequests(lstm3::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2198,12 +2198,12 @@
   generated_tests::Execute(device,
                            lstm3_state::createTestModel,
                            lstm3_state::is_ignored,
-                           lstm3_state::examples);
+                           lstm3_state::get_examples());
 }
 
 TEST_F(ValidationTest, lstm3_state) {
   const Model model = lstm3_state::createTestModel();
-  const std::vector<Request> requests = createRequests(lstm3_state::examples);
+  const std::vector<Request> requests = createRequests(lstm3_state::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2221,12 +2221,12 @@
   generated_tests::Execute(device,
                            lstm3_state2::createTestModel,
                            lstm3_state2::is_ignored,
-                           lstm3_state2::examples);
+                           lstm3_state2::get_examples());
 }
 
 TEST_F(ValidationTest, lstm3_state2) {
   const Model model = lstm3_state2::createTestModel();
-  const std::vector<Request> requests = createRequests(lstm3_state2::examples);
+  const std::vector<Request> requests = createRequests(lstm3_state2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2244,12 +2244,12 @@
   generated_tests::Execute(device,
                            lstm3_state3::createTestModel,
                            lstm3_state3::is_ignored,
-                           lstm3_state3::examples);
+                           lstm3_state3::get_examples());
 }
 
 TEST_F(ValidationTest, lstm3_state3) {
   const Model model = lstm3_state3::createTestModel();
-  const std::vector<Request> requests = createRequests(lstm3_state3::examples);
+  const std::vector<Request> requests = createRequests(lstm3_state3::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2267,12 +2267,12 @@
   generated_tests::Execute(device,
                            lstm_state::createTestModel,
                            lstm_state::is_ignored,
-                           lstm_state::examples);
+                           lstm_state::get_examples());
 }
 
 TEST_F(ValidationTest, lstm_state) {
   const Model model = lstm_state::createTestModel();
-  const std::vector<Request> requests = createRequests(lstm_state::examples);
+  const std::vector<Request> requests = createRequests(lstm_state::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2290,12 +2290,12 @@
   generated_tests::Execute(device,
                            lstm_state2::createTestModel,
                            lstm_state2::is_ignored,
-                           lstm_state2::examples);
+                           lstm_state2::get_examples());
 }
 
 TEST_F(ValidationTest, lstm_state2) {
   const Model model = lstm_state2::createTestModel();
-  const std::vector<Request> requests = createRequests(lstm_state2::examples);
+  const std::vector<Request> requests = createRequests(lstm_state2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2313,12 +2313,12 @@
   generated_tests::Execute(device,
                            max_pool_float_1::createTestModel,
                            max_pool_float_1::is_ignored,
-                           max_pool_float_1::examples);
+                           max_pool_float_1::get_examples());
 }
 
 TEST_F(ValidationTest, max_pool_float_1) {
   const Model model = max_pool_float_1::createTestModel();
-  const std::vector<Request> requests = createRequests(max_pool_float_1::examples);
+  const std::vector<Request> requests = createRequests(max_pool_float_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2336,12 +2336,12 @@
   generated_tests::Execute(device,
                            max_pool_float_2::createTestModel,
                            max_pool_float_2::is_ignored,
-                           max_pool_float_2::examples);
+                           max_pool_float_2::get_examples());
 }
 
 TEST_F(ValidationTest, max_pool_float_2) {
   const Model model = max_pool_float_2::createTestModel();
-  const std::vector<Request> requests = createRequests(max_pool_float_2::examples);
+  const std::vector<Request> requests = createRequests(max_pool_float_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2359,12 +2359,12 @@
   generated_tests::Execute(device,
                            max_pool_float_3::createTestModel,
                            max_pool_float_3::is_ignored,
-                           max_pool_float_3::examples);
+                           max_pool_float_3::get_examples());
 }
 
 TEST_F(ValidationTest, max_pool_float_3) {
   const Model model = max_pool_float_3::createTestModel();
-  const std::vector<Request> requests = createRequests(max_pool_float_3::examples);
+  const std::vector<Request> requests = createRequests(max_pool_float_3::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2382,12 +2382,12 @@
   generated_tests::Execute(device,
                            max_pool_float_4::createTestModel,
                            max_pool_float_4::is_ignored,
-                           max_pool_float_4::examples);
+                           max_pool_float_4::get_examples());
 }
 
 TEST_F(ValidationTest, max_pool_float_4) {
   const Model model = max_pool_float_4::createTestModel();
-  const std::vector<Request> requests = createRequests(max_pool_float_4::examples);
+  const std::vector<Request> requests = createRequests(max_pool_float_4::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2405,12 +2405,12 @@
   generated_tests::Execute(device,
                            max_pool_quant8_1::createTestModel,
                            max_pool_quant8_1::is_ignored,
-                           max_pool_quant8_1::examples);
+                           max_pool_quant8_1::get_examples());
 }
 
 TEST_F(ValidationTest, max_pool_quant8_1) {
   const Model model = max_pool_quant8_1::createTestModel();
-  const std::vector<Request> requests = createRequests(max_pool_quant8_1::examples);
+  const std::vector<Request> requests = createRequests(max_pool_quant8_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2428,12 +2428,12 @@
   generated_tests::Execute(device,
                            max_pool_quant8_2::createTestModel,
                            max_pool_quant8_2::is_ignored,
-                           max_pool_quant8_2::examples);
+                           max_pool_quant8_2::get_examples());
 }
 
 TEST_F(ValidationTest, max_pool_quant8_2) {
   const Model model = max_pool_quant8_2::createTestModel();
-  const std::vector<Request> requests = createRequests(max_pool_quant8_2::examples);
+  const std::vector<Request> requests = createRequests(max_pool_quant8_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2451,12 +2451,12 @@
   generated_tests::Execute(device,
                            max_pool_quant8_3::createTestModel,
                            max_pool_quant8_3::is_ignored,
-                           max_pool_quant8_3::examples);
+                           max_pool_quant8_3::get_examples());
 }
 
 TEST_F(ValidationTest, max_pool_quant8_3) {
   const Model model = max_pool_quant8_3::createTestModel();
-  const std::vector<Request> requests = createRequests(max_pool_quant8_3::examples);
+  const std::vector<Request> requests = createRequests(max_pool_quant8_3::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2474,12 +2474,12 @@
   generated_tests::Execute(device,
                            max_pool_quant8_4::createTestModel,
                            max_pool_quant8_4::is_ignored,
-                           max_pool_quant8_4::examples);
+                           max_pool_quant8_4::get_examples());
 }
 
 TEST_F(ValidationTest, max_pool_quant8_4) {
   const Model model = max_pool_quant8_4::createTestModel();
-  const std::vector<Request> requests = createRequests(max_pool_quant8_4::examples);
+  const std::vector<Request> requests = createRequests(max_pool_quant8_4::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2497,12 +2497,12 @@
   generated_tests::Execute(device,
                            mobilenet_224_gender_basic_fixed::createTestModel,
                            mobilenet_224_gender_basic_fixed::is_ignored,
-                           mobilenet_224_gender_basic_fixed::examples);
+                           mobilenet_224_gender_basic_fixed::get_examples());
 }
 
 TEST_F(ValidationTest, mobilenet_224_gender_basic_fixed) {
   const Model model = mobilenet_224_gender_basic_fixed::createTestModel();
-  const std::vector<Request> requests = createRequests(mobilenet_224_gender_basic_fixed::examples);
+  const std::vector<Request> requests = createRequests(mobilenet_224_gender_basic_fixed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2520,12 +2520,12 @@
   generated_tests::Execute(device,
                            mobilenet_quantized::createTestModel,
                            mobilenet_quantized::is_ignored,
-                           mobilenet_quantized::examples);
+                           mobilenet_quantized::get_examples());
 }
 
 TEST_F(ValidationTest, mobilenet_quantized) {
   const Model model = mobilenet_quantized::createTestModel();
-  const std::vector<Request> requests = createRequests(mobilenet_quantized::examples);
+  const std::vector<Request> requests = createRequests(mobilenet_quantized::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2543,12 +2543,12 @@
   generated_tests::Execute(device,
                            mul::createTestModel,
                            mul::is_ignored,
-                           mul::examples);
+                           mul::get_examples());
 }
 
 TEST_F(ValidationTest, mul) {
   const Model model = mul::createTestModel();
-  const std::vector<Request> requests = createRequests(mul::examples);
+  const std::vector<Request> requests = createRequests(mul::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2566,12 +2566,12 @@
   generated_tests::Execute(device,
                            mul_broadcast_quant8::createTestModel,
                            mul_broadcast_quant8::is_ignored,
-                           mul_broadcast_quant8::examples);
+                           mul_broadcast_quant8::get_examples());
 }
 
 TEST_F(ValidationTest, mul_broadcast_quant8) {
   const Model model = mul_broadcast_quant8::createTestModel();
-  const std::vector<Request> requests = createRequests(mul_broadcast_quant8::examples);
+  const std::vector<Request> requests = createRequests(mul_broadcast_quant8::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2589,12 +2589,12 @@
   generated_tests::Execute(device,
                            mul_quant8::createTestModel,
                            mul_quant8::is_ignored,
-                           mul_quant8::examples);
+                           mul_quant8::get_examples());
 }
 
 TEST_F(ValidationTest, mul_quant8) {
   const Model model = mul_quant8::createTestModel();
-  const std::vector<Request> requests = createRequests(mul_quant8::examples);
+  const std::vector<Request> requests = createRequests(mul_quant8::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2612,12 +2612,12 @@
   generated_tests::Execute(device,
                            mul_relu::createTestModel,
                            mul_relu::is_ignored,
-                           mul_relu::examples);
+                           mul_relu::get_examples());
 }
 
 TEST_F(ValidationTest, mul_relu) {
   const Model model = mul_relu::createTestModel();
-  const std::vector<Request> requests = createRequests(mul_relu::examples);
+  const std::vector<Request> requests = createRequests(mul_relu::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2635,12 +2635,12 @@
   generated_tests::Execute(device,
                            relu1_float_1::createTestModel,
                            relu1_float_1::is_ignored,
-                           relu1_float_1::examples);
+                           relu1_float_1::get_examples());
 }
 
 TEST_F(ValidationTest, relu1_float_1) {
   const Model model = relu1_float_1::createTestModel();
-  const std::vector<Request> requests = createRequests(relu1_float_1::examples);
+  const std::vector<Request> requests = createRequests(relu1_float_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2658,12 +2658,12 @@
   generated_tests::Execute(device,
                            relu1_float_2::createTestModel,
                            relu1_float_2::is_ignored,
-                           relu1_float_2::examples);
+                           relu1_float_2::get_examples());
 }
 
 TEST_F(ValidationTest, relu1_float_2) {
   const Model model = relu1_float_2::createTestModel();
-  const std::vector<Request> requests = createRequests(relu1_float_2::examples);
+  const std::vector<Request> requests = createRequests(relu1_float_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2681,12 +2681,12 @@
   generated_tests::Execute(device,
                            relu1_quant8_1::createTestModel,
                            relu1_quant8_1::is_ignored,
-                           relu1_quant8_1::examples);
+                           relu1_quant8_1::get_examples());
 }
 
 TEST_F(ValidationTest, relu1_quant8_1) {
   const Model model = relu1_quant8_1::createTestModel();
-  const std::vector<Request> requests = createRequests(relu1_quant8_1::examples);
+  const std::vector<Request> requests = createRequests(relu1_quant8_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2704,12 +2704,12 @@
   generated_tests::Execute(device,
                            relu1_quant8_2::createTestModel,
                            relu1_quant8_2::is_ignored,
-                           relu1_quant8_2::examples);
+                           relu1_quant8_2::get_examples());
 }
 
 TEST_F(ValidationTest, relu1_quant8_2) {
   const Model model = relu1_quant8_2::createTestModel();
-  const std::vector<Request> requests = createRequests(relu1_quant8_2::examples);
+  const std::vector<Request> requests = createRequests(relu1_quant8_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2727,12 +2727,12 @@
   generated_tests::Execute(device,
                            relu6_float_1::createTestModel,
                            relu6_float_1::is_ignored,
-                           relu6_float_1::examples);
+                           relu6_float_1::get_examples());
 }
 
 TEST_F(ValidationTest, relu6_float_1) {
   const Model model = relu6_float_1::createTestModel();
-  const std::vector<Request> requests = createRequests(relu6_float_1::examples);
+  const std::vector<Request> requests = createRequests(relu6_float_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2750,12 +2750,12 @@
   generated_tests::Execute(device,
                            relu6_float_2::createTestModel,
                            relu6_float_2::is_ignored,
-                           relu6_float_2::examples);
+                           relu6_float_2::get_examples());
 }
 
 TEST_F(ValidationTest, relu6_float_2) {
   const Model model = relu6_float_2::createTestModel();
-  const std::vector<Request> requests = createRequests(relu6_float_2::examples);
+  const std::vector<Request> requests = createRequests(relu6_float_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2773,12 +2773,12 @@
   generated_tests::Execute(device,
                            relu6_quant8_1::createTestModel,
                            relu6_quant8_1::is_ignored,
-                           relu6_quant8_1::examples);
+                           relu6_quant8_1::get_examples());
 }
 
 TEST_F(ValidationTest, relu6_quant8_1) {
   const Model model = relu6_quant8_1::createTestModel();
-  const std::vector<Request> requests = createRequests(relu6_quant8_1::examples);
+  const std::vector<Request> requests = createRequests(relu6_quant8_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2796,12 +2796,12 @@
   generated_tests::Execute(device,
                            relu6_quant8_2::createTestModel,
                            relu6_quant8_2::is_ignored,
-                           relu6_quant8_2::examples);
+                           relu6_quant8_2::get_examples());
 }
 
 TEST_F(ValidationTest, relu6_quant8_2) {
   const Model model = relu6_quant8_2::createTestModel();
-  const std::vector<Request> requests = createRequests(relu6_quant8_2::examples);
+  const std::vector<Request> requests = createRequests(relu6_quant8_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2819,12 +2819,12 @@
   generated_tests::Execute(device,
                            relu_float_1::createTestModel,
                            relu_float_1::is_ignored,
-                           relu_float_1::examples);
+                           relu_float_1::get_examples());
 }
 
 TEST_F(ValidationTest, relu_float_1) {
   const Model model = relu_float_1::createTestModel();
-  const std::vector<Request> requests = createRequests(relu_float_1::examples);
+  const std::vector<Request> requests = createRequests(relu_float_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2842,12 +2842,12 @@
   generated_tests::Execute(device,
                            relu_float_2::createTestModel,
                            relu_float_2::is_ignored,
-                           relu_float_2::examples);
+                           relu_float_2::get_examples());
 }
 
 TEST_F(ValidationTest, relu_float_2) {
   const Model model = relu_float_2::createTestModel();
-  const std::vector<Request> requests = createRequests(relu_float_2::examples);
+  const std::vector<Request> requests = createRequests(relu_float_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2865,12 +2865,12 @@
   generated_tests::Execute(device,
                            relu_quant8_1::createTestModel,
                            relu_quant8_1::is_ignored,
-                           relu_quant8_1::examples);
+                           relu_quant8_1::get_examples());
 }
 
 TEST_F(ValidationTest, relu_quant8_1) {
   const Model model = relu_quant8_1::createTestModel();
-  const std::vector<Request> requests = createRequests(relu_quant8_1::examples);
+  const std::vector<Request> requests = createRequests(relu_quant8_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2888,12 +2888,12 @@
   generated_tests::Execute(device,
                            relu_quant8_2::createTestModel,
                            relu_quant8_2::is_ignored,
-                           relu_quant8_2::examples);
+                           relu_quant8_2::get_examples());
 }
 
 TEST_F(ValidationTest, relu_quant8_2) {
   const Model model = relu_quant8_2::createTestModel();
-  const std::vector<Request> requests = createRequests(relu_quant8_2::examples);
+  const std::vector<Request> requests = createRequests(relu_quant8_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2911,12 +2911,12 @@
   generated_tests::Execute(device,
                            reshape::createTestModel,
                            reshape::is_ignored,
-                           reshape::examples);
+                           reshape::get_examples());
 }
 
 TEST_F(ValidationTest, reshape) {
   const Model model = reshape::createTestModel();
-  const std::vector<Request> requests = createRequests(reshape::examples);
+  const std::vector<Request> requests = createRequests(reshape::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2934,12 +2934,12 @@
   generated_tests::Execute(device,
                            reshape_quant8::createTestModel,
                            reshape_quant8::is_ignored,
-                           reshape_quant8::examples);
+                           reshape_quant8::get_examples());
 }
 
 TEST_F(ValidationTest, reshape_quant8) {
   const Model model = reshape_quant8::createTestModel();
-  const std::vector<Request> requests = createRequests(reshape_quant8::examples);
+  const std::vector<Request> requests = createRequests(reshape_quant8::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2957,12 +2957,12 @@
   generated_tests::Execute(device,
                            reshape_quant8_weights_as_inputs::createTestModel,
                            reshape_quant8_weights_as_inputs::is_ignored,
-                           reshape_quant8_weights_as_inputs::examples);
+                           reshape_quant8_weights_as_inputs::get_examples());
 }
 
 TEST_F(ValidationTest, reshape_quant8_weights_as_inputs) {
   const Model model = reshape_quant8_weights_as_inputs::createTestModel();
-  const std::vector<Request> requests = createRequests(reshape_quant8_weights_as_inputs::examples);
+  const std::vector<Request> requests = createRequests(reshape_quant8_weights_as_inputs::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2980,12 +2980,12 @@
   generated_tests::Execute(device,
                            reshape_weights_as_inputs::createTestModel,
                            reshape_weights_as_inputs::is_ignored,
-                           reshape_weights_as_inputs::examples);
+                           reshape_weights_as_inputs::get_examples());
 }
 
 TEST_F(ValidationTest, reshape_weights_as_inputs) {
   const Model model = reshape_weights_as_inputs::createTestModel();
-  const std::vector<Request> requests = createRequests(reshape_weights_as_inputs::examples);
+  const std::vector<Request> requests = createRequests(reshape_weights_as_inputs::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3003,12 +3003,12 @@
   generated_tests::Execute(device,
                            resize_bilinear::createTestModel,
                            resize_bilinear::is_ignored,
-                           resize_bilinear::examples);
+                           resize_bilinear::get_examples());
 }
 
 TEST_F(ValidationTest, resize_bilinear) {
   const Model model = resize_bilinear::createTestModel();
-  const std::vector<Request> requests = createRequests(resize_bilinear::examples);
+  const std::vector<Request> requests = createRequests(resize_bilinear::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3026,12 +3026,12 @@
   generated_tests::Execute(device,
                            resize_bilinear_2::createTestModel,
                            resize_bilinear_2::is_ignored,
-                           resize_bilinear_2::examples);
+                           resize_bilinear_2::get_examples());
 }
 
 TEST_F(ValidationTest, resize_bilinear_2) {
   const Model model = resize_bilinear_2::createTestModel();
-  const std::vector<Request> requests = createRequests(resize_bilinear_2::examples);
+  const std::vector<Request> requests = createRequests(resize_bilinear_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3049,12 +3049,12 @@
   generated_tests::Execute(device,
                            rnn::createTestModel,
                            rnn::is_ignored,
-                           rnn::examples);
+                           rnn::get_examples());
 }
 
 TEST_F(ValidationTest, rnn) {
   const Model model = rnn::createTestModel();
-  const std::vector<Request> requests = createRequests(rnn::examples);
+  const std::vector<Request> requests = createRequests(rnn::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3072,12 +3072,12 @@
   generated_tests::Execute(device,
                            rnn_state::createTestModel,
                            rnn_state::is_ignored,
-                           rnn_state::examples);
+                           rnn_state::get_examples());
 }
 
 TEST_F(ValidationTest, rnn_state) {
   const Model model = rnn_state::createTestModel();
-  const std::vector<Request> requests = createRequests(rnn_state::examples);
+  const std::vector<Request> requests = createRequests(rnn_state::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3095,12 +3095,12 @@
   generated_tests::Execute(device,
                            softmax_float_1::createTestModel,
                            softmax_float_1::is_ignored,
-                           softmax_float_1::examples);
+                           softmax_float_1::get_examples());
 }
 
 TEST_F(ValidationTest, softmax_float_1) {
   const Model model = softmax_float_1::createTestModel();
-  const std::vector<Request> requests = createRequests(softmax_float_1::examples);
+  const std::vector<Request> requests = createRequests(softmax_float_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3118,12 +3118,12 @@
   generated_tests::Execute(device,
                            softmax_float_2::createTestModel,
                            softmax_float_2::is_ignored,
-                           softmax_float_2::examples);
+                           softmax_float_2::get_examples());
 }
 
 TEST_F(ValidationTest, softmax_float_2) {
   const Model model = softmax_float_2::createTestModel();
-  const std::vector<Request> requests = createRequests(softmax_float_2::examples);
+  const std::vector<Request> requests = createRequests(softmax_float_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3141,12 +3141,12 @@
   generated_tests::Execute(device,
                            softmax_quant8_1::createTestModel,
                            softmax_quant8_1::is_ignored,
-                           softmax_quant8_1::examples);
+                           softmax_quant8_1::get_examples());
 }
 
 TEST_F(ValidationTest, softmax_quant8_1) {
   const Model model = softmax_quant8_1::createTestModel();
-  const std::vector<Request> requests = createRequests(softmax_quant8_1::examples);
+  const std::vector<Request> requests = createRequests(softmax_quant8_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3164,12 +3164,12 @@
   generated_tests::Execute(device,
                            softmax_quant8_2::createTestModel,
                            softmax_quant8_2::is_ignored,
-                           softmax_quant8_2::examples);
+                           softmax_quant8_2::get_examples());
 }
 
 TEST_F(ValidationTest, softmax_quant8_2) {
   const Model model = softmax_quant8_2::createTestModel();
-  const std::vector<Request> requests = createRequests(softmax_quant8_2::examples);
+  const std::vector<Request> requests = createRequests(softmax_quant8_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3187,12 +3187,12 @@
   generated_tests::Execute(device,
                            space_to_depth_float_1::createTestModel,
                            space_to_depth_float_1::is_ignored,
-                           space_to_depth_float_1::examples);
+                           space_to_depth_float_1::get_examples());
 }
 
 TEST_F(ValidationTest, space_to_depth_float_1) {
   const Model model = space_to_depth_float_1::createTestModel();
-  const std::vector<Request> requests = createRequests(space_to_depth_float_1::examples);
+  const std::vector<Request> requests = createRequests(space_to_depth_float_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3210,12 +3210,12 @@
   generated_tests::Execute(device,
                            space_to_depth_float_2::createTestModel,
                            space_to_depth_float_2::is_ignored,
-                           space_to_depth_float_2::examples);
+                           space_to_depth_float_2::get_examples());
 }
 
 TEST_F(ValidationTest, space_to_depth_float_2) {
   const Model model = space_to_depth_float_2::createTestModel();
-  const std::vector<Request> requests = createRequests(space_to_depth_float_2::examples);
+  const std::vector<Request> requests = createRequests(space_to_depth_float_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3233,12 +3233,12 @@
   generated_tests::Execute(device,
                            space_to_depth_float_3::createTestModel,
                            space_to_depth_float_3::is_ignored,
-                           space_to_depth_float_3::examples);
+                           space_to_depth_float_3::get_examples());
 }
 
 TEST_F(ValidationTest, space_to_depth_float_3) {
   const Model model = space_to_depth_float_3::createTestModel();
-  const std::vector<Request> requests = createRequests(space_to_depth_float_3::examples);
+  const std::vector<Request> requests = createRequests(space_to_depth_float_3::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3256,12 +3256,12 @@
   generated_tests::Execute(device,
                            space_to_depth_quant8_1::createTestModel,
                            space_to_depth_quant8_1::is_ignored,
-                           space_to_depth_quant8_1::examples);
+                           space_to_depth_quant8_1::get_examples());
 }
 
 TEST_F(ValidationTest, space_to_depth_quant8_1) {
   const Model model = space_to_depth_quant8_1::createTestModel();
-  const std::vector<Request> requests = createRequests(space_to_depth_quant8_1::examples);
+  const std::vector<Request> requests = createRequests(space_to_depth_quant8_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3279,12 +3279,12 @@
   generated_tests::Execute(device,
                            space_to_depth_quant8_2::createTestModel,
                            space_to_depth_quant8_2::is_ignored,
-                           space_to_depth_quant8_2::examples);
+                           space_to_depth_quant8_2::get_examples());
 }
 
 TEST_F(ValidationTest, space_to_depth_quant8_2) {
   const Model model = space_to_depth_quant8_2::createTestModel();
-  const std::vector<Request> requests = createRequests(space_to_depth_quant8_2::examples);
+  const std::vector<Request> requests = createRequests(space_to_depth_quant8_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3302,12 +3302,12 @@
   generated_tests::Execute(device,
                            svdf::createTestModel,
                            svdf::is_ignored,
-                           svdf::examples);
+                           svdf::get_examples());
 }
 
 TEST_F(ValidationTest, svdf) {
   const Model model = svdf::createTestModel();
-  const std::vector<Request> requests = createRequests(svdf::examples);
+  const std::vector<Request> requests = createRequests(svdf::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3325,12 +3325,12 @@
   generated_tests::Execute(device,
                            svdf2::createTestModel,
                            svdf2::is_ignored,
-                           svdf2::examples);
+                           svdf2::get_examples());
 }
 
 TEST_F(ValidationTest, svdf2) {
   const Model model = svdf2::createTestModel();
-  const std::vector<Request> requests = createRequests(svdf2::examples);
+  const std::vector<Request> requests = createRequests(svdf2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3348,12 +3348,12 @@
   generated_tests::Execute(device,
                            svdf_state::createTestModel,
                            svdf_state::is_ignored,
-                           svdf_state::examples);
+                           svdf_state::get_examples());
 }
 
 TEST_F(ValidationTest, svdf_state) {
   const Model model = svdf_state::createTestModel();
-  const std::vector<Request> requests = createRequests(svdf_state::examples);
+  const std::vector<Request> requests = createRequests(svdf_state::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3371,12 +3371,12 @@
   generated_tests::Execute(device,
                            tanh::createTestModel,
                            tanh::is_ignored,
-                           tanh::examples);
+                           tanh::get_examples());
 }
 
 TEST_F(ValidationTest, tanh) {
   const Model model = tanh::createTestModel();
-  const std::vector<Request> requests = createRequests(tanh::examples);
+  const std::vector<Request> requests = createRequests(tanh::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
diff --git a/nn/runtime/test/generated/all_generated_V1_1_vts_tests.cpp b/nn/runtime/test/generated/all_generated_V1_1_vts_tests.cpp
index df3c673..1a7deaf 100644
--- a/nn/runtime/test/generated/all_generated_V1_1_vts_tests.cpp
+++ b/nn/runtime/test/generated/all_generated_V1_1_vts_tests.cpp
@@ -13,12 +13,12 @@
   generated_tests::Execute(device,
                            add_relaxed::createTestModel,
                            add_relaxed::is_ignored,
-                           add_relaxed::examples);
+                           add_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, add_relaxed) {
   const Model model = add_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(add_relaxed::examples);
+  const std::vector<Request> requests = createRequests(add_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -36,12 +36,12 @@
   generated_tests::Execute(device,
                            avg_pool_float_1_relaxed::createTestModel,
                            avg_pool_float_1_relaxed::is_ignored,
-                           avg_pool_float_1_relaxed::examples);
+                           avg_pool_float_1_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, avg_pool_float_1_relaxed) {
   const Model model = avg_pool_float_1_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(avg_pool_float_1_relaxed::examples);
+  const std::vector<Request> requests = createRequests(avg_pool_float_1_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -59,12 +59,12 @@
   generated_tests::Execute(device,
                            avg_pool_float_2_relaxed::createTestModel,
                            avg_pool_float_2_relaxed::is_ignored,
-                           avg_pool_float_2_relaxed::examples);
+                           avg_pool_float_2_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, avg_pool_float_2_relaxed) {
   const Model model = avg_pool_float_2_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(avg_pool_float_2_relaxed::examples);
+  const std::vector<Request> requests = createRequests(avg_pool_float_2_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -82,12 +82,12 @@
   generated_tests::Execute(device,
                            avg_pool_float_3_relaxed::createTestModel,
                            avg_pool_float_3_relaxed::is_ignored,
-                           avg_pool_float_3_relaxed::examples);
+                           avg_pool_float_3_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, avg_pool_float_3_relaxed) {
   const Model model = avg_pool_float_3_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(avg_pool_float_3_relaxed::examples);
+  const std::vector<Request> requests = createRequests(avg_pool_float_3_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -105,12 +105,12 @@
   generated_tests::Execute(device,
                            avg_pool_float_4_relaxed::createTestModel,
                            avg_pool_float_4_relaxed::is_ignored,
-                           avg_pool_float_4_relaxed::examples);
+                           avg_pool_float_4_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, avg_pool_float_4_relaxed) {
   const Model model = avg_pool_float_4_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(avg_pool_float_4_relaxed::examples);
+  const std::vector<Request> requests = createRequests(avg_pool_float_4_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -128,12 +128,12 @@
   generated_tests::Execute(device,
                            avg_pool_float_5_relaxed::createTestModel,
                            avg_pool_float_5_relaxed::is_ignored,
-                           avg_pool_float_5_relaxed::examples);
+                           avg_pool_float_5_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, avg_pool_float_5_relaxed) {
   const Model model = avg_pool_float_5_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(avg_pool_float_5_relaxed::examples);
+  const std::vector<Request> requests = createRequests(avg_pool_float_5_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -151,12 +151,12 @@
   generated_tests::Execute(device,
                            batch_to_space::createTestModel,
                            batch_to_space::is_ignored,
-                           batch_to_space::examples);
+                           batch_to_space::get_examples());
 }
 
 TEST_F(ValidationTest, batch_to_space) {
   const Model model = batch_to_space::createTestModel();
-  const std::vector<Request> requests = createRequests(batch_to_space::examples);
+  const std::vector<Request> requests = createRequests(batch_to_space::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -174,12 +174,12 @@
   generated_tests::Execute(device,
                            batch_to_space_float_1::createTestModel,
                            batch_to_space_float_1::is_ignored,
-                           batch_to_space_float_1::examples);
+                           batch_to_space_float_1::get_examples());
 }
 
 TEST_F(ValidationTest, batch_to_space_float_1) {
   const Model model = batch_to_space_float_1::createTestModel();
-  const std::vector<Request> requests = createRequests(batch_to_space_float_1::examples);
+  const std::vector<Request> requests = createRequests(batch_to_space_float_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -197,12 +197,12 @@
   generated_tests::Execute(device,
                            batch_to_space_float_1_relaxed::createTestModel,
                            batch_to_space_float_1_relaxed::is_ignored,
-                           batch_to_space_float_1_relaxed::examples);
+                           batch_to_space_float_1_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, batch_to_space_float_1_relaxed) {
   const Model model = batch_to_space_float_1_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(batch_to_space_float_1_relaxed::examples);
+  const std::vector<Request> requests = createRequests(batch_to_space_float_1_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -220,12 +220,12 @@
   generated_tests::Execute(device,
                            batch_to_space_quant8_1::createTestModel,
                            batch_to_space_quant8_1::is_ignored,
-                           batch_to_space_quant8_1::examples);
+                           batch_to_space_quant8_1::get_examples());
 }
 
 TEST_F(ValidationTest, batch_to_space_quant8_1) {
   const Model model = batch_to_space_quant8_1::createTestModel();
-  const std::vector<Request> requests = createRequests(batch_to_space_quant8_1::examples);
+  const std::vector<Request> requests = createRequests(batch_to_space_quant8_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -243,12 +243,12 @@
   generated_tests::Execute(device,
                            batch_to_space_relaxed::createTestModel,
                            batch_to_space_relaxed::is_ignored,
-                           batch_to_space_relaxed::examples);
+                           batch_to_space_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, batch_to_space_relaxed) {
   const Model model = batch_to_space_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(batch_to_space_relaxed::examples);
+  const std::vector<Request> requests = createRequests(batch_to_space_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -266,12 +266,12 @@
   generated_tests::Execute(device,
                            concat_float_1_relaxed::createTestModel,
                            concat_float_1_relaxed::is_ignored,
-                           concat_float_1_relaxed::examples);
+                           concat_float_1_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, concat_float_1_relaxed) {
   const Model model = concat_float_1_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(concat_float_1_relaxed::examples);
+  const std::vector<Request> requests = createRequests(concat_float_1_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -289,12 +289,12 @@
   generated_tests::Execute(device,
                            concat_float_2_relaxed::createTestModel,
                            concat_float_2_relaxed::is_ignored,
-                           concat_float_2_relaxed::examples);
+                           concat_float_2_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, concat_float_2_relaxed) {
   const Model model = concat_float_2_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(concat_float_2_relaxed::examples);
+  const std::vector<Request> requests = createRequests(concat_float_2_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -312,12 +312,12 @@
   generated_tests::Execute(device,
                            concat_float_3_relaxed::createTestModel,
                            concat_float_3_relaxed::is_ignored,
-                           concat_float_3_relaxed::examples);
+                           concat_float_3_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, concat_float_3_relaxed) {
   const Model model = concat_float_3_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(concat_float_3_relaxed::examples);
+  const std::vector<Request> requests = createRequests(concat_float_3_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -335,12 +335,12 @@
   generated_tests::Execute(device,
                            conv_1_h3_w2_SAME_relaxed::createTestModel,
                            conv_1_h3_w2_SAME_relaxed::is_ignored,
-                           conv_1_h3_w2_SAME_relaxed::examples);
+                           conv_1_h3_w2_SAME_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, conv_1_h3_w2_SAME_relaxed) {
   const Model model = conv_1_h3_w2_SAME_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(conv_1_h3_w2_SAME_relaxed::examples);
+  const std::vector<Request> requests = createRequests(conv_1_h3_w2_SAME_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -358,12 +358,12 @@
   generated_tests::Execute(device,
                            conv_1_h3_w2_VALID_relaxed::createTestModel,
                            conv_1_h3_w2_VALID_relaxed::is_ignored,
-                           conv_1_h3_w2_VALID_relaxed::examples);
+                           conv_1_h3_w2_VALID_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, conv_1_h3_w2_VALID_relaxed) {
   const Model model = conv_1_h3_w2_VALID_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(conv_1_h3_w2_VALID_relaxed::examples);
+  const std::vector<Request> requests = createRequests(conv_1_h3_w2_VALID_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -381,12 +381,12 @@
   generated_tests::Execute(device,
                            conv_3_h3_w2_SAME_relaxed::createTestModel,
                            conv_3_h3_w2_SAME_relaxed::is_ignored,
-                           conv_3_h3_w2_SAME_relaxed::examples);
+                           conv_3_h3_w2_SAME_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, conv_3_h3_w2_SAME_relaxed) {
   const Model model = conv_3_h3_w2_SAME_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(conv_3_h3_w2_SAME_relaxed::examples);
+  const std::vector<Request> requests = createRequests(conv_3_h3_w2_SAME_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -404,12 +404,12 @@
   generated_tests::Execute(device,
                            conv_3_h3_w2_VALID_relaxed::createTestModel,
                            conv_3_h3_w2_VALID_relaxed::is_ignored,
-                           conv_3_h3_w2_VALID_relaxed::examples);
+                           conv_3_h3_w2_VALID_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, conv_3_h3_w2_VALID_relaxed) {
   const Model model = conv_3_h3_w2_VALID_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(conv_3_h3_w2_VALID_relaxed::examples);
+  const std::vector<Request> requests = createRequests(conv_3_h3_w2_VALID_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -427,12 +427,12 @@
   generated_tests::Execute(device,
                            conv_float_2_relaxed::createTestModel,
                            conv_float_2_relaxed::is_ignored,
-                           conv_float_2_relaxed::examples);
+                           conv_float_2_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, conv_float_2_relaxed) {
   const Model model = conv_float_2_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(conv_float_2_relaxed::examples);
+  const std::vector<Request> requests = createRequests(conv_float_2_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -450,12 +450,12 @@
   generated_tests::Execute(device,
                            conv_float_channels_relaxed::createTestModel,
                            conv_float_channels_relaxed::is_ignored,
-                           conv_float_channels_relaxed::examples);
+                           conv_float_channels_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, conv_float_channels_relaxed) {
   const Model model = conv_float_channels_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(conv_float_channels_relaxed::examples);
+  const std::vector<Request> requests = createRequests(conv_float_channels_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -473,12 +473,12 @@
   generated_tests::Execute(device,
                            conv_float_channels_weights_as_inputs_relaxed::createTestModel,
                            conv_float_channels_weights_as_inputs_relaxed::is_ignored,
-                           conv_float_channels_weights_as_inputs_relaxed::examples);
+                           conv_float_channels_weights_as_inputs_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, conv_float_channels_weights_as_inputs_relaxed) {
   const Model model = conv_float_channels_weights_as_inputs_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(conv_float_channels_weights_as_inputs_relaxed::examples);
+  const std::vector<Request> requests = createRequests(conv_float_channels_weights_as_inputs_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -496,12 +496,12 @@
   generated_tests::Execute(device,
                            conv_float_large_relaxed::createTestModel,
                            conv_float_large_relaxed::is_ignored,
-                           conv_float_large_relaxed::examples);
+                           conv_float_large_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, conv_float_large_relaxed) {
   const Model model = conv_float_large_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(conv_float_large_relaxed::examples);
+  const std::vector<Request> requests = createRequests(conv_float_large_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -519,12 +519,12 @@
   generated_tests::Execute(device,
                            conv_float_large_weights_as_inputs_relaxed::createTestModel,
                            conv_float_large_weights_as_inputs_relaxed::is_ignored,
-                           conv_float_large_weights_as_inputs_relaxed::examples);
+                           conv_float_large_weights_as_inputs_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, conv_float_large_weights_as_inputs_relaxed) {
   const Model model = conv_float_large_weights_as_inputs_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(conv_float_large_weights_as_inputs_relaxed::examples);
+  const std::vector<Request> requests = createRequests(conv_float_large_weights_as_inputs_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -542,12 +542,12 @@
   generated_tests::Execute(device,
                            conv_float_relaxed::createTestModel,
                            conv_float_relaxed::is_ignored,
-                           conv_float_relaxed::examples);
+                           conv_float_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, conv_float_relaxed) {
   const Model model = conv_float_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(conv_float_relaxed::examples);
+  const std::vector<Request> requests = createRequests(conv_float_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -565,12 +565,12 @@
   generated_tests::Execute(device,
                            conv_float_weights_as_inputs_relaxed::createTestModel,
                            conv_float_weights_as_inputs_relaxed::is_ignored,
-                           conv_float_weights_as_inputs_relaxed::examples);
+                           conv_float_weights_as_inputs_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, conv_float_weights_as_inputs_relaxed) {
   const Model model = conv_float_weights_as_inputs_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(conv_float_weights_as_inputs_relaxed::examples);
+  const std::vector<Request> requests = createRequests(conv_float_weights_as_inputs_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -588,12 +588,12 @@
   generated_tests::Execute(device,
                            depth_to_space_float_1_relaxed::createTestModel,
                            depth_to_space_float_1_relaxed::is_ignored,
-                           depth_to_space_float_1_relaxed::examples);
+                           depth_to_space_float_1_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, depth_to_space_float_1_relaxed) {
   const Model model = depth_to_space_float_1_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(depth_to_space_float_1_relaxed::examples);
+  const std::vector<Request> requests = createRequests(depth_to_space_float_1_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -611,12 +611,12 @@
   generated_tests::Execute(device,
                            depth_to_space_float_2_relaxed::createTestModel,
                            depth_to_space_float_2_relaxed::is_ignored,
-                           depth_to_space_float_2_relaxed::examples);
+                           depth_to_space_float_2_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, depth_to_space_float_2_relaxed) {
   const Model model = depth_to_space_float_2_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(depth_to_space_float_2_relaxed::examples);
+  const std::vector<Request> requests = createRequests(depth_to_space_float_2_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -634,12 +634,12 @@
   generated_tests::Execute(device,
                            depth_to_space_float_3_relaxed::createTestModel,
                            depth_to_space_float_3_relaxed::is_ignored,
-                           depth_to_space_float_3_relaxed::examples);
+                           depth_to_space_float_3_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, depth_to_space_float_3_relaxed) {
   const Model model = depth_to_space_float_3_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(depth_to_space_float_3_relaxed::examples);
+  const std::vector<Request> requests = createRequests(depth_to_space_float_3_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -657,12 +657,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_float_2_relaxed::createTestModel,
                            depthwise_conv2d_float_2_relaxed::is_ignored,
-                           depthwise_conv2d_float_2_relaxed::examples);
+                           depthwise_conv2d_float_2_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_float_2_relaxed) {
   const Model model = depthwise_conv2d_float_2_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_float_2_relaxed::examples);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_float_2_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -680,12 +680,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_float_large_2_relaxed::createTestModel,
                            depthwise_conv2d_float_large_2_relaxed::is_ignored,
-                           depthwise_conv2d_float_large_2_relaxed::examples);
+                           depthwise_conv2d_float_large_2_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_float_large_2_relaxed) {
   const Model model = depthwise_conv2d_float_large_2_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_float_large_2_relaxed::examples);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_float_large_2_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -703,12 +703,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_float_large_2_weights_as_inputs_relaxed::createTestModel,
                            depthwise_conv2d_float_large_2_weights_as_inputs_relaxed::is_ignored,
-                           depthwise_conv2d_float_large_2_weights_as_inputs_relaxed::examples);
+                           depthwise_conv2d_float_large_2_weights_as_inputs_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_float_large_2_weights_as_inputs_relaxed) {
   const Model model = depthwise_conv2d_float_large_2_weights_as_inputs_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_float_large_2_weights_as_inputs_relaxed::examples);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_float_large_2_weights_as_inputs_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -726,12 +726,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_float_large_relaxed::createTestModel,
                            depthwise_conv2d_float_large_relaxed::is_ignored,
-                           depthwise_conv2d_float_large_relaxed::examples);
+                           depthwise_conv2d_float_large_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_float_large_relaxed) {
   const Model model = depthwise_conv2d_float_large_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_float_large_relaxed::examples);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_float_large_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -749,12 +749,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_float_large_weights_as_inputs_relaxed::createTestModel,
                            depthwise_conv2d_float_large_weights_as_inputs_relaxed::is_ignored,
-                           depthwise_conv2d_float_large_weights_as_inputs_relaxed::examples);
+                           depthwise_conv2d_float_large_weights_as_inputs_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_float_large_weights_as_inputs_relaxed) {
   const Model model = depthwise_conv2d_float_large_weights_as_inputs_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_float_large_weights_as_inputs_relaxed::examples);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_float_large_weights_as_inputs_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -772,12 +772,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_float_relaxed::createTestModel,
                            depthwise_conv2d_float_relaxed::is_ignored,
-                           depthwise_conv2d_float_relaxed::examples);
+                           depthwise_conv2d_float_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_float_relaxed) {
   const Model model = depthwise_conv2d_float_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_float_relaxed::examples);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_float_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -795,12 +795,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_float_weights_as_inputs_relaxed::createTestModel,
                            depthwise_conv2d_float_weights_as_inputs_relaxed::is_ignored,
-                           depthwise_conv2d_float_weights_as_inputs_relaxed::examples);
+                           depthwise_conv2d_float_weights_as_inputs_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_float_weights_as_inputs_relaxed) {
   const Model model = depthwise_conv2d_float_weights_as_inputs_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_float_weights_as_inputs_relaxed::examples);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_float_weights_as_inputs_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -818,12 +818,12 @@
   generated_tests::Execute(device,
                            depthwise_conv_relaxed::createTestModel,
                            depthwise_conv_relaxed::is_ignored,
-                           depthwise_conv_relaxed::examples);
+                           depthwise_conv_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, depthwise_conv_relaxed) {
   const Model model = depthwise_conv_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(depthwise_conv_relaxed::examples);
+  const std::vector<Request> requests = createRequests(depthwise_conv_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -841,12 +841,12 @@
   generated_tests::Execute(device,
                            dequantize_relaxed::createTestModel,
                            dequantize_relaxed::is_ignored,
-                           dequantize_relaxed::examples);
+                           dequantize_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, dequantize_relaxed) {
   const Model model = dequantize_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(dequantize_relaxed::examples);
+  const std::vector<Request> requests = createRequests(dequantize_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -864,12 +864,12 @@
   generated_tests::Execute(device,
                            div::createTestModel,
                            div::is_ignored,
-                           div::examples);
+                           div::get_examples());
 }
 
 TEST_F(ValidationTest, div) {
   const Model model = div::createTestModel();
-  const std::vector<Request> requests = createRequests(div::examples);
+  const std::vector<Request> requests = createRequests(div::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -887,12 +887,12 @@
   generated_tests::Execute(device,
                            div_broadcast_float::createTestModel,
                            div_broadcast_float::is_ignored,
-                           div_broadcast_float::examples);
+                           div_broadcast_float::get_examples());
 }
 
 TEST_F(ValidationTest, div_broadcast_float) {
   const Model model = div_broadcast_float::createTestModel();
-  const std::vector<Request> requests = createRequests(div_broadcast_float::examples);
+  const std::vector<Request> requests = createRequests(div_broadcast_float::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -910,12 +910,12 @@
   generated_tests::Execute(device,
                            div_broadcast_float_relaxed::createTestModel,
                            div_broadcast_float_relaxed::is_ignored,
-                           div_broadcast_float_relaxed::examples);
+                           div_broadcast_float_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, div_broadcast_float_relaxed) {
   const Model model = div_broadcast_float_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(div_broadcast_float_relaxed::examples);
+  const std::vector<Request> requests = createRequests(div_broadcast_float_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -933,12 +933,12 @@
   generated_tests::Execute(device,
                            div_relaxed::createTestModel,
                            div_relaxed::is_ignored,
-                           div_relaxed::examples);
+                           div_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, div_relaxed) {
   const Model model = div_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(div_relaxed::examples);
+  const std::vector<Request> requests = createRequests(div_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -956,12 +956,12 @@
   generated_tests::Execute(device,
                            embedding_lookup_relaxed::createTestModel,
                            embedding_lookup_relaxed::is_ignored,
-                           embedding_lookup_relaxed::examples);
+                           embedding_lookup_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, embedding_lookup_relaxed) {
   const Model model = embedding_lookup_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(embedding_lookup_relaxed::examples);
+  const std::vector<Request> requests = createRequests(embedding_lookup_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -979,12 +979,12 @@
   generated_tests::Execute(device,
                            floor_relaxed::createTestModel,
                            floor_relaxed::is_ignored,
-                           floor_relaxed::examples);
+                           floor_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, floor_relaxed) {
   const Model model = floor_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(floor_relaxed::examples);
+  const std::vector<Request> requests = createRequests(floor_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1002,12 +1002,12 @@
   generated_tests::Execute(device,
                            fully_connected_float_2_relaxed::createTestModel,
                            fully_connected_float_2_relaxed::is_ignored,
-                           fully_connected_float_2_relaxed::examples);
+                           fully_connected_float_2_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, fully_connected_float_2_relaxed) {
   const Model model = fully_connected_float_2_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(fully_connected_float_2_relaxed::examples);
+  const std::vector<Request> requests = createRequests(fully_connected_float_2_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1025,12 +1025,12 @@
   generated_tests::Execute(device,
                            fully_connected_float_4d_simple::createTestModel,
                            fully_connected_float_4d_simple::is_ignored,
-                           fully_connected_float_4d_simple::examples);
+                           fully_connected_float_4d_simple::get_examples());
 }
 
 TEST_F(ValidationTest, fully_connected_float_4d_simple) {
   const Model model = fully_connected_float_4d_simple::createTestModel();
-  const std::vector<Request> requests = createRequests(fully_connected_float_4d_simple::examples);
+  const std::vector<Request> requests = createRequests(fully_connected_float_4d_simple::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1048,12 +1048,12 @@
   generated_tests::Execute(device,
                            fully_connected_float_4d_simple_relaxed::createTestModel,
                            fully_connected_float_4d_simple_relaxed::is_ignored,
-                           fully_connected_float_4d_simple_relaxed::examples);
+                           fully_connected_float_4d_simple_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, fully_connected_float_4d_simple_relaxed) {
   const Model model = fully_connected_float_4d_simple_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(fully_connected_float_4d_simple_relaxed::examples);
+  const std::vector<Request> requests = createRequests(fully_connected_float_4d_simple_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1071,12 +1071,12 @@
   generated_tests::Execute(device,
                            fully_connected_float_large_relaxed::createTestModel,
                            fully_connected_float_large_relaxed::is_ignored,
-                           fully_connected_float_large_relaxed::examples);
+                           fully_connected_float_large_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, fully_connected_float_large_relaxed) {
   const Model model = fully_connected_float_large_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(fully_connected_float_large_relaxed::examples);
+  const std::vector<Request> requests = createRequests(fully_connected_float_large_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1094,12 +1094,12 @@
   generated_tests::Execute(device,
                            fully_connected_float_large_weights_as_inputs_relaxed::createTestModel,
                            fully_connected_float_large_weights_as_inputs_relaxed::is_ignored,
-                           fully_connected_float_large_weights_as_inputs_relaxed::examples);
+                           fully_connected_float_large_weights_as_inputs_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, fully_connected_float_large_weights_as_inputs_relaxed) {
   const Model model = fully_connected_float_large_weights_as_inputs_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(fully_connected_float_large_weights_as_inputs_relaxed::examples);
+  const std::vector<Request> requests = createRequests(fully_connected_float_large_weights_as_inputs_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1117,12 +1117,12 @@
   generated_tests::Execute(device,
                            fully_connected_float_relaxed::createTestModel,
                            fully_connected_float_relaxed::is_ignored,
-                           fully_connected_float_relaxed::examples);
+                           fully_connected_float_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, fully_connected_float_relaxed) {
   const Model model = fully_connected_float_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(fully_connected_float_relaxed::examples);
+  const std::vector<Request> requests = createRequests(fully_connected_float_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1140,12 +1140,12 @@
   generated_tests::Execute(device,
                            fully_connected_float_weights_as_inputs_relaxed::createTestModel,
                            fully_connected_float_weights_as_inputs_relaxed::is_ignored,
-                           fully_connected_float_weights_as_inputs_relaxed::examples);
+                           fully_connected_float_weights_as_inputs_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, fully_connected_float_weights_as_inputs_relaxed) {
   const Model model = fully_connected_float_weights_as_inputs_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(fully_connected_float_weights_as_inputs_relaxed::examples);
+  const std::vector<Request> requests = createRequests(fully_connected_float_weights_as_inputs_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1163,12 +1163,12 @@
   generated_tests::Execute(device,
                            hashtable_lookup_float_relaxed::createTestModel,
                            hashtable_lookup_float_relaxed::is_ignored,
-                           hashtable_lookup_float_relaxed::examples);
+                           hashtable_lookup_float_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, hashtable_lookup_float_relaxed) {
   const Model model = hashtable_lookup_float_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(hashtable_lookup_float_relaxed::examples);
+  const std::vector<Request> requests = createRequests(hashtable_lookup_float_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1186,12 +1186,12 @@
   generated_tests::Execute(device,
                            l2_normalization_2_relaxed::createTestModel,
                            l2_normalization_2_relaxed::is_ignored,
-                           l2_normalization_2_relaxed::examples);
+                           l2_normalization_2_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, l2_normalization_2_relaxed) {
   const Model model = l2_normalization_2_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(l2_normalization_2_relaxed::examples);
+  const std::vector<Request> requests = createRequests(l2_normalization_2_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1209,12 +1209,12 @@
   generated_tests::Execute(device,
                            l2_normalization_large_relaxed::createTestModel,
                            l2_normalization_large_relaxed::is_ignored,
-                           l2_normalization_large_relaxed::examples);
+                           l2_normalization_large_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, l2_normalization_large_relaxed) {
   const Model model = l2_normalization_large_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(l2_normalization_large_relaxed::examples);
+  const std::vector<Request> requests = createRequests(l2_normalization_large_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1232,12 +1232,12 @@
   generated_tests::Execute(device,
                            l2_normalization_relaxed::createTestModel,
                            l2_normalization_relaxed::is_ignored,
-                           l2_normalization_relaxed::examples);
+                           l2_normalization_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, l2_normalization_relaxed) {
   const Model model = l2_normalization_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(l2_normalization_relaxed::examples);
+  const std::vector<Request> requests = createRequests(l2_normalization_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1255,12 +1255,12 @@
   generated_tests::Execute(device,
                            l2_pool_float_2_relaxed::createTestModel,
                            l2_pool_float_2_relaxed::is_ignored,
-                           l2_pool_float_2_relaxed::examples);
+                           l2_pool_float_2_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, l2_pool_float_2_relaxed) {
   const Model model = l2_pool_float_2_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(l2_pool_float_2_relaxed::examples);
+  const std::vector<Request> requests = createRequests(l2_pool_float_2_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1278,12 +1278,12 @@
   generated_tests::Execute(device,
                            l2_pool_float_large_relaxed::createTestModel,
                            l2_pool_float_large_relaxed::is_ignored,
-                           l2_pool_float_large_relaxed::examples);
+                           l2_pool_float_large_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, l2_pool_float_large_relaxed) {
   const Model model = l2_pool_float_large_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(l2_pool_float_large_relaxed::examples);
+  const std::vector<Request> requests = createRequests(l2_pool_float_large_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1301,12 +1301,12 @@
   generated_tests::Execute(device,
                            l2_pool_float_relaxed::createTestModel,
                            l2_pool_float_relaxed::is_ignored,
-                           l2_pool_float_relaxed::examples);
+                           l2_pool_float_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, l2_pool_float_relaxed) {
   const Model model = l2_pool_float_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(l2_pool_float_relaxed::examples);
+  const std::vector<Request> requests = createRequests(l2_pool_float_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1324,12 +1324,12 @@
   generated_tests::Execute(device,
                            local_response_norm_float_1_relaxed::createTestModel,
                            local_response_norm_float_1_relaxed::is_ignored,
-                           local_response_norm_float_1_relaxed::examples);
+                           local_response_norm_float_1_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, local_response_norm_float_1_relaxed) {
   const Model model = local_response_norm_float_1_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(local_response_norm_float_1_relaxed::examples);
+  const std::vector<Request> requests = createRequests(local_response_norm_float_1_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1347,12 +1347,12 @@
   generated_tests::Execute(device,
                            local_response_norm_float_2_relaxed::createTestModel,
                            local_response_norm_float_2_relaxed::is_ignored,
-                           local_response_norm_float_2_relaxed::examples);
+                           local_response_norm_float_2_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, local_response_norm_float_2_relaxed) {
   const Model model = local_response_norm_float_2_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(local_response_norm_float_2_relaxed::examples);
+  const std::vector<Request> requests = createRequests(local_response_norm_float_2_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1370,12 +1370,12 @@
   generated_tests::Execute(device,
                            local_response_norm_float_3_relaxed::createTestModel,
                            local_response_norm_float_3_relaxed::is_ignored,
-                           local_response_norm_float_3_relaxed::examples);
+                           local_response_norm_float_3_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, local_response_norm_float_3_relaxed) {
   const Model model = local_response_norm_float_3_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(local_response_norm_float_3_relaxed::examples);
+  const std::vector<Request> requests = createRequests(local_response_norm_float_3_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1393,12 +1393,12 @@
   generated_tests::Execute(device,
                            local_response_norm_float_4_relaxed::createTestModel,
                            local_response_norm_float_4_relaxed::is_ignored,
-                           local_response_norm_float_4_relaxed::examples);
+                           local_response_norm_float_4_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, local_response_norm_float_4_relaxed) {
   const Model model = local_response_norm_float_4_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(local_response_norm_float_4_relaxed::examples);
+  const std::vector<Request> requests = createRequests(local_response_norm_float_4_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1416,12 +1416,12 @@
   generated_tests::Execute(device,
                            logistic_float_1_relaxed::createTestModel,
                            logistic_float_1_relaxed::is_ignored,
-                           logistic_float_1_relaxed::examples);
+                           logistic_float_1_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, logistic_float_1_relaxed) {
   const Model model = logistic_float_1_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(logistic_float_1_relaxed::examples);
+  const std::vector<Request> requests = createRequests(logistic_float_1_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1439,12 +1439,12 @@
   generated_tests::Execute(device,
                            logistic_float_2_relaxed::createTestModel,
                            logistic_float_2_relaxed::is_ignored,
-                           logistic_float_2_relaxed::examples);
+                           logistic_float_2_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, logistic_float_2_relaxed) {
   const Model model = logistic_float_2_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(logistic_float_2_relaxed::examples);
+  const std::vector<Request> requests = createRequests(logistic_float_2_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1462,12 +1462,12 @@
   generated_tests::Execute(device,
                            lsh_projection_2_relaxed::createTestModel,
                            lsh_projection_2_relaxed::is_ignored,
-                           lsh_projection_2_relaxed::examples);
+                           lsh_projection_2_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, lsh_projection_2_relaxed) {
   const Model model = lsh_projection_2_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(lsh_projection_2_relaxed::examples);
+  const std::vector<Request> requests = createRequests(lsh_projection_2_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1485,12 +1485,12 @@
   generated_tests::Execute(device,
                            lsh_projection_relaxed::createTestModel,
                            lsh_projection_relaxed::is_ignored,
-                           lsh_projection_relaxed::examples);
+                           lsh_projection_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, lsh_projection_relaxed) {
   const Model model = lsh_projection_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(lsh_projection_relaxed::examples);
+  const std::vector<Request> requests = createRequests(lsh_projection_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1508,12 +1508,12 @@
   generated_tests::Execute(device,
                            lsh_projection_weights_as_inputs_relaxed::createTestModel,
                            lsh_projection_weights_as_inputs_relaxed::is_ignored,
-                           lsh_projection_weights_as_inputs_relaxed::examples);
+                           lsh_projection_weights_as_inputs_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, lsh_projection_weights_as_inputs_relaxed) {
   const Model model = lsh_projection_weights_as_inputs_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(lsh_projection_weights_as_inputs_relaxed::examples);
+  const std::vector<Request> requests = createRequests(lsh_projection_weights_as_inputs_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1531,12 +1531,12 @@
   generated_tests::Execute(device,
                            lstm2_relaxed::createTestModel,
                            lstm2_relaxed::is_ignored,
-                           lstm2_relaxed::examples);
+                           lstm2_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, lstm2_relaxed) {
   const Model model = lstm2_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(lstm2_relaxed::examples);
+  const std::vector<Request> requests = createRequests(lstm2_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1554,12 +1554,12 @@
   generated_tests::Execute(device,
                            lstm2_state2_relaxed::createTestModel,
                            lstm2_state2_relaxed::is_ignored,
-                           lstm2_state2_relaxed::examples);
+                           lstm2_state2_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, lstm2_state2_relaxed) {
   const Model model = lstm2_state2_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(lstm2_state2_relaxed::examples);
+  const std::vector<Request> requests = createRequests(lstm2_state2_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1577,12 +1577,12 @@
   generated_tests::Execute(device,
                            lstm2_state_relaxed::createTestModel,
                            lstm2_state_relaxed::is_ignored,
-                           lstm2_state_relaxed::examples);
+                           lstm2_state_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, lstm2_state_relaxed) {
   const Model model = lstm2_state_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(lstm2_state_relaxed::examples);
+  const std::vector<Request> requests = createRequests(lstm2_state_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1600,12 +1600,12 @@
   generated_tests::Execute(device,
                            lstm3_relaxed::createTestModel,
                            lstm3_relaxed::is_ignored,
-                           lstm3_relaxed::examples);
+                           lstm3_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, lstm3_relaxed) {
   const Model model = lstm3_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(lstm3_relaxed::examples);
+  const std::vector<Request> requests = createRequests(lstm3_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1623,12 +1623,12 @@
   generated_tests::Execute(device,
                            lstm3_state2_relaxed::createTestModel,
                            lstm3_state2_relaxed::is_ignored,
-                           lstm3_state2_relaxed::examples);
+                           lstm3_state2_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, lstm3_state2_relaxed) {
   const Model model = lstm3_state2_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(lstm3_state2_relaxed::examples);
+  const std::vector<Request> requests = createRequests(lstm3_state2_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1646,12 +1646,12 @@
   generated_tests::Execute(device,
                            lstm3_state3_relaxed::createTestModel,
                            lstm3_state3_relaxed::is_ignored,
-                           lstm3_state3_relaxed::examples);
+                           lstm3_state3_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, lstm3_state3_relaxed) {
   const Model model = lstm3_state3_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(lstm3_state3_relaxed::examples);
+  const std::vector<Request> requests = createRequests(lstm3_state3_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1669,12 +1669,12 @@
   generated_tests::Execute(device,
                            lstm3_state_relaxed::createTestModel,
                            lstm3_state_relaxed::is_ignored,
-                           lstm3_state_relaxed::examples);
+                           lstm3_state_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, lstm3_state_relaxed) {
   const Model model = lstm3_state_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(lstm3_state_relaxed::examples);
+  const std::vector<Request> requests = createRequests(lstm3_state_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1692,12 +1692,12 @@
   generated_tests::Execute(device,
                            lstm_relaxed::createTestModel,
                            lstm_relaxed::is_ignored,
-                           lstm_relaxed::examples);
+                           lstm_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, lstm_relaxed) {
   const Model model = lstm_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(lstm_relaxed::examples);
+  const std::vector<Request> requests = createRequests(lstm_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1715,12 +1715,12 @@
   generated_tests::Execute(device,
                            lstm_state2_relaxed::createTestModel,
                            lstm_state2_relaxed::is_ignored,
-                           lstm_state2_relaxed::examples);
+                           lstm_state2_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, lstm_state2_relaxed) {
   const Model model = lstm_state2_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(lstm_state2_relaxed::examples);
+  const std::vector<Request> requests = createRequests(lstm_state2_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1738,12 +1738,12 @@
   generated_tests::Execute(device,
                            lstm_state_relaxed::createTestModel,
                            lstm_state_relaxed::is_ignored,
-                           lstm_state_relaxed::examples);
+                           lstm_state_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, lstm_state_relaxed) {
   const Model model = lstm_state_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(lstm_state_relaxed::examples);
+  const std::vector<Request> requests = createRequests(lstm_state_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1761,12 +1761,12 @@
   generated_tests::Execute(device,
                            max_pool_float_1_relaxed::createTestModel,
                            max_pool_float_1_relaxed::is_ignored,
-                           max_pool_float_1_relaxed::examples);
+                           max_pool_float_1_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, max_pool_float_1_relaxed) {
   const Model model = max_pool_float_1_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(max_pool_float_1_relaxed::examples);
+  const std::vector<Request> requests = createRequests(max_pool_float_1_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1784,12 +1784,12 @@
   generated_tests::Execute(device,
                            max_pool_float_2_relaxed::createTestModel,
                            max_pool_float_2_relaxed::is_ignored,
-                           max_pool_float_2_relaxed::examples);
+                           max_pool_float_2_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, max_pool_float_2_relaxed) {
   const Model model = max_pool_float_2_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(max_pool_float_2_relaxed::examples);
+  const std::vector<Request> requests = createRequests(max_pool_float_2_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1807,12 +1807,12 @@
   generated_tests::Execute(device,
                            max_pool_float_3_relaxed::createTestModel,
                            max_pool_float_3_relaxed::is_ignored,
-                           max_pool_float_3_relaxed::examples);
+                           max_pool_float_3_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, max_pool_float_3_relaxed) {
   const Model model = max_pool_float_3_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(max_pool_float_3_relaxed::examples);
+  const std::vector<Request> requests = createRequests(max_pool_float_3_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1830,12 +1830,12 @@
   generated_tests::Execute(device,
                            max_pool_float_4_relaxed::createTestModel,
                            max_pool_float_4_relaxed::is_ignored,
-                           max_pool_float_4_relaxed::examples);
+                           max_pool_float_4_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, max_pool_float_4_relaxed) {
   const Model model = max_pool_float_4_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(max_pool_float_4_relaxed::examples);
+  const std::vector<Request> requests = createRequests(max_pool_float_4_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1853,12 +1853,12 @@
   generated_tests::Execute(device,
                            mean::createTestModel,
                            mean::is_ignored,
-                           mean::examples);
+                           mean::get_examples());
 }
 
 TEST_F(ValidationTest, mean) {
   const Model model = mean::createTestModel();
-  const std::vector<Request> requests = createRequests(mean::examples);
+  const std::vector<Request> requests = createRequests(mean::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1876,12 +1876,12 @@
   generated_tests::Execute(device,
                            mean_float_1::createTestModel,
                            mean_float_1::is_ignored,
-                           mean_float_1::examples);
+                           mean_float_1::get_examples());
 }
 
 TEST_F(ValidationTest, mean_float_1) {
   const Model model = mean_float_1::createTestModel();
-  const std::vector<Request> requests = createRequests(mean_float_1::examples);
+  const std::vector<Request> requests = createRequests(mean_float_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1899,12 +1899,12 @@
   generated_tests::Execute(device,
                            mean_float_1_relaxed::createTestModel,
                            mean_float_1_relaxed::is_ignored,
-                           mean_float_1_relaxed::examples);
+                           mean_float_1_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, mean_float_1_relaxed) {
   const Model model = mean_float_1_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(mean_float_1_relaxed::examples);
+  const std::vector<Request> requests = createRequests(mean_float_1_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1922,12 +1922,12 @@
   generated_tests::Execute(device,
                            mean_float_2::createTestModel,
                            mean_float_2::is_ignored,
-                           mean_float_2::examples);
+                           mean_float_2::get_examples());
 }
 
 TEST_F(ValidationTest, mean_float_2) {
   const Model model = mean_float_2::createTestModel();
-  const std::vector<Request> requests = createRequests(mean_float_2::examples);
+  const std::vector<Request> requests = createRequests(mean_float_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1945,12 +1945,12 @@
   generated_tests::Execute(device,
                            mean_float_2_relaxed::createTestModel,
                            mean_float_2_relaxed::is_ignored,
-                           mean_float_2_relaxed::examples);
+                           mean_float_2_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, mean_float_2_relaxed) {
   const Model model = mean_float_2_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(mean_float_2_relaxed::examples);
+  const std::vector<Request> requests = createRequests(mean_float_2_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1968,12 +1968,12 @@
   generated_tests::Execute(device,
                            mean_quant8_1::createTestModel,
                            mean_quant8_1::is_ignored,
-                           mean_quant8_1::examples);
+                           mean_quant8_1::get_examples());
 }
 
 TEST_F(ValidationTest, mean_quant8_1) {
   const Model model = mean_quant8_1::createTestModel();
-  const std::vector<Request> requests = createRequests(mean_quant8_1::examples);
+  const std::vector<Request> requests = createRequests(mean_quant8_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1991,12 +1991,12 @@
   generated_tests::Execute(device,
                            mean_quant8_2::createTestModel,
                            mean_quant8_2::is_ignored,
-                           mean_quant8_2::examples);
+                           mean_quant8_2::get_examples());
 }
 
 TEST_F(ValidationTest, mean_quant8_2) {
   const Model model = mean_quant8_2::createTestModel();
-  const std::vector<Request> requests = createRequests(mean_quant8_2::examples);
+  const std::vector<Request> requests = createRequests(mean_quant8_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2014,12 +2014,12 @@
   generated_tests::Execute(device,
                            mean_relaxed::createTestModel,
                            mean_relaxed::is_ignored,
-                           mean_relaxed::examples);
+                           mean_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, mean_relaxed) {
   const Model model = mean_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(mean_relaxed::examples);
+  const std::vector<Request> requests = createRequests(mean_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2037,12 +2037,12 @@
   generated_tests::Execute(device,
                            mobilenet_224_gender_basic_fixed_relaxed::createTestModel,
                            mobilenet_224_gender_basic_fixed_relaxed::is_ignored,
-                           mobilenet_224_gender_basic_fixed_relaxed::examples);
+                           mobilenet_224_gender_basic_fixed_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, mobilenet_224_gender_basic_fixed_relaxed) {
   const Model model = mobilenet_224_gender_basic_fixed_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(mobilenet_224_gender_basic_fixed_relaxed::examples);
+  const std::vector<Request> requests = createRequests(mobilenet_224_gender_basic_fixed_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2060,12 +2060,12 @@
   generated_tests::Execute(device,
                            mul_relaxed::createTestModel,
                            mul_relaxed::is_ignored,
-                           mul_relaxed::examples);
+                           mul_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, mul_relaxed) {
   const Model model = mul_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(mul_relaxed::examples);
+  const std::vector<Request> requests = createRequests(mul_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2083,12 +2083,12 @@
   generated_tests::Execute(device,
                            mul_relu_relaxed::createTestModel,
                            mul_relu_relaxed::is_ignored,
-                           mul_relu_relaxed::examples);
+                           mul_relu_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, mul_relu_relaxed) {
   const Model model = mul_relu_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(mul_relu_relaxed::examples);
+  const std::vector<Request> requests = createRequests(mul_relu_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2106,12 +2106,12 @@
   generated_tests::Execute(device,
                            pad::createTestModel,
                            pad::is_ignored,
-                           pad::examples);
+                           pad::get_examples());
 }
 
 TEST_F(ValidationTest, pad) {
   const Model model = pad::createTestModel();
-  const std::vector<Request> requests = createRequests(pad::examples);
+  const std::vector<Request> requests = createRequests(pad::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2129,12 +2129,12 @@
   generated_tests::Execute(device,
                            pad_float_1::createTestModel,
                            pad_float_1::is_ignored,
-                           pad_float_1::examples);
+                           pad_float_1::get_examples());
 }
 
 TEST_F(ValidationTest, pad_float_1) {
   const Model model = pad_float_1::createTestModel();
-  const std::vector<Request> requests = createRequests(pad_float_1::examples);
+  const std::vector<Request> requests = createRequests(pad_float_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2152,12 +2152,12 @@
   generated_tests::Execute(device,
                            pad_float_1_relaxed::createTestModel,
                            pad_float_1_relaxed::is_ignored,
-                           pad_float_1_relaxed::examples);
+                           pad_float_1_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, pad_float_1_relaxed) {
   const Model model = pad_float_1_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(pad_float_1_relaxed::examples);
+  const std::vector<Request> requests = createRequests(pad_float_1_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2175,12 +2175,12 @@
   generated_tests::Execute(device,
                            pad_relaxed::createTestModel,
                            pad_relaxed::is_ignored,
-                           pad_relaxed::examples);
+                           pad_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, pad_relaxed) {
   const Model model = pad_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(pad_relaxed::examples);
+  const std::vector<Request> requests = createRequests(pad_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2198,12 +2198,12 @@
   generated_tests::Execute(device,
                            relu1_float_1_relaxed::createTestModel,
                            relu1_float_1_relaxed::is_ignored,
-                           relu1_float_1_relaxed::examples);
+                           relu1_float_1_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, relu1_float_1_relaxed) {
   const Model model = relu1_float_1_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(relu1_float_1_relaxed::examples);
+  const std::vector<Request> requests = createRequests(relu1_float_1_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2221,12 +2221,12 @@
   generated_tests::Execute(device,
                            relu1_float_2_relaxed::createTestModel,
                            relu1_float_2_relaxed::is_ignored,
-                           relu1_float_2_relaxed::examples);
+                           relu1_float_2_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, relu1_float_2_relaxed) {
   const Model model = relu1_float_2_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(relu1_float_2_relaxed::examples);
+  const std::vector<Request> requests = createRequests(relu1_float_2_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2244,12 +2244,12 @@
   generated_tests::Execute(device,
                            relu6_float_1_relaxed::createTestModel,
                            relu6_float_1_relaxed::is_ignored,
-                           relu6_float_1_relaxed::examples);
+                           relu6_float_1_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, relu6_float_1_relaxed) {
   const Model model = relu6_float_1_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(relu6_float_1_relaxed::examples);
+  const std::vector<Request> requests = createRequests(relu6_float_1_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2267,12 +2267,12 @@
   generated_tests::Execute(device,
                            relu6_float_2_relaxed::createTestModel,
                            relu6_float_2_relaxed::is_ignored,
-                           relu6_float_2_relaxed::examples);
+                           relu6_float_2_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, relu6_float_2_relaxed) {
   const Model model = relu6_float_2_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(relu6_float_2_relaxed::examples);
+  const std::vector<Request> requests = createRequests(relu6_float_2_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2290,12 +2290,12 @@
   generated_tests::Execute(device,
                            relu_float_1_relaxed::createTestModel,
                            relu_float_1_relaxed::is_ignored,
-                           relu_float_1_relaxed::examples);
+                           relu_float_1_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, relu_float_1_relaxed) {
   const Model model = relu_float_1_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(relu_float_1_relaxed::examples);
+  const std::vector<Request> requests = createRequests(relu_float_1_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2313,12 +2313,12 @@
   generated_tests::Execute(device,
                            relu_float_2_relaxed::createTestModel,
                            relu_float_2_relaxed::is_ignored,
-                           relu_float_2_relaxed::examples);
+                           relu_float_2_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, relu_float_2_relaxed) {
   const Model model = relu_float_2_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(relu_float_2_relaxed::examples);
+  const std::vector<Request> requests = createRequests(relu_float_2_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2336,12 +2336,12 @@
   generated_tests::Execute(device,
                            reshape_relaxed::createTestModel,
                            reshape_relaxed::is_ignored,
-                           reshape_relaxed::examples);
+                           reshape_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, reshape_relaxed) {
   const Model model = reshape_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(reshape_relaxed::examples);
+  const std::vector<Request> requests = createRequests(reshape_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2359,12 +2359,12 @@
   generated_tests::Execute(device,
                            reshape_weights_as_inputs_relaxed::createTestModel,
                            reshape_weights_as_inputs_relaxed::is_ignored,
-                           reshape_weights_as_inputs_relaxed::examples);
+                           reshape_weights_as_inputs_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, reshape_weights_as_inputs_relaxed) {
   const Model model = reshape_weights_as_inputs_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(reshape_weights_as_inputs_relaxed::examples);
+  const std::vector<Request> requests = createRequests(reshape_weights_as_inputs_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2382,12 +2382,12 @@
   generated_tests::Execute(device,
                            resize_bilinear_2_relaxed::createTestModel,
                            resize_bilinear_2_relaxed::is_ignored,
-                           resize_bilinear_2_relaxed::examples);
+                           resize_bilinear_2_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, resize_bilinear_2_relaxed) {
   const Model model = resize_bilinear_2_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(resize_bilinear_2_relaxed::examples);
+  const std::vector<Request> requests = createRequests(resize_bilinear_2_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2405,12 +2405,12 @@
   generated_tests::Execute(device,
                            resize_bilinear_relaxed::createTestModel,
                            resize_bilinear_relaxed::is_ignored,
-                           resize_bilinear_relaxed::examples);
+                           resize_bilinear_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, resize_bilinear_relaxed) {
   const Model model = resize_bilinear_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(resize_bilinear_relaxed::examples);
+  const std::vector<Request> requests = createRequests(resize_bilinear_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2428,12 +2428,12 @@
   generated_tests::Execute(device,
                            rnn_relaxed::createTestModel,
                            rnn_relaxed::is_ignored,
-                           rnn_relaxed::examples);
+                           rnn_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, rnn_relaxed) {
   const Model model = rnn_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(rnn_relaxed::examples);
+  const std::vector<Request> requests = createRequests(rnn_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2451,12 +2451,12 @@
   generated_tests::Execute(device,
                            rnn_state_relaxed::createTestModel,
                            rnn_state_relaxed::is_ignored,
-                           rnn_state_relaxed::examples);
+                           rnn_state_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, rnn_state_relaxed) {
   const Model model = rnn_state_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(rnn_state_relaxed::examples);
+  const std::vector<Request> requests = createRequests(rnn_state_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2474,12 +2474,12 @@
   generated_tests::Execute(device,
                            softmax_float_1_relaxed::createTestModel,
                            softmax_float_1_relaxed::is_ignored,
-                           softmax_float_1_relaxed::examples);
+                           softmax_float_1_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, softmax_float_1_relaxed) {
   const Model model = softmax_float_1_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(softmax_float_1_relaxed::examples);
+  const std::vector<Request> requests = createRequests(softmax_float_1_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2497,12 +2497,12 @@
   generated_tests::Execute(device,
                            softmax_float_2_relaxed::createTestModel,
                            softmax_float_2_relaxed::is_ignored,
-                           softmax_float_2_relaxed::examples);
+                           softmax_float_2_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, softmax_float_2_relaxed) {
   const Model model = softmax_float_2_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(softmax_float_2_relaxed::examples);
+  const std::vector<Request> requests = createRequests(softmax_float_2_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2520,12 +2520,12 @@
   generated_tests::Execute(device,
                            space_to_batch::createTestModel,
                            space_to_batch::is_ignored,
-                           space_to_batch::examples);
+                           space_to_batch::get_examples());
 }
 
 TEST_F(ValidationTest, space_to_batch) {
   const Model model = space_to_batch::createTestModel();
-  const std::vector<Request> requests = createRequests(space_to_batch::examples);
+  const std::vector<Request> requests = createRequests(space_to_batch::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2543,12 +2543,12 @@
   generated_tests::Execute(device,
                            space_to_batch_float_1::createTestModel,
                            space_to_batch_float_1::is_ignored,
-                           space_to_batch_float_1::examples);
+                           space_to_batch_float_1::get_examples());
 }
 
 TEST_F(ValidationTest, space_to_batch_float_1) {
   const Model model = space_to_batch_float_1::createTestModel();
-  const std::vector<Request> requests = createRequests(space_to_batch_float_1::examples);
+  const std::vector<Request> requests = createRequests(space_to_batch_float_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2566,12 +2566,12 @@
   generated_tests::Execute(device,
                            space_to_batch_float_1_relaxed::createTestModel,
                            space_to_batch_float_1_relaxed::is_ignored,
-                           space_to_batch_float_1_relaxed::examples);
+                           space_to_batch_float_1_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, space_to_batch_float_1_relaxed) {
   const Model model = space_to_batch_float_1_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(space_to_batch_float_1_relaxed::examples);
+  const std::vector<Request> requests = createRequests(space_to_batch_float_1_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2589,12 +2589,12 @@
   generated_tests::Execute(device,
                            space_to_batch_float_2::createTestModel,
                            space_to_batch_float_2::is_ignored,
-                           space_to_batch_float_2::examples);
+                           space_to_batch_float_2::get_examples());
 }
 
 TEST_F(ValidationTest, space_to_batch_float_2) {
   const Model model = space_to_batch_float_2::createTestModel();
-  const std::vector<Request> requests = createRequests(space_to_batch_float_2::examples);
+  const std::vector<Request> requests = createRequests(space_to_batch_float_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2612,12 +2612,12 @@
   generated_tests::Execute(device,
                            space_to_batch_float_2_relaxed::createTestModel,
                            space_to_batch_float_2_relaxed::is_ignored,
-                           space_to_batch_float_2_relaxed::examples);
+                           space_to_batch_float_2_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, space_to_batch_float_2_relaxed) {
   const Model model = space_to_batch_float_2_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(space_to_batch_float_2_relaxed::examples);
+  const std::vector<Request> requests = createRequests(space_to_batch_float_2_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2635,12 +2635,12 @@
   generated_tests::Execute(device,
                            space_to_batch_float_3::createTestModel,
                            space_to_batch_float_3::is_ignored,
-                           space_to_batch_float_3::examples);
+                           space_to_batch_float_3::get_examples());
 }
 
 TEST_F(ValidationTest, space_to_batch_float_3) {
   const Model model = space_to_batch_float_3::createTestModel();
-  const std::vector<Request> requests = createRequests(space_to_batch_float_3::examples);
+  const std::vector<Request> requests = createRequests(space_to_batch_float_3::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2658,12 +2658,12 @@
   generated_tests::Execute(device,
                            space_to_batch_float_3_relaxed::createTestModel,
                            space_to_batch_float_3_relaxed::is_ignored,
-                           space_to_batch_float_3_relaxed::examples);
+                           space_to_batch_float_3_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, space_to_batch_float_3_relaxed) {
   const Model model = space_to_batch_float_3_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(space_to_batch_float_3_relaxed::examples);
+  const std::vector<Request> requests = createRequests(space_to_batch_float_3_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2681,12 +2681,12 @@
   generated_tests::Execute(device,
                            space_to_batch_quant8_1::createTestModel,
                            space_to_batch_quant8_1::is_ignored,
-                           space_to_batch_quant8_1::examples);
+                           space_to_batch_quant8_1::get_examples());
 }
 
 TEST_F(ValidationTest, space_to_batch_quant8_1) {
   const Model model = space_to_batch_quant8_1::createTestModel();
-  const std::vector<Request> requests = createRequests(space_to_batch_quant8_1::examples);
+  const std::vector<Request> requests = createRequests(space_to_batch_quant8_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2704,12 +2704,12 @@
   generated_tests::Execute(device,
                            space_to_batch_quant8_2::createTestModel,
                            space_to_batch_quant8_2::is_ignored,
-                           space_to_batch_quant8_2::examples);
+                           space_to_batch_quant8_2::get_examples());
 }
 
 TEST_F(ValidationTest, space_to_batch_quant8_2) {
   const Model model = space_to_batch_quant8_2::createTestModel();
-  const std::vector<Request> requests = createRequests(space_to_batch_quant8_2::examples);
+  const std::vector<Request> requests = createRequests(space_to_batch_quant8_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2727,12 +2727,12 @@
   generated_tests::Execute(device,
                            space_to_batch_quant8_3::createTestModel,
                            space_to_batch_quant8_3::is_ignored,
-                           space_to_batch_quant8_3::examples);
+                           space_to_batch_quant8_3::get_examples());
 }
 
 TEST_F(ValidationTest, space_to_batch_quant8_3) {
   const Model model = space_to_batch_quant8_3::createTestModel();
-  const std::vector<Request> requests = createRequests(space_to_batch_quant8_3::examples);
+  const std::vector<Request> requests = createRequests(space_to_batch_quant8_3::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2750,12 +2750,12 @@
   generated_tests::Execute(device,
                            space_to_batch_relaxed::createTestModel,
                            space_to_batch_relaxed::is_ignored,
-                           space_to_batch_relaxed::examples);
+                           space_to_batch_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, space_to_batch_relaxed) {
   const Model model = space_to_batch_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(space_to_batch_relaxed::examples);
+  const std::vector<Request> requests = createRequests(space_to_batch_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2773,12 +2773,12 @@
   generated_tests::Execute(device,
                            space_to_depth_float_1_relaxed::createTestModel,
                            space_to_depth_float_1_relaxed::is_ignored,
-                           space_to_depth_float_1_relaxed::examples);
+                           space_to_depth_float_1_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, space_to_depth_float_1_relaxed) {
   const Model model = space_to_depth_float_1_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(space_to_depth_float_1_relaxed::examples);
+  const std::vector<Request> requests = createRequests(space_to_depth_float_1_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2796,12 +2796,12 @@
   generated_tests::Execute(device,
                            space_to_depth_float_2_relaxed::createTestModel,
                            space_to_depth_float_2_relaxed::is_ignored,
-                           space_to_depth_float_2_relaxed::examples);
+                           space_to_depth_float_2_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, space_to_depth_float_2_relaxed) {
   const Model model = space_to_depth_float_2_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(space_to_depth_float_2_relaxed::examples);
+  const std::vector<Request> requests = createRequests(space_to_depth_float_2_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2819,12 +2819,12 @@
   generated_tests::Execute(device,
                            space_to_depth_float_3_relaxed::createTestModel,
                            space_to_depth_float_3_relaxed::is_ignored,
-                           space_to_depth_float_3_relaxed::examples);
+                           space_to_depth_float_3_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, space_to_depth_float_3_relaxed) {
   const Model model = space_to_depth_float_3_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(space_to_depth_float_3_relaxed::examples);
+  const std::vector<Request> requests = createRequests(space_to_depth_float_3_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2842,12 +2842,12 @@
   generated_tests::Execute(device,
                            squeeze::createTestModel,
                            squeeze::is_ignored,
-                           squeeze::examples);
+                           squeeze::get_examples());
 }
 
 TEST_F(ValidationTest, squeeze) {
   const Model model = squeeze::createTestModel();
-  const std::vector<Request> requests = createRequests(squeeze::examples);
+  const std::vector<Request> requests = createRequests(squeeze::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2865,12 +2865,12 @@
   generated_tests::Execute(device,
                            squeeze_float_1::createTestModel,
                            squeeze_float_1::is_ignored,
-                           squeeze_float_1::examples);
+                           squeeze_float_1::get_examples());
 }
 
 TEST_F(ValidationTest, squeeze_float_1) {
   const Model model = squeeze_float_1::createTestModel();
-  const std::vector<Request> requests = createRequests(squeeze_float_1::examples);
+  const std::vector<Request> requests = createRequests(squeeze_float_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2888,12 +2888,12 @@
   generated_tests::Execute(device,
                            squeeze_float_1_relaxed::createTestModel,
                            squeeze_float_1_relaxed::is_ignored,
-                           squeeze_float_1_relaxed::examples);
+                           squeeze_float_1_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, squeeze_float_1_relaxed) {
   const Model model = squeeze_float_1_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(squeeze_float_1_relaxed::examples);
+  const std::vector<Request> requests = createRequests(squeeze_float_1_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2911,12 +2911,12 @@
   generated_tests::Execute(device,
                            squeeze_quant8_1::createTestModel,
                            squeeze_quant8_1::is_ignored,
-                           squeeze_quant8_1::examples);
+                           squeeze_quant8_1::get_examples());
 }
 
 TEST_F(ValidationTest, squeeze_quant8_1) {
   const Model model = squeeze_quant8_1::createTestModel();
-  const std::vector<Request> requests = createRequests(squeeze_quant8_1::examples);
+  const std::vector<Request> requests = createRequests(squeeze_quant8_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2934,12 +2934,12 @@
   generated_tests::Execute(device,
                            squeeze_relaxed::createTestModel,
                            squeeze_relaxed::is_ignored,
-                           squeeze_relaxed::examples);
+                           squeeze_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, squeeze_relaxed) {
   const Model model = squeeze_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(squeeze_relaxed::examples);
+  const std::vector<Request> requests = createRequests(squeeze_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2957,12 +2957,12 @@
   generated_tests::Execute(device,
                            strided_slice::createTestModel,
                            strided_slice::is_ignored,
-                           strided_slice::examples);
+                           strided_slice::get_examples());
 }
 
 TEST_F(ValidationTest, strided_slice) {
   const Model model = strided_slice::createTestModel();
-  const std::vector<Request> requests = createRequests(strided_slice::examples);
+  const std::vector<Request> requests = createRequests(strided_slice::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2980,12 +2980,12 @@
   generated_tests::Execute(device,
                            strided_slice_float_1::createTestModel,
                            strided_slice_float_1::is_ignored,
-                           strided_slice_float_1::examples);
+                           strided_slice_float_1::get_examples());
 }
 
 TEST_F(ValidationTest, strided_slice_float_1) {
   const Model model = strided_slice_float_1::createTestModel();
-  const std::vector<Request> requests = createRequests(strided_slice_float_1::examples);
+  const std::vector<Request> requests = createRequests(strided_slice_float_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3003,12 +3003,12 @@
   generated_tests::Execute(device,
                            strided_slice_float_10::createTestModel,
                            strided_slice_float_10::is_ignored,
-                           strided_slice_float_10::examples);
+                           strided_slice_float_10::get_examples());
 }
 
 TEST_F(ValidationTest, strided_slice_float_10) {
   const Model model = strided_slice_float_10::createTestModel();
-  const std::vector<Request> requests = createRequests(strided_slice_float_10::examples);
+  const std::vector<Request> requests = createRequests(strided_slice_float_10::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3026,12 +3026,12 @@
   generated_tests::Execute(device,
                            strided_slice_float_10_relaxed::createTestModel,
                            strided_slice_float_10_relaxed::is_ignored,
-                           strided_slice_float_10_relaxed::examples);
+                           strided_slice_float_10_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, strided_slice_float_10_relaxed) {
   const Model model = strided_slice_float_10_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(strided_slice_float_10_relaxed::examples);
+  const std::vector<Request> requests = createRequests(strided_slice_float_10_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3049,12 +3049,12 @@
   generated_tests::Execute(device,
                            strided_slice_float_11::createTestModel,
                            strided_slice_float_11::is_ignored,
-                           strided_slice_float_11::examples);
+                           strided_slice_float_11::get_examples());
 }
 
 TEST_F(ValidationTest, strided_slice_float_11) {
   const Model model = strided_slice_float_11::createTestModel();
-  const std::vector<Request> requests = createRequests(strided_slice_float_11::examples);
+  const std::vector<Request> requests = createRequests(strided_slice_float_11::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3072,12 +3072,12 @@
   generated_tests::Execute(device,
                            strided_slice_float_11_relaxed::createTestModel,
                            strided_slice_float_11_relaxed::is_ignored,
-                           strided_slice_float_11_relaxed::examples);
+                           strided_slice_float_11_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, strided_slice_float_11_relaxed) {
   const Model model = strided_slice_float_11_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(strided_slice_float_11_relaxed::examples);
+  const std::vector<Request> requests = createRequests(strided_slice_float_11_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3095,12 +3095,12 @@
   generated_tests::Execute(device,
                            strided_slice_float_1_relaxed::createTestModel,
                            strided_slice_float_1_relaxed::is_ignored,
-                           strided_slice_float_1_relaxed::examples);
+                           strided_slice_float_1_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, strided_slice_float_1_relaxed) {
   const Model model = strided_slice_float_1_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(strided_slice_float_1_relaxed::examples);
+  const std::vector<Request> requests = createRequests(strided_slice_float_1_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3118,12 +3118,12 @@
   generated_tests::Execute(device,
                            strided_slice_float_2::createTestModel,
                            strided_slice_float_2::is_ignored,
-                           strided_slice_float_2::examples);
+                           strided_slice_float_2::get_examples());
 }
 
 TEST_F(ValidationTest, strided_slice_float_2) {
   const Model model = strided_slice_float_2::createTestModel();
-  const std::vector<Request> requests = createRequests(strided_slice_float_2::examples);
+  const std::vector<Request> requests = createRequests(strided_slice_float_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3141,12 +3141,12 @@
   generated_tests::Execute(device,
                            strided_slice_float_2_relaxed::createTestModel,
                            strided_slice_float_2_relaxed::is_ignored,
-                           strided_slice_float_2_relaxed::examples);
+                           strided_slice_float_2_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, strided_slice_float_2_relaxed) {
   const Model model = strided_slice_float_2_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(strided_slice_float_2_relaxed::examples);
+  const std::vector<Request> requests = createRequests(strided_slice_float_2_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3164,12 +3164,12 @@
   generated_tests::Execute(device,
                            strided_slice_float_3::createTestModel,
                            strided_slice_float_3::is_ignored,
-                           strided_slice_float_3::examples);
+                           strided_slice_float_3::get_examples());
 }
 
 TEST_F(ValidationTest, strided_slice_float_3) {
   const Model model = strided_slice_float_3::createTestModel();
-  const std::vector<Request> requests = createRequests(strided_slice_float_3::examples);
+  const std::vector<Request> requests = createRequests(strided_slice_float_3::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3187,12 +3187,12 @@
   generated_tests::Execute(device,
                            strided_slice_float_3_relaxed::createTestModel,
                            strided_slice_float_3_relaxed::is_ignored,
-                           strided_slice_float_3_relaxed::examples);
+                           strided_slice_float_3_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, strided_slice_float_3_relaxed) {
   const Model model = strided_slice_float_3_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(strided_slice_float_3_relaxed::examples);
+  const std::vector<Request> requests = createRequests(strided_slice_float_3_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3210,12 +3210,12 @@
   generated_tests::Execute(device,
                            strided_slice_float_4::createTestModel,
                            strided_slice_float_4::is_ignored,
-                           strided_slice_float_4::examples);
+                           strided_slice_float_4::get_examples());
 }
 
 TEST_F(ValidationTest, strided_slice_float_4) {
   const Model model = strided_slice_float_4::createTestModel();
-  const std::vector<Request> requests = createRequests(strided_slice_float_4::examples);
+  const std::vector<Request> requests = createRequests(strided_slice_float_4::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3233,12 +3233,12 @@
   generated_tests::Execute(device,
                            strided_slice_float_4_relaxed::createTestModel,
                            strided_slice_float_4_relaxed::is_ignored,
-                           strided_slice_float_4_relaxed::examples);
+                           strided_slice_float_4_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, strided_slice_float_4_relaxed) {
   const Model model = strided_slice_float_4_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(strided_slice_float_4_relaxed::examples);
+  const std::vector<Request> requests = createRequests(strided_slice_float_4_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3256,12 +3256,12 @@
   generated_tests::Execute(device,
                            strided_slice_float_5::createTestModel,
                            strided_slice_float_5::is_ignored,
-                           strided_slice_float_5::examples);
+                           strided_slice_float_5::get_examples());
 }
 
 TEST_F(ValidationTest, strided_slice_float_5) {
   const Model model = strided_slice_float_5::createTestModel();
-  const std::vector<Request> requests = createRequests(strided_slice_float_5::examples);
+  const std::vector<Request> requests = createRequests(strided_slice_float_5::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3279,12 +3279,12 @@
   generated_tests::Execute(device,
                            strided_slice_float_5_relaxed::createTestModel,
                            strided_slice_float_5_relaxed::is_ignored,
-                           strided_slice_float_5_relaxed::examples);
+                           strided_slice_float_5_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, strided_slice_float_5_relaxed) {
   const Model model = strided_slice_float_5_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(strided_slice_float_5_relaxed::examples);
+  const std::vector<Request> requests = createRequests(strided_slice_float_5_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3302,12 +3302,12 @@
   generated_tests::Execute(device,
                            strided_slice_float_6::createTestModel,
                            strided_slice_float_6::is_ignored,
-                           strided_slice_float_6::examples);
+                           strided_slice_float_6::get_examples());
 }
 
 TEST_F(ValidationTest, strided_slice_float_6) {
   const Model model = strided_slice_float_6::createTestModel();
-  const std::vector<Request> requests = createRequests(strided_slice_float_6::examples);
+  const std::vector<Request> requests = createRequests(strided_slice_float_6::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3325,12 +3325,12 @@
   generated_tests::Execute(device,
                            strided_slice_float_6_relaxed::createTestModel,
                            strided_slice_float_6_relaxed::is_ignored,
-                           strided_slice_float_6_relaxed::examples);
+                           strided_slice_float_6_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, strided_slice_float_6_relaxed) {
   const Model model = strided_slice_float_6_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(strided_slice_float_6_relaxed::examples);
+  const std::vector<Request> requests = createRequests(strided_slice_float_6_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3348,12 +3348,12 @@
   generated_tests::Execute(device,
                            strided_slice_float_7::createTestModel,
                            strided_slice_float_7::is_ignored,
-                           strided_slice_float_7::examples);
+                           strided_slice_float_7::get_examples());
 }
 
 TEST_F(ValidationTest, strided_slice_float_7) {
   const Model model = strided_slice_float_7::createTestModel();
-  const std::vector<Request> requests = createRequests(strided_slice_float_7::examples);
+  const std::vector<Request> requests = createRequests(strided_slice_float_7::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3371,12 +3371,12 @@
   generated_tests::Execute(device,
                            strided_slice_float_7_relaxed::createTestModel,
                            strided_slice_float_7_relaxed::is_ignored,
-                           strided_slice_float_7_relaxed::examples);
+                           strided_slice_float_7_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, strided_slice_float_7_relaxed) {
   const Model model = strided_slice_float_7_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(strided_slice_float_7_relaxed::examples);
+  const std::vector<Request> requests = createRequests(strided_slice_float_7_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3394,12 +3394,12 @@
   generated_tests::Execute(device,
                            strided_slice_float_8::createTestModel,
                            strided_slice_float_8::is_ignored,
-                           strided_slice_float_8::examples);
+                           strided_slice_float_8::get_examples());
 }
 
 TEST_F(ValidationTest, strided_slice_float_8) {
   const Model model = strided_slice_float_8::createTestModel();
-  const std::vector<Request> requests = createRequests(strided_slice_float_8::examples);
+  const std::vector<Request> requests = createRequests(strided_slice_float_8::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3417,12 +3417,12 @@
   generated_tests::Execute(device,
                            strided_slice_float_8_relaxed::createTestModel,
                            strided_slice_float_8_relaxed::is_ignored,
-                           strided_slice_float_8_relaxed::examples);
+                           strided_slice_float_8_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, strided_slice_float_8_relaxed) {
   const Model model = strided_slice_float_8_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(strided_slice_float_8_relaxed::examples);
+  const std::vector<Request> requests = createRequests(strided_slice_float_8_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3440,12 +3440,12 @@
   generated_tests::Execute(device,
                            strided_slice_float_9::createTestModel,
                            strided_slice_float_9::is_ignored,
-                           strided_slice_float_9::examples);
+                           strided_slice_float_9::get_examples());
 }
 
 TEST_F(ValidationTest, strided_slice_float_9) {
   const Model model = strided_slice_float_9::createTestModel();
-  const std::vector<Request> requests = createRequests(strided_slice_float_9::examples);
+  const std::vector<Request> requests = createRequests(strided_slice_float_9::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3463,12 +3463,12 @@
   generated_tests::Execute(device,
                            strided_slice_float_9_relaxed::createTestModel,
                            strided_slice_float_9_relaxed::is_ignored,
-                           strided_slice_float_9_relaxed::examples);
+                           strided_slice_float_9_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, strided_slice_float_9_relaxed) {
   const Model model = strided_slice_float_9_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(strided_slice_float_9_relaxed::examples);
+  const std::vector<Request> requests = createRequests(strided_slice_float_9_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3486,12 +3486,12 @@
   generated_tests::Execute(device,
                            strided_slice_qaunt8_10::createTestModel,
                            strided_slice_qaunt8_10::is_ignored,
-                           strided_slice_qaunt8_10::examples);
+                           strided_slice_qaunt8_10::get_examples());
 }
 
 TEST_F(ValidationTest, strided_slice_qaunt8_10) {
   const Model model = strided_slice_qaunt8_10::createTestModel();
-  const std::vector<Request> requests = createRequests(strided_slice_qaunt8_10::examples);
+  const std::vector<Request> requests = createRequests(strided_slice_qaunt8_10::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3509,12 +3509,12 @@
   generated_tests::Execute(device,
                            strided_slice_qaunt8_11::createTestModel,
                            strided_slice_qaunt8_11::is_ignored,
-                           strided_slice_qaunt8_11::examples);
+                           strided_slice_qaunt8_11::get_examples());
 }
 
 TEST_F(ValidationTest, strided_slice_qaunt8_11) {
   const Model model = strided_slice_qaunt8_11::createTestModel();
-  const std::vector<Request> requests = createRequests(strided_slice_qaunt8_11::examples);
+  const std::vector<Request> requests = createRequests(strided_slice_qaunt8_11::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3532,12 +3532,12 @@
   generated_tests::Execute(device,
                            strided_slice_quant8_1::createTestModel,
                            strided_slice_quant8_1::is_ignored,
-                           strided_slice_quant8_1::examples);
+                           strided_slice_quant8_1::get_examples());
 }
 
 TEST_F(ValidationTest, strided_slice_quant8_1) {
   const Model model = strided_slice_quant8_1::createTestModel();
-  const std::vector<Request> requests = createRequests(strided_slice_quant8_1::examples);
+  const std::vector<Request> requests = createRequests(strided_slice_quant8_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3555,12 +3555,12 @@
   generated_tests::Execute(device,
                            strided_slice_quant8_2::createTestModel,
                            strided_slice_quant8_2::is_ignored,
-                           strided_slice_quant8_2::examples);
+                           strided_slice_quant8_2::get_examples());
 }
 
 TEST_F(ValidationTest, strided_slice_quant8_2) {
   const Model model = strided_slice_quant8_2::createTestModel();
-  const std::vector<Request> requests = createRequests(strided_slice_quant8_2::examples);
+  const std::vector<Request> requests = createRequests(strided_slice_quant8_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3578,12 +3578,12 @@
   generated_tests::Execute(device,
                            strided_slice_quant8_3::createTestModel,
                            strided_slice_quant8_3::is_ignored,
-                           strided_slice_quant8_3::examples);
+                           strided_slice_quant8_3::get_examples());
 }
 
 TEST_F(ValidationTest, strided_slice_quant8_3) {
   const Model model = strided_slice_quant8_3::createTestModel();
-  const std::vector<Request> requests = createRequests(strided_slice_quant8_3::examples);
+  const std::vector<Request> requests = createRequests(strided_slice_quant8_3::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3601,12 +3601,12 @@
   generated_tests::Execute(device,
                            strided_slice_quant8_4::createTestModel,
                            strided_slice_quant8_4::is_ignored,
-                           strided_slice_quant8_4::examples);
+                           strided_slice_quant8_4::get_examples());
 }
 
 TEST_F(ValidationTest, strided_slice_quant8_4) {
   const Model model = strided_slice_quant8_4::createTestModel();
-  const std::vector<Request> requests = createRequests(strided_slice_quant8_4::examples);
+  const std::vector<Request> requests = createRequests(strided_slice_quant8_4::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3624,12 +3624,12 @@
   generated_tests::Execute(device,
                            strided_slice_quant8_5::createTestModel,
                            strided_slice_quant8_5::is_ignored,
-                           strided_slice_quant8_5::examples);
+                           strided_slice_quant8_5::get_examples());
 }
 
 TEST_F(ValidationTest, strided_slice_quant8_5) {
   const Model model = strided_slice_quant8_5::createTestModel();
-  const std::vector<Request> requests = createRequests(strided_slice_quant8_5::examples);
+  const std::vector<Request> requests = createRequests(strided_slice_quant8_5::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3647,12 +3647,12 @@
   generated_tests::Execute(device,
                            strided_slice_quant8_6::createTestModel,
                            strided_slice_quant8_6::is_ignored,
-                           strided_slice_quant8_6::examples);
+                           strided_slice_quant8_6::get_examples());
 }
 
 TEST_F(ValidationTest, strided_slice_quant8_6) {
   const Model model = strided_slice_quant8_6::createTestModel();
-  const std::vector<Request> requests = createRequests(strided_slice_quant8_6::examples);
+  const std::vector<Request> requests = createRequests(strided_slice_quant8_6::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3670,12 +3670,12 @@
   generated_tests::Execute(device,
                            strided_slice_quant8_7::createTestModel,
                            strided_slice_quant8_7::is_ignored,
-                           strided_slice_quant8_7::examples);
+                           strided_slice_quant8_7::get_examples());
 }
 
 TEST_F(ValidationTest, strided_slice_quant8_7) {
   const Model model = strided_slice_quant8_7::createTestModel();
-  const std::vector<Request> requests = createRequests(strided_slice_quant8_7::examples);
+  const std::vector<Request> requests = createRequests(strided_slice_quant8_7::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3693,12 +3693,12 @@
   generated_tests::Execute(device,
                            strided_slice_quant8_8::createTestModel,
                            strided_slice_quant8_8::is_ignored,
-                           strided_slice_quant8_8::examples);
+                           strided_slice_quant8_8::get_examples());
 }
 
 TEST_F(ValidationTest, strided_slice_quant8_8) {
   const Model model = strided_slice_quant8_8::createTestModel();
-  const std::vector<Request> requests = createRequests(strided_slice_quant8_8::examples);
+  const std::vector<Request> requests = createRequests(strided_slice_quant8_8::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3716,12 +3716,12 @@
   generated_tests::Execute(device,
                            strided_slice_quant8_9::createTestModel,
                            strided_slice_quant8_9::is_ignored,
-                           strided_slice_quant8_9::examples);
+                           strided_slice_quant8_9::get_examples());
 }
 
 TEST_F(ValidationTest, strided_slice_quant8_9) {
   const Model model = strided_slice_quant8_9::createTestModel();
-  const std::vector<Request> requests = createRequests(strided_slice_quant8_9::examples);
+  const std::vector<Request> requests = createRequests(strided_slice_quant8_9::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3739,12 +3739,12 @@
   generated_tests::Execute(device,
                            strided_slice_relaxed::createTestModel,
                            strided_slice_relaxed::is_ignored,
-                           strided_slice_relaxed::examples);
+                           strided_slice_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, strided_slice_relaxed) {
   const Model model = strided_slice_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(strided_slice_relaxed::examples);
+  const std::vector<Request> requests = createRequests(strided_slice_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3762,12 +3762,12 @@
   generated_tests::Execute(device,
                            sub::createTestModel,
                            sub::is_ignored,
-                           sub::examples);
+                           sub::get_examples());
 }
 
 TEST_F(ValidationTest, sub) {
   const Model model = sub::createTestModel();
-  const std::vector<Request> requests = createRequests(sub::examples);
+  const std::vector<Request> requests = createRequests(sub::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3785,12 +3785,12 @@
   generated_tests::Execute(device,
                            sub_broadcast_float::createTestModel,
                            sub_broadcast_float::is_ignored,
-                           sub_broadcast_float::examples);
+                           sub_broadcast_float::get_examples());
 }
 
 TEST_F(ValidationTest, sub_broadcast_float) {
   const Model model = sub_broadcast_float::createTestModel();
-  const std::vector<Request> requests = createRequests(sub_broadcast_float::examples);
+  const std::vector<Request> requests = createRequests(sub_broadcast_float::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3808,12 +3808,12 @@
   generated_tests::Execute(device,
                            sub_broadcast_float_relaxed::createTestModel,
                            sub_broadcast_float_relaxed::is_ignored,
-                           sub_broadcast_float_relaxed::examples);
+                           sub_broadcast_float_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, sub_broadcast_float_relaxed) {
   const Model model = sub_broadcast_float_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(sub_broadcast_float_relaxed::examples);
+  const std::vector<Request> requests = createRequests(sub_broadcast_float_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3831,12 +3831,12 @@
   generated_tests::Execute(device,
                            sub_relaxed::createTestModel,
                            sub_relaxed::is_ignored,
-                           sub_relaxed::examples);
+                           sub_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, sub_relaxed) {
   const Model model = sub_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(sub_relaxed::examples);
+  const std::vector<Request> requests = createRequests(sub_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3854,12 +3854,12 @@
   generated_tests::Execute(device,
                            svdf2_relaxed::createTestModel,
                            svdf2_relaxed::is_ignored,
-                           svdf2_relaxed::examples);
+                           svdf2_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, svdf2_relaxed) {
   const Model model = svdf2_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(svdf2_relaxed::examples);
+  const std::vector<Request> requests = createRequests(svdf2_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3877,12 +3877,12 @@
   generated_tests::Execute(device,
                            svdf_relaxed::createTestModel,
                            svdf_relaxed::is_ignored,
-                           svdf_relaxed::examples);
+                           svdf_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, svdf_relaxed) {
   const Model model = svdf_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(svdf_relaxed::examples);
+  const std::vector<Request> requests = createRequests(svdf_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3900,12 +3900,12 @@
   generated_tests::Execute(device,
                            svdf_state_relaxed::createTestModel,
                            svdf_state_relaxed::is_ignored,
-                           svdf_state_relaxed::examples);
+                           svdf_state_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, svdf_state_relaxed) {
   const Model model = svdf_state_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(svdf_state_relaxed::examples);
+  const std::vector<Request> requests = createRequests(svdf_state_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3923,12 +3923,12 @@
   generated_tests::Execute(device,
                            tanh_relaxed::createTestModel,
                            tanh_relaxed::is_ignored,
-                           tanh_relaxed::examples);
+                           tanh_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, tanh_relaxed) {
   const Model model = tanh_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(tanh_relaxed::examples);
+  const std::vector<Request> requests = createRequests(tanh_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3946,12 +3946,12 @@
   generated_tests::Execute(device,
                            transpose::createTestModel,
                            transpose::is_ignored,
-                           transpose::examples);
+                           transpose::get_examples());
 }
 
 TEST_F(ValidationTest, transpose) {
   const Model model = transpose::createTestModel();
-  const std::vector<Request> requests = createRequests(transpose::examples);
+  const std::vector<Request> requests = createRequests(transpose::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3969,12 +3969,12 @@
   generated_tests::Execute(device,
                            transpose_float_1::createTestModel,
                            transpose_float_1::is_ignored,
-                           transpose_float_1::examples);
+                           transpose_float_1::get_examples());
 }
 
 TEST_F(ValidationTest, transpose_float_1) {
   const Model model = transpose_float_1::createTestModel();
-  const std::vector<Request> requests = createRequests(transpose_float_1::examples);
+  const std::vector<Request> requests = createRequests(transpose_float_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3992,12 +3992,12 @@
   generated_tests::Execute(device,
                            transpose_float_1_relaxed::createTestModel,
                            transpose_float_1_relaxed::is_ignored,
-                           transpose_float_1_relaxed::examples);
+                           transpose_float_1_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, transpose_float_1_relaxed) {
   const Model model = transpose_float_1_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(transpose_float_1_relaxed::examples);
+  const std::vector<Request> requests = createRequests(transpose_float_1_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4015,12 +4015,12 @@
   generated_tests::Execute(device,
                            transpose_quant8_1::createTestModel,
                            transpose_quant8_1::is_ignored,
-                           transpose_quant8_1::examples);
+                           transpose_quant8_1::get_examples());
 }
 
 TEST_F(ValidationTest, transpose_quant8_1) {
   const Model model = transpose_quant8_1::createTestModel();
-  const std::vector<Request> requests = createRequests(transpose_quant8_1::examples);
+  const std::vector<Request> requests = createRequests(transpose_quant8_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4038,12 +4038,12 @@
   generated_tests::Execute(device,
                            transpose_relaxed::createTestModel,
                            transpose_relaxed::is_ignored,
-                           transpose_relaxed::examples);
+                           transpose_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, transpose_relaxed) {
   const Model model = transpose_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(transpose_relaxed::examples);
+  const std::vector<Request> requests = createRequests(transpose_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
diff --git a/nn/runtime/test/generated/all_generated_V1_2_vts_tests.cpp b/nn/runtime/test/generated/all_generated_V1_2_vts_tests.cpp
index 6c53f91..08a8028 100644
--- a/nn/runtime/test/generated/all_generated_V1_2_vts_tests.cpp
+++ b/nn/runtime/test/generated/all_generated_V1_2_vts_tests.cpp
@@ -1,6 +1,59 @@
 // clang-format off
 // DO NOT EDIT;
 // Generated by ml/nn/runtime/test/specs/generate_vts_test.sh
+// Generated from: abs.mod.py.
+namespace abs {
+// Generated abs test
+#include "examples/abs.example.cpp"
+// Generated model constructor
+#include "vts_models/abs.model.cpp"
+} // namespace abs
+
+TEST_F(NeuralnetworksHidlTest, abs) {
+  generated_tests::Execute(device,
+                           abs::createTestModel,
+                           abs::is_ignored,
+                           abs::get_examples());
+}
+
+TEST_F(ValidationTest, abs) {
+  const Model model = abs::createTestModel();
+  const std::vector<Request> requests = createRequests(abs::get_examples());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, abs_relaxed) {
+  generated_tests::Execute(device,
+                           abs::createTestModel_relaxed,
+                           abs::is_ignored_relaxed,
+                           abs::get_examples_relaxed());
+}
+
+TEST_F(ValidationTest, abs_relaxed) {
+  const Model model = abs::createTestModel_relaxed();
+  const std::vector<Request> requests = createRequests(abs::get_examples_relaxed());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, abs_float16) {
+  generated_tests::Execute(device,
+                           abs::createTestModel_float16,
+                           abs::is_ignored_float16,
+                           abs::get_examples_float16());
+}
+
+TEST_F(ValidationTest, abs_float16) {
+  const Model model = abs::createTestModel_float16();
+  const std::vector<Request> requests = createRequests(abs::get_examples_float16());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
 // Generated from: add_broadcast_float16.mod.py.
 namespace add_broadcast_float16 {
 // Generated add_broadcast_float16 test
@@ -13,12 +66,12 @@
   generated_tests::Execute(device,
                            add_broadcast_float16::createTestModel,
                            add_broadcast_float16::is_ignored,
-                           add_broadcast_float16::examples);
+                           add_broadcast_float16::get_examples());
 }
 
 TEST_F(ValidationTest, add_broadcast_float16) {
   const Model model = add_broadcast_float16::createTestModel();
-  const std::vector<Request> requests = createRequests(add_broadcast_float16::examples);
+  const std::vector<Request> requests = createRequests(add_broadcast_float16::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -36,426 +89,510 @@
   generated_tests::Execute(device,
                            add_float16::createTestModel,
                            add_float16::is_ignored,
-                           add_float16::examples);
+                           add_float16::get_examples());
 }
 
 TEST_F(ValidationTest, add_float16) {
   const Model model = add_float16::createTestModel();
-  const std::vector<Request> requests = createRequests(add_float16::examples);
+  const std::vector<Request> requests = createRequests(add_float16::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
 
 
-// Generated from: argmax_1_float.mod.py.
-namespace argmax_1_float {
-// Generated argmax_1_float test
-#include "examples/argmax_1_float.example.cpp"
+// Generated from: argmax_1.mod.py.
+namespace argmax_1 {
+// Generated argmax_1 test
+#include "examples/argmax_1.example.cpp"
 // Generated model constructor
-#include "vts_models/argmax_1_float.model.cpp"
-} // namespace argmax_1_float
+#include "vts_models/argmax_1.model.cpp"
+} // namespace argmax_1
 
-TEST_F(NeuralnetworksHidlTest, argmax_1_float) {
+TEST_F(NeuralnetworksHidlTest, argmax_1) {
   generated_tests::Execute(device,
-                           argmax_1_float::createTestModel,
-                           argmax_1_float::is_ignored,
-                           argmax_1_float::examples);
+                           argmax_1::createTestModel,
+                           argmax_1::is_ignored,
+                           argmax_1::get_examples());
 }
 
-TEST_F(ValidationTest, argmax_1_float) {
-  const Model model = argmax_1_float::createTestModel();
-  const std::vector<Request> requests = createRequests(argmax_1_float::examples);
+TEST_F(ValidationTest, argmax_1) {
+  const Model model = argmax_1::createTestModel();
+  const std::vector<Request> requests = createRequests(argmax_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
 
 
-// Generated from: argmax_1_float_relaxed.mod.py.
-namespace argmax_1_float_relaxed {
-// Generated argmax_1_float_relaxed test
-#include "examples/argmax_1_float_relaxed.example.cpp"
-// Generated model constructor
-#include "vts_models/argmax_1_float_relaxed.model.cpp"
-} // namespace argmax_1_float_relaxed
-
-TEST_F(NeuralnetworksHidlTest, argmax_1_float_relaxed) {
+TEST_F(NeuralnetworksHidlTest, argmax_1_relaxed) {
   generated_tests::Execute(device,
-                           argmax_1_float_relaxed::createTestModel,
-                           argmax_1_float_relaxed::is_ignored,
-                           argmax_1_float_relaxed::examples);
+                           argmax_1::createTestModel_relaxed,
+                           argmax_1::is_ignored_relaxed,
+                           argmax_1::get_examples_relaxed());
 }
 
-TEST_F(ValidationTest, argmax_1_float_relaxed) {
-  const Model model = argmax_1_float_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(argmax_1_float_relaxed::examples);
+TEST_F(ValidationTest, argmax_1_relaxed) {
+  const Model model = argmax_1::createTestModel_relaxed();
+  const std::vector<Request> requests = createRequests(argmax_1::get_examples_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
 
 
-// Generated from: argmax_1_int32.mod.py.
-namespace argmax_1_int32 {
-// Generated argmax_1_int32 test
-#include "examples/argmax_1_int32.example.cpp"
-// Generated model constructor
-#include "vts_models/argmax_1_int32.model.cpp"
-} // namespace argmax_1_int32
+TEST_F(NeuralnetworksHidlTest, argmax_1_float16) {
+  generated_tests::Execute(device,
+                           argmax_1::createTestModel_float16,
+                           argmax_1::is_ignored_float16,
+                           argmax_1::get_examples_float16());
+}
+
+TEST_F(ValidationTest, argmax_1_float16) {
+  const Model model = argmax_1::createTestModel_float16();
+  const std::vector<Request> requests = createRequests(argmax_1::get_examples_float16());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
 
 TEST_F(NeuralnetworksHidlTest, argmax_1_int32) {
   generated_tests::Execute(device,
-                           argmax_1_int32::createTestModel,
-                           argmax_1_int32::is_ignored,
-                           argmax_1_int32::examples);
+                           argmax_1::createTestModel_int32,
+                           argmax_1::is_ignored_int32,
+                           argmax_1::get_examples_int32());
 }
 
 TEST_F(ValidationTest, argmax_1_int32) {
-  const Model model = argmax_1_int32::createTestModel();
-  const std::vector<Request> requests = createRequests(argmax_1_int32::examples);
+  const Model model = argmax_1::createTestModel_int32();
+  const std::vector<Request> requests = createRequests(argmax_1::get_examples_int32());
   validateModel(model);
   validateRequests(model, requests);
 }
 
 
-// Generated from: argmax_1_quant8.mod.py.
-namespace argmax_1_quant8 {
-// Generated argmax_1_quant8 test
-#include "examples/argmax_1_quant8.example.cpp"
-// Generated model constructor
-#include "vts_models/argmax_1_quant8.model.cpp"
-} // namespace argmax_1_quant8
-
 TEST_F(NeuralnetworksHidlTest, argmax_1_quant8) {
   generated_tests::Execute(device,
-                           argmax_1_quant8::createTestModel,
-                           argmax_1_quant8::is_ignored,
-                           argmax_1_quant8::examples);
+                           argmax_1::createTestModel_quant8,
+                           argmax_1::is_ignored_quant8,
+                           argmax_1::get_examples_quant8());
 }
 
 TEST_F(ValidationTest, argmax_1_quant8) {
-  const Model model = argmax_1_quant8::createTestModel();
-  const std::vector<Request> requests = createRequests(argmax_1_quant8::examples);
+  const Model model = argmax_1::createTestModel_quant8();
+  const std::vector<Request> requests = createRequests(argmax_1::get_examples_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
 
 
-// Generated from: argmax_2_float.mod.py.
-namespace argmax_2_float {
-// Generated argmax_2_float test
-#include "examples/argmax_2_float.example.cpp"
+// Generated from: argmax_2.mod.py.
+namespace argmax_2 {
+// Generated argmax_2 test
+#include "examples/argmax_2.example.cpp"
 // Generated model constructor
-#include "vts_models/argmax_2_float.model.cpp"
-} // namespace argmax_2_float
+#include "vts_models/argmax_2.model.cpp"
+} // namespace argmax_2
 
-TEST_F(NeuralnetworksHidlTest, argmax_2_float) {
+TEST_F(NeuralnetworksHidlTest, argmax_2) {
   generated_tests::Execute(device,
-                           argmax_2_float::createTestModel,
-                           argmax_2_float::is_ignored,
-                           argmax_2_float::examples);
+                           argmax_2::createTestModel,
+                           argmax_2::is_ignored,
+                           argmax_2::get_examples());
 }
 
-TEST_F(ValidationTest, argmax_2_float) {
-  const Model model = argmax_2_float::createTestModel();
-  const std::vector<Request> requests = createRequests(argmax_2_float::examples);
+TEST_F(ValidationTest, argmax_2) {
+  const Model model = argmax_2::createTestModel();
+  const std::vector<Request> requests = createRequests(argmax_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
 
 
-// Generated from: argmax_2_float_relaxed.mod.py.
-namespace argmax_2_float_relaxed {
-// Generated argmax_2_float_relaxed test
-#include "examples/argmax_2_float_relaxed.example.cpp"
-// Generated model constructor
-#include "vts_models/argmax_2_float_relaxed.model.cpp"
-} // namespace argmax_2_float_relaxed
-
-TEST_F(NeuralnetworksHidlTest, argmax_2_float_relaxed) {
+TEST_F(NeuralnetworksHidlTest, argmax_2_relaxed) {
   generated_tests::Execute(device,
-                           argmax_2_float_relaxed::createTestModel,
-                           argmax_2_float_relaxed::is_ignored,
-                           argmax_2_float_relaxed::examples);
+                           argmax_2::createTestModel_relaxed,
+                           argmax_2::is_ignored_relaxed,
+                           argmax_2::get_examples_relaxed());
 }
 
-TEST_F(ValidationTest, argmax_2_float_relaxed) {
-  const Model model = argmax_2_float_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(argmax_2_float_relaxed::examples);
+TEST_F(ValidationTest, argmax_2_relaxed) {
+  const Model model = argmax_2::createTestModel_relaxed();
+  const std::vector<Request> requests = createRequests(argmax_2::get_examples_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
 
 
-// Generated from: argmax_2_int32.mod.py.
-namespace argmax_2_int32 {
-// Generated argmax_2_int32 test
-#include "examples/argmax_2_int32.example.cpp"
-// Generated model constructor
-#include "vts_models/argmax_2_int32.model.cpp"
-} // namespace argmax_2_int32
+TEST_F(NeuralnetworksHidlTest, argmax_2_float16) {
+  generated_tests::Execute(device,
+                           argmax_2::createTestModel_float16,
+                           argmax_2::is_ignored_float16,
+                           argmax_2::get_examples_float16());
+}
+
+TEST_F(ValidationTest, argmax_2_float16) {
+  const Model model = argmax_2::createTestModel_float16();
+  const std::vector<Request> requests = createRequests(argmax_2::get_examples_float16());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
 
 TEST_F(NeuralnetworksHidlTest, argmax_2_int32) {
   generated_tests::Execute(device,
-                           argmax_2_int32::createTestModel,
-                           argmax_2_int32::is_ignored,
-                           argmax_2_int32::examples);
+                           argmax_2::createTestModel_int32,
+                           argmax_2::is_ignored_int32,
+                           argmax_2::get_examples_int32());
 }
 
 TEST_F(ValidationTest, argmax_2_int32) {
-  const Model model = argmax_2_int32::createTestModel();
-  const std::vector<Request> requests = createRequests(argmax_2_int32::examples);
+  const Model model = argmax_2::createTestModel_int32();
+  const std::vector<Request> requests = createRequests(argmax_2::get_examples_int32());
   validateModel(model);
   validateRequests(model, requests);
 }
 
 
-// Generated from: argmax_2_quant8.mod.py.
-namespace argmax_2_quant8 {
-// Generated argmax_2_quant8 test
-#include "examples/argmax_2_quant8.example.cpp"
-// Generated model constructor
-#include "vts_models/argmax_2_quant8.model.cpp"
-} // namespace argmax_2_quant8
-
 TEST_F(NeuralnetworksHidlTest, argmax_2_quant8) {
   generated_tests::Execute(device,
-                           argmax_2_quant8::createTestModel,
-                           argmax_2_quant8::is_ignored,
-                           argmax_2_quant8::examples);
+                           argmax_2::createTestModel_quant8,
+                           argmax_2::is_ignored_quant8,
+                           argmax_2::get_examples_quant8());
 }
 
 TEST_F(ValidationTest, argmax_2_quant8) {
-  const Model model = argmax_2_quant8::createTestModel();
-  const std::vector<Request> requests = createRequests(argmax_2_quant8::examples);
+  const Model model = argmax_2::createTestModel_quant8();
+  const std::vector<Request> requests = createRequests(argmax_2::get_examples_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
 
 
-// Generated from: argmax_3_float.mod.py.
-namespace argmax_3_float {
-// Generated argmax_3_float test
-#include "examples/argmax_3_float.example.cpp"
+// Generated from: argmax_3.mod.py.
+namespace argmax_3 {
+// Generated argmax_3 test
+#include "examples/argmax_3.example.cpp"
 // Generated model constructor
-#include "vts_models/argmax_3_float.model.cpp"
-} // namespace argmax_3_float
+#include "vts_models/argmax_3.model.cpp"
+} // namespace argmax_3
 
-TEST_F(NeuralnetworksHidlTest, argmax_3_float) {
+TEST_F(NeuralnetworksHidlTest, argmax_3) {
   generated_tests::Execute(device,
-                           argmax_3_float::createTestModel,
-                           argmax_3_float::is_ignored,
-                           argmax_3_float::examples);
+                           argmax_3::createTestModel,
+                           argmax_3::is_ignored,
+                           argmax_3::get_examples());
 }
 
-TEST_F(ValidationTest, argmax_3_float) {
-  const Model model = argmax_3_float::createTestModel();
-  const std::vector<Request> requests = createRequests(argmax_3_float::examples);
+TEST_F(ValidationTest, argmax_3) {
+  const Model model = argmax_3::createTestModel();
+  const std::vector<Request> requests = createRequests(argmax_3::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
 
 
-// Generated from: argmin_1_float.mod.py.
-namespace argmin_1_float {
-// Generated argmin_1_float test
-#include "examples/argmin_1_float.example.cpp"
-// Generated model constructor
-#include "vts_models/argmin_1_float.model.cpp"
-} // namespace argmin_1_float
-
-TEST_F(NeuralnetworksHidlTest, argmin_1_float) {
+TEST_F(NeuralnetworksHidlTest, argmax_3_relaxed) {
   generated_tests::Execute(device,
-                           argmin_1_float::createTestModel,
-                           argmin_1_float::is_ignored,
-                           argmin_1_float::examples);
+                           argmax_3::createTestModel_relaxed,
+                           argmax_3::is_ignored_relaxed,
+                           argmax_3::get_examples_relaxed());
 }
 
-TEST_F(ValidationTest, argmin_1_float) {
-  const Model model = argmin_1_float::createTestModel();
-  const std::vector<Request> requests = createRequests(argmin_1_float::examples);
+TEST_F(ValidationTest, argmax_3_relaxed) {
+  const Model model = argmax_3::createTestModel_relaxed();
+  const std::vector<Request> requests = createRequests(argmax_3::get_examples_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
 
 
-// Generated from: argmin_1_float_relaxed.mod.py.
-namespace argmin_1_float_relaxed {
-// Generated argmin_1_float_relaxed test
-#include "examples/argmin_1_float_relaxed.example.cpp"
-// Generated model constructor
-#include "vts_models/argmin_1_float_relaxed.model.cpp"
-} // namespace argmin_1_float_relaxed
-
-TEST_F(NeuralnetworksHidlTest, argmin_1_float_relaxed) {
+TEST_F(NeuralnetworksHidlTest, argmax_3_float16) {
   generated_tests::Execute(device,
-                           argmin_1_float_relaxed::createTestModel,
-                           argmin_1_float_relaxed::is_ignored,
-                           argmin_1_float_relaxed::examples);
+                           argmax_3::createTestModel_float16,
+                           argmax_3::is_ignored_float16,
+                           argmax_3::get_examples_float16());
 }
 
-TEST_F(ValidationTest, argmin_1_float_relaxed) {
-  const Model model = argmin_1_float_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(argmin_1_float_relaxed::examples);
+TEST_F(ValidationTest, argmax_3_float16) {
+  const Model model = argmax_3::createTestModel_float16();
+  const std::vector<Request> requests = createRequests(argmax_3::get_examples_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
 
 
-// Generated from: argmin_1_int32.mod.py.
-namespace argmin_1_int32 {
-// Generated argmin_1_int32 test
-#include "examples/argmin_1_int32.example.cpp"
+TEST_F(NeuralnetworksHidlTest, argmax_3_int32) {
+  generated_tests::Execute(device,
+                           argmax_3::createTestModel_int32,
+                           argmax_3::is_ignored_int32,
+                           argmax_3::get_examples_int32());
+}
+
+TEST_F(ValidationTest, argmax_3_int32) {
+  const Model model = argmax_3::createTestModel_int32();
+  const std::vector<Request> requests = createRequests(argmax_3::get_examples_int32());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, argmax_3_quant8) {
+  generated_tests::Execute(device,
+                           argmax_3::createTestModel_quant8,
+                           argmax_3::is_ignored_quant8,
+                           argmax_3::get_examples_quant8());
+}
+
+TEST_F(ValidationTest, argmax_3_quant8) {
+  const Model model = argmax_3::createTestModel_quant8();
+  const std::vector<Request> requests = createRequests(argmax_3::get_examples_quant8());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+// Generated from: argmin_1.mod.py.
+namespace argmin_1 {
+// Generated argmin_1 test
+#include "examples/argmin_1.example.cpp"
 // Generated model constructor
-#include "vts_models/argmin_1_int32.model.cpp"
-} // namespace argmin_1_int32
+#include "vts_models/argmin_1.model.cpp"
+} // namespace argmin_1
+
+TEST_F(NeuralnetworksHidlTest, argmin_1) {
+  generated_tests::Execute(device,
+                           argmin_1::createTestModel,
+                           argmin_1::is_ignored,
+                           argmin_1::get_examples());
+}
+
+TEST_F(ValidationTest, argmin_1) {
+  const Model model = argmin_1::createTestModel();
+  const std::vector<Request> requests = createRequests(argmin_1::get_examples());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, argmin_1_relaxed) {
+  generated_tests::Execute(device,
+                           argmin_1::createTestModel_relaxed,
+                           argmin_1::is_ignored_relaxed,
+                           argmin_1::get_examples_relaxed());
+}
+
+TEST_F(ValidationTest, argmin_1_relaxed) {
+  const Model model = argmin_1::createTestModel_relaxed();
+  const std::vector<Request> requests = createRequests(argmin_1::get_examples_relaxed());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, argmin_1_float16) {
+  generated_tests::Execute(device,
+                           argmin_1::createTestModel_float16,
+                           argmin_1::is_ignored_float16,
+                           argmin_1::get_examples_float16());
+}
+
+TEST_F(ValidationTest, argmin_1_float16) {
+  const Model model = argmin_1::createTestModel_float16();
+  const std::vector<Request> requests = createRequests(argmin_1::get_examples_float16());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
 
 TEST_F(NeuralnetworksHidlTest, argmin_1_int32) {
   generated_tests::Execute(device,
-                           argmin_1_int32::createTestModel,
-                           argmin_1_int32::is_ignored,
-                           argmin_1_int32::examples);
+                           argmin_1::createTestModel_int32,
+                           argmin_1::is_ignored_int32,
+                           argmin_1::get_examples_int32());
 }
 
 TEST_F(ValidationTest, argmin_1_int32) {
-  const Model model = argmin_1_int32::createTestModel();
-  const std::vector<Request> requests = createRequests(argmin_1_int32::examples);
+  const Model model = argmin_1::createTestModel_int32();
+  const std::vector<Request> requests = createRequests(argmin_1::get_examples_int32());
   validateModel(model);
   validateRequests(model, requests);
 }
 
 
-// Generated from: argmin_1_quant8.mod.py.
-namespace argmin_1_quant8 {
-// Generated argmin_1_quant8 test
-#include "examples/argmin_1_quant8.example.cpp"
-// Generated model constructor
-#include "vts_models/argmin_1_quant8.model.cpp"
-} // namespace argmin_1_quant8
-
 TEST_F(NeuralnetworksHidlTest, argmin_1_quant8) {
   generated_tests::Execute(device,
-                           argmin_1_quant8::createTestModel,
-                           argmin_1_quant8::is_ignored,
-                           argmin_1_quant8::examples);
+                           argmin_1::createTestModel_quant8,
+                           argmin_1::is_ignored_quant8,
+                           argmin_1::get_examples_quant8());
 }
 
 TEST_F(ValidationTest, argmin_1_quant8) {
-  const Model model = argmin_1_quant8::createTestModel();
-  const std::vector<Request> requests = createRequests(argmin_1_quant8::examples);
+  const Model model = argmin_1::createTestModel_quant8();
+  const std::vector<Request> requests = createRequests(argmin_1::get_examples_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
 
 
-// Generated from: argmin_2_float.mod.py.
-namespace argmin_2_float {
-// Generated argmin_2_float test
-#include "examples/argmin_2_float.example.cpp"
+// Generated from: argmin_2.mod.py.
+namespace argmin_2 {
+// Generated argmin_2 test
+#include "examples/argmin_2.example.cpp"
 // Generated model constructor
-#include "vts_models/argmin_2_float.model.cpp"
-} // namespace argmin_2_float
+#include "vts_models/argmin_2.model.cpp"
+} // namespace argmin_2
 
-TEST_F(NeuralnetworksHidlTest, argmin_2_float) {
+TEST_F(NeuralnetworksHidlTest, argmin_2) {
   generated_tests::Execute(device,
-                           argmin_2_float::createTestModel,
-                           argmin_2_float::is_ignored,
-                           argmin_2_float::examples);
+                           argmin_2::createTestModel,
+                           argmin_2::is_ignored,
+                           argmin_2::get_examples());
 }
 
-TEST_F(ValidationTest, argmin_2_float) {
-  const Model model = argmin_2_float::createTestModel();
-  const std::vector<Request> requests = createRequests(argmin_2_float::examples);
+TEST_F(ValidationTest, argmin_2) {
+  const Model model = argmin_2::createTestModel();
+  const std::vector<Request> requests = createRequests(argmin_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
 
 
-// Generated from: argmin_2_float_relaxed.mod.py.
-namespace argmin_2_float_relaxed {
-// Generated argmin_2_float_relaxed test
-#include "examples/argmin_2_float_relaxed.example.cpp"
-// Generated model constructor
-#include "vts_models/argmin_2_float_relaxed.model.cpp"
-} // namespace argmin_2_float_relaxed
-
-TEST_F(NeuralnetworksHidlTest, argmin_2_float_relaxed) {
+TEST_F(NeuralnetworksHidlTest, argmin_2_relaxed) {
   generated_tests::Execute(device,
-                           argmin_2_float_relaxed::createTestModel,
-                           argmin_2_float_relaxed::is_ignored,
-                           argmin_2_float_relaxed::examples);
+                           argmin_2::createTestModel_relaxed,
+                           argmin_2::is_ignored_relaxed,
+                           argmin_2::get_examples_relaxed());
 }
 
-TEST_F(ValidationTest, argmin_2_float_relaxed) {
-  const Model model = argmin_2_float_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(argmin_2_float_relaxed::examples);
+TEST_F(ValidationTest, argmin_2_relaxed) {
+  const Model model = argmin_2::createTestModel_relaxed();
+  const std::vector<Request> requests = createRequests(argmin_2::get_examples_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
 
 
-// Generated from: argmin_2_int32.mod.py.
-namespace argmin_2_int32 {
-// Generated argmin_2_int32 test
-#include "examples/argmin_2_int32.example.cpp"
-// Generated model constructor
-#include "vts_models/argmin_2_int32.model.cpp"
-} // namespace argmin_2_int32
+TEST_F(NeuralnetworksHidlTest, argmin_2_float16) {
+  generated_tests::Execute(device,
+                           argmin_2::createTestModel_float16,
+                           argmin_2::is_ignored_float16,
+                           argmin_2::get_examples_float16());
+}
+
+TEST_F(ValidationTest, argmin_2_float16) {
+  const Model model = argmin_2::createTestModel_float16();
+  const std::vector<Request> requests = createRequests(argmin_2::get_examples_float16());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
 
 TEST_F(NeuralnetworksHidlTest, argmin_2_int32) {
   generated_tests::Execute(device,
-                           argmin_2_int32::createTestModel,
-                           argmin_2_int32::is_ignored,
-                           argmin_2_int32::examples);
+                           argmin_2::createTestModel_int32,
+                           argmin_2::is_ignored_int32,
+                           argmin_2::get_examples_int32());
 }
 
 TEST_F(ValidationTest, argmin_2_int32) {
-  const Model model = argmin_2_int32::createTestModel();
-  const std::vector<Request> requests = createRequests(argmin_2_int32::examples);
+  const Model model = argmin_2::createTestModel_int32();
+  const std::vector<Request> requests = createRequests(argmin_2::get_examples_int32());
   validateModel(model);
   validateRequests(model, requests);
 }
 
 
-// Generated from: argmin_2_quant8.mod.py.
-namespace argmin_2_quant8 {
-// Generated argmin_2_quant8 test
-#include "examples/argmin_2_quant8.example.cpp"
-// Generated model constructor
-#include "vts_models/argmin_2_quant8.model.cpp"
-} // namespace argmin_2_quant8
-
 TEST_F(NeuralnetworksHidlTest, argmin_2_quant8) {
   generated_tests::Execute(device,
-                           argmin_2_quant8::createTestModel,
-                           argmin_2_quant8::is_ignored,
-                           argmin_2_quant8::examples);
+                           argmin_2::createTestModel_quant8,
+                           argmin_2::is_ignored_quant8,
+                           argmin_2::get_examples_quant8());
 }
 
 TEST_F(ValidationTest, argmin_2_quant8) {
-  const Model model = argmin_2_quant8::createTestModel();
-  const std::vector<Request> requests = createRequests(argmin_2_quant8::examples);
+  const Model model = argmin_2::createTestModel_quant8();
+  const std::vector<Request> requests = createRequests(argmin_2::get_examples_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
 
 
-// Generated from: argmin_3_float.mod.py.
-namespace argmin_3_float {
-// Generated argmin_3_float test
-#include "examples/argmin_3_float.example.cpp"
+// Generated from: argmin_3.mod.py.
+namespace argmin_3 {
+// Generated argmin_3 test
+#include "examples/argmin_3.example.cpp"
 // Generated model constructor
-#include "vts_models/argmin_3_float.model.cpp"
-} // namespace argmin_3_float
+#include "vts_models/argmin_3.model.cpp"
+} // namespace argmin_3
 
-TEST_F(NeuralnetworksHidlTest, argmin_3_float) {
+TEST_F(NeuralnetworksHidlTest, argmin_3) {
   generated_tests::Execute(device,
-                           argmin_3_float::createTestModel,
-                           argmin_3_float::is_ignored,
-                           argmin_3_float::examples);
+                           argmin_3::createTestModel,
+                           argmin_3::is_ignored,
+                           argmin_3::get_examples());
 }
 
-TEST_F(ValidationTest, argmin_3_float) {
-  const Model model = argmin_3_float::createTestModel();
-  const std::vector<Request> requests = createRequests(argmin_3_float::examples);
+TEST_F(ValidationTest, argmin_3) {
+  const Model model = argmin_3::createTestModel();
+  const std::vector<Request> requests = createRequests(argmin_3::get_examples());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, argmin_3_relaxed) {
+  generated_tests::Execute(device,
+                           argmin_3::createTestModel_relaxed,
+                           argmin_3::is_ignored_relaxed,
+                           argmin_3::get_examples_relaxed());
+}
+
+TEST_F(ValidationTest, argmin_3_relaxed) {
+  const Model model = argmin_3::createTestModel_relaxed();
+  const std::vector<Request> requests = createRequests(argmin_3::get_examples_relaxed());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, argmin_3_float16) {
+  generated_tests::Execute(device,
+                           argmin_3::createTestModel_float16,
+                           argmin_3::is_ignored_float16,
+                           argmin_3::get_examples_float16());
+}
+
+TEST_F(ValidationTest, argmin_3_float16) {
+  const Model model = argmin_3::createTestModel_float16();
+  const std::vector<Request> requests = createRequests(argmin_3::get_examples_float16());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, argmin_3_int32) {
+  generated_tests::Execute(device,
+                           argmin_3::createTestModel_int32,
+                           argmin_3::is_ignored_int32,
+                           argmin_3::get_examples_int32());
+}
+
+TEST_F(ValidationTest, argmin_3_int32) {
+  const Model model = argmin_3::createTestModel_int32();
+  const std::vector<Request> requests = createRequests(argmin_3::get_examples_int32());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, argmin_3_quant8) {
+  generated_tests::Execute(device,
+                           argmin_3::createTestModel_quant8,
+                           argmin_3::is_ignored_quant8,
+                           argmin_3::get_examples_quant8());
+}
+
+TEST_F(ValidationTest, argmin_3_quant8) {
+  const Model model = argmin_3::createTestModel_quant8();
+  const std::vector<Request> requests = createRequests(argmin_3::get_examples_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -473,12 +610,12 @@
   generated_tests::Execute(device,
                            avg_pool_v1_2::createTestModel_nhwc,
                            avg_pool_v1_2::is_ignored_nhwc,
-                           avg_pool_v1_2::examples_nhwc);
+                           avg_pool_v1_2::get_examples_nhwc());
 }
 
 TEST_F(ValidationTest, avg_pool_v1_2_nhwc) {
   const Model model = avg_pool_v1_2::createTestModel_nhwc();
-  const std::vector<Request> requests = createRequests(avg_pool_v1_2::examples_nhwc);
+  const std::vector<Request> requests = createRequests(avg_pool_v1_2::get_examples_nhwc());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -488,12 +625,12 @@
   generated_tests::Execute(device,
                            avg_pool_v1_2::createTestModel_nhwc_relaxed,
                            avg_pool_v1_2::is_ignored_nhwc_relaxed,
-                           avg_pool_v1_2::examples_nhwc_relaxed);
+                           avg_pool_v1_2::get_examples_nhwc_relaxed());
 }
 
 TEST_F(ValidationTest, avg_pool_v1_2_nhwc_relaxed) {
   const Model model = avg_pool_v1_2::createTestModel_nhwc_relaxed();
-  const std::vector<Request> requests = createRequests(avg_pool_v1_2::examples_nhwc_relaxed);
+  const std::vector<Request> requests = createRequests(avg_pool_v1_2::get_examples_nhwc_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -503,12 +640,12 @@
   generated_tests::Execute(device,
                            avg_pool_v1_2::createTestModel_nhwc_quant8,
                            avg_pool_v1_2::is_ignored_nhwc_quant8,
-                           avg_pool_v1_2::examples_nhwc_quant8);
+                           avg_pool_v1_2::get_examples_nhwc_quant8());
 }
 
 TEST_F(ValidationTest, avg_pool_v1_2_nhwc_quant8) {
   const Model model = avg_pool_v1_2::createTestModel_nhwc_quant8();
-  const std::vector<Request> requests = createRequests(avg_pool_v1_2::examples_nhwc_quant8);
+  const std::vector<Request> requests = createRequests(avg_pool_v1_2::get_examples_nhwc_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -518,12 +655,12 @@
   generated_tests::Execute(device,
                            avg_pool_v1_2::createTestModel_nchw,
                            avg_pool_v1_2::is_ignored_nchw,
-                           avg_pool_v1_2::examples_nchw);
+                           avg_pool_v1_2::get_examples_nchw());
 }
 
 TEST_F(ValidationTest, avg_pool_v1_2_nchw) {
   const Model model = avg_pool_v1_2::createTestModel_nchw();
-  const std::vector<Request> requests = createRequests(avg_pool_v1_2::examples_nchw);
+  const std::vector<Request> requests = createRequests(avg_pool_v1_2::get_examples_nchw());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -533,12 +670,12 @@
   generated_tests::Execute(device,
                            avg_pool_v1_2::createTestModel_nchw_relaxed,
                            avg_pool_v1_2::is_ignored_nchw_relaxed,
-                           avg_pool_v1_2::examples_nchw_relaxed);
+                           avg_pool_v1_2::get_examples_nchw_relaxed());
 }
 
 TEST_F(ValidationTest, avg_pool_v1_2_nchw_relaxed) {
   const Model model = avg_pool_v1_2::createTestModel_nchw_relaxed();
-  const std::vector<Request> requests = createRequests(avg_pool_v1_2::examples_nchw_relaxed);
+  const std::vector<Request> requests = createRequests(avg_pool_v1_2::get_examples_nchw_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -548,12 +685,12 @@
   generated_tests::Execute(device,
                            avg_pool_v1_2::createTestModel_nchw_quant8,
                            avg_pool_v1_2::is_ignored_nchw_quant8,
-                           avg_pool_v1_2::examples_nchw_quant8);
+                           avg_pool_v1_2::get_examples_nchw_quant8());
 }
 
 TEST_F(ValidationTest, avg_pool_v1_2_nchw_quant8) {
   const Model model = avg_pool_v1_2::createTestModel_nchw_quant8();
-  const std::vector<Request> requests = createRequests(avg_pool_v1_2::examples_nchw_quant8);
+  const std::vector<Request> requests = createRequests(avg_pool_v1_2::get_examples_nchw_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -563,12 +700,12 @@
   generated_tests::Execute(device,
                            avg_pool_v1_2::createTestModel_nhwc_2,
                            avg_pool_v1_2::is_ignored_nhwc_2,
-                           avg_pool_v1_2::examples_nhwc_2);
+                           avg_pool_v1_2::get_examples_nhwc_2());
 }
 
 TEST_F(ValidationTest, avg_pool_v1_2_nhwc_2) {
   const Model model = avg_pool_v1_2::createTestModel_nhwc_2();
-  const std::vector<Request> requests = createRequests(avg_pool_v1_2::examples_nhwc_2);
+  const std::vector<Request> requests = createRequests(avg_pool_v1_2::get_examples_nhwc_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -578,12 +715,12 @@
   generated_tests::Execute(device,
                            avg_pool_v1_2::createTestModel_nhwc_relaxed_2,
                            avg_pool_v1_2::is_ignored_nhwc_relaxed_2,
-                           avg_pool_v1_2::examples_nhwc_relaxed_2);
+                           avg_pool_v1_2::get_examples_nhwc_relaxed_2());
 }
 
 TEST_F(ValidationTest, avg_pool_v1_2_nhwc_relaxed_2) {
   const Model model = avg_pool_v1_2::createTestModel_nhwc_relaxed_2();
-  const std::vector<Request> requests = createRequests(avg_pool_v1_2::examples_nhwc_relaxed_2);
+  const std::vector<Request> requests = createRequests(avg_pool_v1_2::get_examples_nhwc_relaxed_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -593,12 +730,12 @@
   generated_tests::Execute(device,
                            avg_pool_v1_2::createTestModel_nhwc_quant8_2,
                            avg_pool_v1_2::is_ignored_nhwc_quant8_2,
-                           avg_pool_v1_2::examples_nhwc_quant8_2);
+                           avg_pool_v1_2::get_examples_nhwc_quant8_2());
 }
 
 TEST_F(ValidationTest, avg_pool_v1_2_nhwc_quant8_2) {
   const Model model = avg_pool_v1_2::createTestModel_nhwc_quant8_2();
-  const std::vector<Request> requests = createRequests(avg_pool_v1_2::examples_nhwc_quant8_2);
+  const std::vector<Request> requests = createRequests(avg_pool_v1_2::get_examples_nhwc_quant8_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -608,12 +745,12 @@
   generated_tests::Execute(device,
                            avg_pool_v1_2::createTestModel_nchw_2,
                            avg_pool_v1_2::is_ignored_nchw_2,
-                           avg_pool_v1_2::examples_nchw_2);
+                           avg_pool_v1_2::get_examples_nchw_2());
 }
 
 TEST_F(ValidationTest, avg_pool_v1_2_nchw_2) {
   const Model model = avg_pool_v1_2::createTestModel_nchw_2();
-  const std::vector<Request> requests = createRequests(avg_pool_v1_2::examples_nchw_2);
+  const std::vector<Request> requests = createRequests(avg_pool_v1_2::get_examples_nchw_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -623,12 +760,12 @@
   generated_tests::Execute(device,
                            avg_pool_v1_2::createTestModel_nchw_relaxed_2,
                            avg_pool_v1_2::is_ignored_nchw_relaxed_2,
-                           avg_pool_v1_2::examples_nchw_relaxed_2);
+                           avg_pool_v1_2::get_examples_nchw_relaxed_2());
 }
 
 TEST_F(ValidationTest, avg_pool_v1_2_nchw_relaxed_2) {
   const Model model = avg_pool_v1_2::createTestModel_nchw_relaxed_2();
-  const std::vector<Request> requests = createRequests(avg_pool_v1_2::examples_nchw_relaxed_2);
+  const std::vector<Request> requests = createRequests(avg_pool_v1_2::get_examples_nchw_relaxed_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -638,12 +775,12 @@
   generated_tests::Execute(device,
                            avg_pool_v1_2::createTestModel_nchw_quant8_2,
                            avg_pool_v1_2::is_ignored_nchw_quant8_2,
-                           avg_pool_v1_2::examples_nchw_quant8_2);
+                           avg_pool_v1_2::get_examples_nchw_quant8_2());
 }
 
 TEST_F(ValidationTest, avg_pool_v1_2_nchw_quant8_2) {
   const Model model = avg_pool_v1_2::createTestModel_nchw_quant8_2();
-  const std::vector<Request> requests = createRequests(avg_pool_v1_2::examples_nchw_quant8_2);
+  const std::vector<Request> requests = createRequests(avg_pool_v1_2::get_examples_nchw_quant8_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -653,12 +790,12 @@
   generated_tests::Execute(device,
                            avg_pool_v1_2::createTestModel_nhwc_3,
                            avg_pool_v1_2::is_ignored_nhwc_3,
-                           avg_pool_v1_2::examples_nhwc_3);
+                           avg_pool_v1_2::get_examples_nhwc_3());
 }
 
 TEST_F(ValidationTest, avg_pool_v1_2_nhwc_3) {
   const Model model = avg_pool_v1_2::createTestModel_nhwc_3();
-  const std::vector<Request> requests = createRequests(avg_pool_v1_2::examples_nhwc_3);
+  const std::vector<Request> requests = createRequests(avg_pool_v1_2::get_examples_nhwc_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -668,12 +805,12 @@
   generated_tests::Execute(device,
                            avg_pool_v1_2::createTestModel_nhwc_relaxed_3,
                            avg_pool_v1_2::is_ignored_nhwc_relaxed_3,
-                           avg_pool_v1_2::examples_nhwc_relaxed_3);
+                           avg_pool_v1_2::get_examples_nhwc_relaxed_3());
 }
 
 TEST_F(ValidationTest, avg_pool_v1_2_nhwc_relaxed_3) {
   const Model model = avg_pool_v1_2::createTestModel_nhwc_relaxed_3();
-  const std::vector<Request> requests = createRequests(avg_pool_v1_2::examples_nhwc_relaxed_3);
+  const std::vector<Request> requests = createRequests(avg_pool_v1_2::get_examples_nhwc_relaxed_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -683,12 +820,12 @@
   generated_tests::Execute(device,
                            avg_pool_v1_2::createTestModel_nhwc_quant8_3,
                            avg_pool_v1_2::is_ignored_nhwc_quant8_3,
-                           avg_pool_v1_2::examples_nhwc_quant8_3);
+                           avg_pool_v1_2::get_examples_nhwc_quant8_3());
 }
 
 TEST_F(ValidationTest, avg_pool_v1_2_nhwc_quant8_3) {
   const Model model = avg_pool_v1_2::createTestModel_nhwc_quant8_3();
-  const std::vector<Request> requests = createRequests(avg_pool_v1_2::examples_nhwc_quant8_3);
+  const std::vector<Request> requests = createRequests(avg_pool_v1_2::get_examples_nhwc_quant8_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -698,12 +835,12 @@
   generated_tests::Execute(device,
                            avg_pool_v1_2::createTestModel_nchw_3,
                            avg_pool_v1_2::is_ignored_nchw_3,
-                           avg_pool_v1_2::examples_nchw_3);
+                           avg_pool_v1_2::get_examples_nchw_3());
 }
 
 TEST_F(ValidationTest, avg_pool_v1_2_nchw_3) {
   const Model model = avg_pool_v1_2::createTestModel_nchw_3();
-  const std::vector<Request> requests = createRequests(avg_pool_v1_2::examples_nchw_3);
+  const std::vector<Request> requests = createRequests(avg_pool_v1_2::get_examples_nchw_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -713,12 +850,12 @@
   generated_tests::Execute(device,
                            avg_pool_v1_2::createTestModel_nchw_relaxed_3,
                            avg_pool_v1_2::is_ignored_nchw_relaxed_3,
-                           avg_pool_v1_2::examples_nchw_relaxed_3);
+                           avg_pool_v1_2::get_examples_nchw_relaxed_3());
 }
 
 TEST_F(ValidationTest, avg_pool_v1_2_nchw_relaxed_3) {
   const Model model = avg_pool_v1_2::createTestModel_nchw_relaxed_3();
-  const std::vector<Request> requests = createRequests(avg_pool_v1_2::examples_nchw_relaxed_3);
+  const std::vector<Request> requests = createRequests(avg_pool_v1_2::get_examples_nchw_relaxed_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -728,12 +865,12 @@
   generated_tests::Execute(device,
                            avg_pool_v1_2::createTestModel_nchw_quant8_3,
                            avg_pool_v1_2::is_ignored_nchw_quant8_3,
-                           avg_pool_v1_2::examples_nchw_quant8_3);
+                           avg_pool_v1_2::get_examples_nchw_quant8_3());
 }
 
 TEST_F(ValidationTest, avg_pool_v1_2_nchw_quant8_3) {
   const Model model = avg_pool_v1_2::createTestModel_nchw_quant8_3();
-  const std::vector<Request> requests = createRequests(avg_pool_v1_2::examples_nchw_quant8_3);
+  const std::vector<Request> requests = createRequests(avg_pool_v1_2::get_examples_nchw_quant8_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -743,12 +880,12 @@
   generated_tests::Execute(device,
                            avg_pool_v1_2::createTestModel_nhwc_4,
                            avg_pool_v1_2::is_ignored_nhwc_4,
-                           avg_pool_v1_2::examples_nhwc_4);
+                           avg_pool_v1_2::get_examples_nhwc_4());
 }
 
 TEST_F(ValidationTest, avg_pool_v1_2_nhwc_4) {
   const Model model = avg_pool_v1_2::createTestModel_nhwc_4();
-  const std::vector<Request> requests = createRequests(avg_pool_v1_2::examples_nhwc_4);
+  const std::vector<Request> requests = createRequests(avg_pool_v1_2::get_examples_nhwc_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -758,12 +895,12 @@
   generated_tests::Execute(device,
                            avg_pool_v1_2::createTestModel_nhwc_relaxed_4,
                            avg_pool_v1_2::is_ignored_nhwc_relaxed_4,
-                           avg_pool_v1_2::examples_nhwc_relaxed_4);
+                           avg_pool_v1_2::get_examples_nhwc_relaxed_4());
 }
 
 TEST_F(ValidationTest, avg_pool_v1_2_nhwc_relaxed_4) {
   const Model model = avg_pool_v1_2::createTestModel_nhwc_relaxed_4();
-  const std::vector<Request> requests = createRequests(avg_pool_v1_2::examples_nhwc_relaxed_4);
+  const std::vector<Request> requests = createRequests(avg_pool_v1_2::get_examples_nhwc_relaxed_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -773,12 +910,12 @@
   generated_tests::Execute(device,
                            avg_pool_v1_2::createTestModel_nhwc_quant8_4,
                            avg_pool_v1_2::is_ignored_nhwc_quant8_4,
-                           avg_pool_v1_2::examples_nhwc_quant8_4);
+                           avg_pool_v1_2::get_examples_nhwc_quant8_4());
 }
 
 TEST_F(ValidationTest, avg_pool_v1_2_nhwc_quant8_4) {
   const Model model = avg_pool_v1_2::createTestModel_nhwc_quant8_4();
-  const std::vector<Request> requests = createRequests(avg_pool_v1_2::examples_nhwc_quant8_4);
+  const std::vector<Request> requests = createRequests(avg_pool_v1_2::get_examples_nhwc_quant8_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -788,12 +925,12 @@
   generated_tests::Execute(device,
                            avg_pool_v1_2::createTestModel_nchw_4,
                            avg_pool_v1_2::is_ignored_nchw_4,
-                           avg_pool_v1_2::examples_nchw_4);
+                           avg_pool_v1_2::get_examples_nchw_4());
 }
 
 TEST_F(ValidationTest, avg_pool_v1_2_nchw_4) {
   const Model model = avg_pool_v1_2::createTestModel_nchw_4();
-  const std::vector<Request> requests = createRequests(avg_pool_v1_2::examples_nchw_4);
+  const std::vector<Request> requests = createRequests(avg_pool_v1_2::get_examples_nchw_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -803,12 +940,12 @@
   generated_tests::Execute(device,
                            avg_pool_v1_2::createTestModel_nchw_relaxed_4,
                            avg_pool_v1_2::is_ignored_nchw_relaxed_4,
-                           avg_pool_v1_2::examples_nchw_relaxed_4);
+                           avg_pool_v1_2::get_examples_nchw_relaxed_4());
 }
 
 TEST_F(ValidationTest, avg_pool_v1_2_nchw_relaxed_4) {
   const Model model = avg_pool_v1_2::createTestModel_nchw_relaxed_4();
-  const std::vector<Request> requests = createRequests(avg_pool_v1_2::examples_nchw_relaxed_4);
+  const std::vector<Request> requests = createRequests(avg_pool_v1_2::get_examples_nchw_relaxed_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -818,12 +955,12 @@
   generated_tests::Execute(device,
                            avg_pool_v1_2::createTestModel_nchw_quant8_4,
                            avg_pool_v1_2::is_ignored_nchw_quant8_4,
-                           avg_pool_v1_2::examples_nchw_quant8_4);
+                           avg_pool_v1_2::get_examples_nchw_quant8_4());
 }
 
 TEST_F(ValidationTest, avg_pool_v1_2_nchw_quant8_4) {
   const Model model = avg_pool_v1_2::createTestModel_nchw_quant8_4();
-  const std::vector<Request> requests = createRequests(avg_pool_v1_2::examples_nchw_quant8_4);
+  const std::vector<Request> requests = createRequests(avg_pool_v1_2::get_examples_nchw_quant8_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -833,12 +970,12 @@
   generated_tests::Execute(device,
                            avg_pool_v1_2::createTestModel_nhwc_5,
                            avg_pool_v1_2::is_ignored_nhwc_5,
-                           avg_pool_v1_2::examples_nhwc_5);
+                           avg_pool_v1_2::get_examples_nhwc_5());
 }
 
 TEST_F(ValidationTest, avg_pool_v1_2_nhwc_5) {
   const Model model = avg_pool_v1_2::createTestModel_nhwc_5();
-  const std::vector<Request> requests = createRequests(avg_pool_v1_2::examples_nhwc_5);
+  const std::vector<Request> requests = createRequests(avg_pool_v1_2::get_examples_nhwc_5());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -848,12 +985,12 @@
   generated_tests::Execute(device,
                            avg_pool_v1_2::createTestModel_nhwc_relaxed_5,
                            avg_pool_v1_2::is_ignored_nhwc_relaxed_5,
-                           avg_pool_v1_2::examples_nhwc_relaxed_5);
+                           avg_pool_v1_2::get_examples_nhwc_relaxed_5());
 }
 
 TEST_F(ValidationTest, avg_pool_v1_2_nhwc_relaxed_5) {
   const Model model = avg_pool_v1_2::createTestModel_nhwc_relaxed_5();
-  const std::vector<Request> requests = createRequests(avg_pool_v1_2::examples_nhwc_relaxed_5);
+  const std::vector<Request> requests = createRequests(avg_pool_v1_2::get_examples_nhwc_relaxed_5());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -863,12 +1000,12 @@
   generated_tests::Execute(device,
                            avg_pool_v1_2::createTestModel_nhwc_quant8_5,
                            avg_pool_v1_2::is_ignored_nhwc_quant8_5,
-                           avg_pool_v1_2::examples_nhwc_quant8_5);
+                           avg_pool_v1_2::get_examples_nhwc_quant8_5());
 }
 
 TEST_F(ValidationTest, avg_pool_v1_2_nhwc_quant8_5) {
   const Model model = avg_pool_v1_2::createTestModel_nhwc_quant8_5();
-  const std::vector<Request> requests = createRequests(avg_pool_v1_2::examples_nhwc_quant8_5);
+  const std::vector<Request> requests = createRequests(avg_pool_v1_2::get_examples_nhwc_quant8_5());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -878,12 +1015,12 @@
   generated_tests::Execute(device,
                            avg_pool_v1_2::createTestModel_nchw_5,
                            avg_pool_v1_2::is_ignored_nchw_5,
-                           avg_pool_v1_2::examples_nchw_5);
+                           avg_pool_v1_2::get_examples_nchw_5());
 }
 
 TEST_F(ValidationTest, avg_pool_v1_2_nchw_5) {
   const Model model = avg_pool_v1_2::createTestModel_nchw_5();
-  const std::vector<Request> requests = createRequests(avg_pool_v1_2::examples_nchw_5);
+  const std::vector<Request> requests = createRequests(avg_pool_v1_2::get_examples_nchw_5());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -893,12 +1030,12 @@
   generated_tests::Execute(device,
                            avg_pool_v1_2::createTestModel_nchw_relaxed_5,
                            avg_pool_v1_2::is_ignored_nchw_relaxed_5,
-                           avg_pool_v1_2::examples_nchw_relaxed_5);
+                           avg_pool_v1_2::get_examples_nchw_relaxed_5());
 }
 
 TEST_F(ValidationTest, avg_pool_v1_2_nchw_relaxed_5) {
   const Model model = avg_pool_v1_2::createTestModel_nchw_relaxed_5();
-  const std::vector<Request> requests = createRequests(avg_pool_v1_2::examples_nchw_relaxed_5);
+  const std::vector<Request> requests = createRequests(avg_pool_v1_2::get_examples_nchw_relaxed_5());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -908,12 +1045,12 @@
   generated_tests::Execute(device,
                            avg_pool_v1_2::createTestModel_nchw_quant8_5,
                            avg_pool_v1_2::is_ignored_nchw_quant8_5,
-                           avg_pool_v1_2::examples_nchw_quant8_5);
+                           avg_pool_v1_2::get_examples_nchw_quant8_5());
 }
 
 TEST_F(ValidationTest, avg_pool_v1_2_nchw_quant8_5) {
   const Model model = avg_pool_v1_2::createTestModel_nchw_quant8_5();
-  const std::vector<Request> requests = createRequests(avg_pool_v1_2::examples_nchw_quant8_5);
+  const std::vector<Request> requests = createRequests(avg_pool_v1_2::get_examples_nchw_quant8_5());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -931,12 +1068,12 @@
   generated_tests::Execute(device,
                            axis_aligned_bbox_transform::createTestModel,
                            axis_aligned_bbox_transform::is_ignored,
-                           axis_aligned_bbox_transform::examples);
+                           axis_aligned_bbox_transform::get_examples());
 }
 
 TEST_F(ValidationTest, axis_aligned_bbox_transform) {
   const Model model = axis_aligned_bbox_transform::createTestModel();
-  const std::vector<Request> requests = createRequests(axis_aligned_bbox_transform::examples);
+  const std::vector<Request> requests = createRequests(axis_aligned_bbox_transform::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -946,12 +1083,12 @@
   generated_tests::Execute(device,
                            axis_aligned_bbox_transform::createTestModel_relaxed,
                            axis_aligned_bbox_transform::is_ignored_relaxed,
-                           axis_aligned_bbox_transform::examples_relaxed);
+                           axis_aligned_bbox_transform::get_examples_relaxed());
 }
 
 TEST_F(ValidationTest, axis_aligned_bbox_transform_relaxed) {
   const Model model = axis_aligned_bbox_transform::createTestModel_relaxed();
-  const std::vector<Request> requests = createRequests(axis_aligned_bbox_transform::examples_relaxed);
+  const std::vector<Request> requests = createRequests(axis_aligned_bbox_transform::get_examples_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -961,12 +1098,12 @@
   generated_tests::Execute(device,
                            axis_aligned_bbox_transform::createTestModel_2,
                            axis_aligned_bbox_transform::is_ignored_2,
-                           axis_aligned_bbox_transform::examples_2);
+                           axis_aligned_bbox_transform::get_examples_2());
 }
 
 TEST_F(ValidationTest, axis_aligned_bbox_transform_2) {
   const Model model = axis_aligned_bbox_transform::createTestModel_2();
-  const std::vector<Request> requests = createRequests(axis_aligned_bbox_transform::examples_2);
+  const std::vector<Request> requests = createRequests(axis_aligned_bbox_transform::get_examples_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -976,12 +1113,12 @@
   generated_tests::Execute(device,
                            axis_aligned_bbox_transform::createTestModel_relaxed_2,
                            axis_aligned_bbox_transform::is_ignored_relaxed_2,
-                           axis_aligned_bbox_transform::examples_relaxed_2);
+                           axis_aligned_bbox_transform::get_examples_relaxed_2());
 }
 
 TEST_F(ValidationTest, axis_aligned_bbox_transform_relaxed_2) {
   const Model model = axis_aligned_bbox_transform::createTestModel_relaxed_2();
-  const std::vector<Request> requests = createRequests(axis_aligned_bbox_transform::examples_relaxed_2);
+  const std::vector<Request> requests = createRequests(axis_aligned_bbox_transform::get_examples_relaxed_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -991,12 +1128,12 @@
   generated_tests::Execute(device,
                            axis_aligned_bbox_transform::createTestModel_single_batch,
                            axis_aligned_bbox_transform::is_ignored_single_batch,
-                           axis_aligned_bbox_transform::examples_single_batch);
+                           axis_aligned_bbox_transform::get_examples_single_batch());
 }
 
 TEST_F(ValidationTest, axis_aligned_bbox_transform_single_batch) {
   const Model model = axis_aligned_bbox_transform::createTestModel_single_batch();
-  const std::vector<Request> requests = createRequests(axis_aligned_bbox_transform::examples_single_batch);
+  const std::vector<Request> requests = createRequests(axis_aligned_bbox_transform::get_examples_single_batch());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1006,12 +1143,12 @@
   generated_tests::Execute(device,
                            axis_aligned_bbox_transform::createTestModel_single_batch_relaxed,
                            axis_aligned_bbox_transform::is_ignored_single_batch_relaxed,
-                           axis_aligned_bbox_transform::examples_single_batch_relaxed);
+                           axis_aligned_bbox_transform::get_examples_single_batch_relaxed());
 }
 
 TEST_F(ValidationTest, axis_aligned_bbox_transform_single_batch_relaxed) {
   const Model model = axis_aligned_bbox_transform::createTestModel_single_batch_relaxed();
-  const std::vector<Request> requests = createRequests(axis_aligned_bbox_transform::examples_single_batch_relaxed);
+  const std::vector<Request> requests = createRequests(axis_aligned_bbox_transform::get_examples_single_batch_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1029,12 +1166,12 @@
   generated_tests::Execute(device,
                            batch_to_space_v1_2::createTestModel_nhwc,
                            batch_to_space_v1_2::is_ignored_nhwc,
-                           batch_to_space_v1_2::examples_nhwc);
+                           batch_to_space_v1_2::get_examples_nhwc());
 }
 
 TEST_F(ValidationTest, batch_to_space_v1_2_nhwc) {
   const Model model = batch_to_space_v1_2::createTestModel_nhwc();
-  const std::vector<Request> requests = createRequests(batch_to_space_v1_2::examples_nhwc);
+  const std::vector<Request> requests = createRequests(batch_to_space_v1_2::get_examples_nhwc());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1044,12 +1181,27 @@
   generated_tests::Execute(device,
                            batch_to_space_v1_2::createTestModel_nhwc_relaxed,
                            batch_to_space_v1_2::is_ignored_nhwc_relaxed,
-                           batch_to_space_v1_2::examples_nhwc_relaxed);
+                           batch_to_space_v1_2::get_examples_nhwc_relaxed());
 }
 
 TEST_F(ValidationTest, batch_to_space_v1_2_nhwc_relaxed) {
   const Model model = batch_to_space_v1_2::createTestModel_nhwc_relaxed();
-  const std::vector<Request> requests = createRequests(batch_to_space_v1_2::examples_nhwc_relaxed);
+  const std::vector<Request> requests = createRequests(batch_to_space_v1_2::get_examples_nhwc_relaxed());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, batch_to_space_v1_2_nhwc_float16) {
+  generated_tests::Execute(device,
+                           batch_to_space_v1_2::createTestModel_nhwc_float16,
+                           batch_to_space_v1_2::is_ignored_nhwc_float16,
+                           batch_to_space_v1_2::get_examples_nhwc_float16());
+}
+
+TEST_F(ValidationTest, batch_to_space_v1_2_nhwc_float16) {
+  const Model model = batch_to_space_v1_2::createTestModel_nhwc_float16();
+  const std::vector<Request> requests = createRequests(batch_to_space_v1_2::get_examples_nhwc_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1059,12 +1211,12 @@
   generated_tests::Execute(device,
                            batch_to_space_v1_2::createTestModel_nhwc_quant8,
                            batch_to_space_v1_2::is_ignored_nhwc_quant8,
-                           batch_to_space_v1_2::examples_nhwc_quant8);
+                           batch_to_space_v1_2::get_examples_nhwc_quant8());
 }
 
 TEST_F(ValidationTest, batch_to_space_v1_2_nhwc_quant8) {
   const Model model = batch_to_space_v1_2::createTestModel_nhwc_quant8();
-  const std::vector<Request> requests = createRequests(batch_to_space_v1_2::examples_nhwc_quant8);
+  const std::vector<Request> requests = createRequests(batch_to_space_v1_2::get_examples_nhwc_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1074,12 +1226,12 @@
   generated_tests::Execute(device,
                            batch_to_space_v1_2::createTestModel_nchw,
                            batch_to_space_v1_2::is_ignored_nchw,
-                           batch_to_space_v1_2::examples_nchw);
+                           batch_to_space_v1_2::get_examples_nchw());
 }
 
 TEST_F(ValidationTest, batch_to_space_v1_2_nchw) {
   const Model model = batch_to_space_v1_2::createTestModel_nchw();
-  const std::vector<Request> requests = createRequests(batch_to_space_v1_2::examples_nchw);
+  const std::vector<Request> requests = createRequests(batch_to_space_v1_2::get_examples_nchw());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1089,12 +1241,27 @@
   generated_tests::Execute(device,
                            batch_to_space_v1_2::createTestModel_nchw_relaxed,
                            batch_to_space_v1_2::is_ignored_nchw_relaxed,
-                           batch_to_space_v1_2::examples_nchw_relaxed);
+                           batch_to_space_v1_2::get_examples_nchw_relaxed());
 }
 
 TEST_F(ValidationTest, batch_to_space_v1_2_nchw_relaxed) {
   const Model model = batch_to_space_v1_2::createTestModel_nchw_relaxed();
-  const std::vector<Request> requests = createRequests(batch_to_space_v1_2::examples_nchw_relaxed);
+  const std::vector<Request> requests = createRequests(batch_to_space_v1_2::get_examples_nchw_relaxed());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, batch_to_space_v1_2_nchw_float16) {
+  generated_tests::Execute(device,
+                           batch_to_space_v1_2::createTestModel_nchw_float16,
+                           batch_to_space_v1_2::is_ignored_nchw_float16,
+                           batch_to_space_v1_2::get_examples_nchw_float16());
+}
+
+TEST_F(ValidationTest, batch_to_space_v1_2_nchw_float16) {
+  const Model model = batch_to_space_v1_2::createTestModel_nchw_float16();
+  const std::vector<Request> requests = createRequests(batch_to_space_v1_2::get_examples_nchw_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1104,12 +1271,12 @@
   generated_tests::Execute(device,
                            batch_to_space_v1_2::createTestModel_nchw_quant8,
                            batch_to_space_v1_2::is_ignored_nchw_quant8,
-                           batch_to_space_v1_2::examples_nchw_quant8);
+                           batch_to_space_v1_2::get_examples_nchw_quant8());
 }
 
 TEST_F(ValidationTest, batch_to_space_v1_2_nchw_quant8) {
   const Model model = batch_to_space_v1_2::createTestModel_nchw_quant8();
-  const std::vector<Request> requests = createRequests(batch_to_space_v1_2::examples_nchw_quant8);
+  const std::vector<Request> requests = createRequests(batch_to_space_v1_2::get_examples_nchw_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1119,12 +1286,12 @@
   generated_tests::Execute(device,
                            batch_to_space_v1_2::createTestModel_nhwc_2,
                            batch_to_space_v1_2::is_ignored_nhwc_2,
-                           batch_to_space_v1_2::examples_nhwc_2);
+                           batch_to_space_v1_2::get_examples_nhwc_2());
 }
 
 TEST_F(ValidationTest, batch_to_space_v1_2_nhwc_2) {
   const Model model = batch_to_space_v1_2::createTestModel_nhwc_2();
-  const std::vector<Request> requests = createRequests(batch_to_space_v1_2::examples_nhwc_2);
+  const std::vector<Request> requests = createRequests(batch_to_space_v1_2::get_examples_nhwc_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1134,12 +1301,27 @@
   generated_tests::Execute(device,
                            batch_to_space_v1_2::createTestModel_nhwc_relaxed_2,
                            batch_to_space_v1_2::is_ignored_nhwc_relaxed_2,
-                           batch_to_space_v1_2::examples_nhwc_relaxed_2);
+                           batch_to_space_v1_2::get_examples_nhwc_relaxed_2());
 }
 
 TEST_F(ValidationTest, batch_to_space_v1_2_nhwc_relaxed_2) {
   const Model model = batch_to_space_v1_2::createTestModel_nhwc_relaxed_2();
-  const std::vector<Request> requests = createRequests(batch_to_space_v1_2::examples_nhwc_relaxed_2);
+  const std::vector<Request> requests = createRequests(batch_to_space_v1_2::get_examples_nhwc_relaxed_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, batch_to_space_v1_2_nhwc_float16_2) {
+  generated_tests::Execute(device,
+                           batch_to_space_v1_2::createTestModel_nhwc_float16_2,
+                           batch_to_space_v1_2::is_ignored_nhwc_float16_2,
+                           batch_to_space_v1_2::get_examples_nhwc_float16_2());
+}
+
+TEST_F(ValidationTest, batch_to_space_v1_2_nhwc_float16_2) {
+  const Model model = batch_to_space_v1_2::createTestModel_nhwc_float16_2();
+  const std::vector<Request> requests = createRequests(batch_to_space_v1_2::get_examples_nhwc_float16_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1149,12 +1331,12 @@
   generated_tests::Execute(device,
                            batch_to_space_v1_2::createTestModel_nhwc_quant8_2,
                            batch_to_space_v1_2::is_ignored_nhwc_quant8_2,
-                           batch_to_space_v1_2::examples_nhwc_quant8_2);
+                           batch_to_space_v1_2::get_examples_nhwc_quant8_2());
 }
 
 TEST_F(ValidationTest, batch_to_space_v1_2_nhwc_quant8_2) {
   const Model model = batch_to_space_v1_2::createTestModel_nhwc_quant8_2();
-  const std::vector<Request> requests = createRequests(batch_to_space_v1_2::examples_nhwc_quant8_2);
+  const std::vector<Request> requests = createRequests(batch_to_space_v1_2::get_examples_nhwc_quant8_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1164,12 +1346,12 @@
   generated_tests::Execute(device,
                            batch_to_space_v1_2::createTestModel_nchw_2,
                            batch_to_space_v1_2::is_ignored_nchw_2,
-                           batch_to_space_v1_2::examples_nchw_2);
+                           batch_to_space_v1_2::get_examples_nchw_2());
 }
 
 TEST_F(ValidationTest, batch_to_space_v1_2_nchw_2) {
   const Model model = batch_to_space_v1_2::createTestModel_nchw_2();
-  const std::vector<Request> requests = createRequests(batch_to_space_v1_2::examples_nchw_2);
+  const std::vector<Request> requests = createRequests(batch_to_space_v1_2::get_examples_nchw_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1179,12 +1361,27 @@
   generated_tests::Execute(device,
                            batch_to_space_v1_2::createTestModel_nchw_relaxed_2,
                            batch_to_space_v1_2::is_ignored_nchw_relaxed_2,
-                           batch_to_space_v1_2::examples_nchw_relaxed_2);
+                           batch_to_space_v1_2::get_examples_nchw_relaxed_2());
 }
 
 TEST_F(ValidationTest, batch_to_space_v1_2_nchw_relaxed_2) {
   const Model model = batch_to_space_v1_2::createTestModel_nchw_relaxed_2();
-  const std::vector<Request> requests = createRequests(batch_to_space_v1_2::examples_nchw_relaxed_2);
+  const std::vector<Request> requests = createRequests(batch_to_space_v1_2::get_examples_nchw_relaxed_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, batch_to_space_v1_2_nchw_float16_2) {
+  generated_tests::Execute(device,
+                           batch_to_space_v1_2::createTestModel_nchw_float16_2,
+                           batch_to_space_v1_2::is_ignored_nchw_float16_2,
+                           batch_to_space_v1_2::get_examples_nchw_float16_2());
+}
+
+TEST_F(ValidationTest, batch_to_space_v1_2_nchw_float16_2) {
+  const Model model = batch_to_space_v1_2::createTestModel_nchw_float16_2();
+  const std::vector<Request> requests = createRequests(batch_to_space_v1_2::get_examples_nchw_float16_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1194,12 +1391,12 @@
   generated_tests::Execute(device,
                            batch_to_space_v1_2::createTestModel_nchw_quant8_2,
                            batch_to_space_v1_2::is_ignored_nchw_quant8_2,
-                           batch_to_space_v1_2::examples_nchw_quant8_2);
+                           batch_to_space_v1_2::get_examples_nchw_quant8_2());
 }
 
 TEST_F(ValidationTest, batch_to_space_v1_2_nchw_quant8_2) {
   const Model model = batch_to_space_v1_2::createTestModel_nchw_quant8_2();
-  const std::vector<Request> requests = createRequests(batch_to_space_v1_2::examples_nchw_quant8_2);
+  const std::vector<Request> requests = createRequests(batch_to_space_v1_2::get_examples_nchw_quant8_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1217,12 +1414,12 @@
   generated_tests::Execute(device,
                            cast::createTestModel,
                            cast::is_ignored,
-                           cast::examples_float16_to_float16);
+                           cast::get_examples_float16_to_float16());
 }
 
 TEST_F(ValidationTest, cast_float16_to_float16) {
   const Model model = cast::createTestModel();
-  const std::vector<Request> requests = createRequests(cast::examples_float16_to_float16);
+  const std::vector<Request> requests = createRequests(cast::get_examples_float16_to_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1232,12 +1429,12 @@
   generated_tests::Execute(device,
                            cast::createTestModel_2,
                            cast::is_ignored_2,
-                           cast::examples_float16_to_float32);
+                           cast::get_examples_float16_to_float32());
 }
 
 TEST_F(ValidationTest, cast_float16_to_float32) {
   const Model model = cast::createTestModel_2();
-  const std::vector<Request> requests = createRequests(cast::examples_float16_to_float32);
+  const std::vector<Request> requests = createRequests(cast::get_examples_float16_to_float32());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1247,12 +1444,12 @@
   generated_tests::Execute(device,
                            cast::createTestModel_relaxed,
                            cast::is_ignored_relaxed,
-                           cast::examples_float16_to_float32_relaxed);
+                           cast::get_examples_float16_to_float32_relaxed());
 }
 
 TEST_F(ValidationTest, cast_float16_to_float32_relaxed) {
   const Model model = cast::createTestModel_relaxed();
-  const std::vector<Request> requests = createRequests(cast::examples_float16_to_float32_relaxed);
+  const std::vector<Request> requests = createRequests(cast::get_examples_float16_to_float32_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1262,12 +1459,12 @@
   generated_tests::Execute(device,
                            cast::createTestModel_3,
                            cast::is_ignored_3,
-                           cast::examples_float16_to_int32);
+                           cast::get_examples_float16_to_int32());
 }
 
 TEST_F(ValidationTest, cast_float16_to_int32) {
   const Model model = cast::createTestModel_3();
-  const std::vector<Request> requests = createRequests(cast::examples_float16_to_int32);
+  const std::vector<Request> requests = createRequests(cast::get_examples_float16_to_int32());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1277,12 +1474,12 @@
   generated_tests::Execute(device,
                            cast::createTestModel_4,
                            cast::is_ignored_4,
-                           cast::examples_float16_to_quant8);
+                           cast::get_examples_float16_to_quant8());
 }
 
 TEST_F(ValidationTest, cast_float16_to_quant8) {
   const Model model = cast::createTestModel_4();
-  const std::vector<Request> requests = createRequests(cast::examples_float16_to_quant8);
+  const std::vector<Request> requests = createRequests(cast::get_examples_float16_to_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1292,12 +1489,12 @@
   generated_tests::Execute(device,
                            cast::createTestModel_5,
                            cast::is_ignored_5,
-                           cast::examples_float32_to_float16);
+                           cast::get_examples_float32_to_float16());
 }
 
 TEST_F(ValidationTest, cast_float32_to_float16) {
   const Model model = cast::createTestModel_5();
-  const std::vector<Request> requests = createRequests(cast::examples_float32_to_float16);
+  const std::vector<Request> requests = createRequests(cast::get_examples_float32_to_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1307,12 +1504,12 @@
   generated_tests::Execute(device,
                            cast::createTestModel_relaxed_2,
                            cast::is_ignored_relaxed_2,
-                           cast::examples_float32_to_float16_relaxed);
+                           cast::get_examples_float32_to_float16_relaxed());
 }
 
 TEST_F(ValidationTest, cast_float32_to_float16_relaxed) {
   const Model model = cast::createTestModel_relaxed_2();
-  const std::vector<Request> requests = createRequests(cast::examples_float32_to_float16_relaxed);
+  const std::vector<Request> requests = createRequests(cast::get_examples_float32_to_float16_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1322,12 +1519,12 @@
   generated_tests::Execute(device,
                            cast::createTestModel_6,
                            cast::is_ignored_6,
-                           cast::examples_float32_to_float32);
+                           cast::get_examples_float32_to_float32());
 }
 
 TEST_F(ValidationTest, cast_float32_to_float32) {
   const Model model = cast::createTestModel_6();
-  const std::vector<Request> requests = createRequests(cast::examples_float32_to_float32);
+  const std::vector<Request> requests = createRequests(cast::get_examples_float32_to_float32());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1337,12 +1534,12 @@
   generated_tests::Execute(device,
                            cast::createTestModel_relaxed_3,
                            cast::is_ignored_relaxed_3,
-                           cast::examples_float32_to_float32_relaxed);
+                           cast::get_examples_float32_to_float32_relaxed());
 }
 
 TEST_F(ValidationTest, cast_float32_to_float32_relaxed) {
   const Model model = cast::createTestModel_relaxed_3();
-  const std::vector<Request> requests = createRequests(cast::examples_float32_to_float32_relaxed);
+  const std::vector<Request> requests = createRequests(cast::get_examples_float32_to_float32_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1352,12 +1549,12 @@
   generated_tests::Execute(device,
                            cast::createTestModel_7,
                            cast::is_ignored_7,
-                           cast::examples_float32_to_int32);
+                           cast::get_examples_float32_to_int32());
 }
 
 TEST_F(ValidationTest, cast_float32_to_int32) {
   const Model model = cast::createTestModel_7();
-  const std::vector<Request> requests = createRequests(cast::examples_float32_to_int32);
+  const std::vector<Request> requests = createRequests(cast::get_examples_float32_to_int32());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1367,12 +1564,12 @@
   generated_tests::Execute(device,
                            cast::createTestModel_relaxed_4,
                            cast::is_ignored_relaxed_4,
-                           cast::examples_float32_to_int32_relaxed);
+                           cast::get_examples_float32_to_int32_relaxed());
 }
 
 TEST_F(ValidationTest, cast_float32_to_int32_relaxed) {
   const Model model = cast::createTestModel_relaxed_4();
-  const std::vector<Request> requests = createRequests(cast::examples_float32_to_int32_relaxed);
+  const std::vector<Request> requests = createRequests(cast::get_examples_float32_to_int32_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1382,12 +1579,12 @@
   generated_tests::Execute(device,
                            cast::createTestModel_8,
                            cast::is_ignored_8,
-                           cast::examples_float32_to_quant8);
+                           cast::get_examples_float32_to_quant8());
 }
 
 TEST_F(ValidationTest, cast_float32_to_quant8) {
   const Model model = cast::createTestModel_8();
-  const std::vector<Request> requests = createRequests(cast::examples_float32_to_quant8);
+  const std::vector<Request> requests = createRequests(cast::get_examples_float32_to_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1397,12 +1594,12 @@
   generated_tests::Execute(device,
                            cast::createTestModel_relaxed_5,
                            cast::is_ignored_relaxed_5,
-                           cast::examples_float32_to_quant8_relaxed);
+                           cast::get_examples_float32_to_quant8_relaxed());
 }
 
 TEST_F(ValidationTest, cast_float32_to_quant8_relaxed) {
   const Model model = cast::createTestModel_relaxed_5();
-  const std::vector<Request> requests = createRequests(cast::examples_float32_to_quant8_relaxed);
+  const std::vector<Request> requests = createRequests(cast::get_examples_float32_to_quant8_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1412,12 +1609,12 @@
   generated_tests::Execute(device,
                            cast::createTestModel_9,
                            cast::is_ignored_9,
-                           cast::examples_int32_to_float16);
+                           cast::get_examples_int32_to_float16());
 }
 
 TEST_F(ValidationTest, cast_int32_to_float16) {
   const Model model = cast::createTestModel_9();
-  const std::vector<Request> requests = createRequests(cast::examples_int32_to_float16);
+  const std::vector<Request> requests = createRequests(cast::get_examples_int32_to_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1427,12 +1624,12 @@
   generated_tests::Execute(device,
                            cast::createTestModel_10,
                            cast::is_ignored_10,
-                           cast::examples_int32_to_float32);
+                           cast::get_examples_int32_to_float32());
 }
 
 TEST_F(ValidationTest, cast_int32_to_float32) {
   const Model model = cast::createTestModel_10();
-  const std::vector<Request> requests = createRequests(cast::examples_int32_to_float32);
+  const std::vector<Request> requests = createRequests(cast::get_examples_int32_to_float32());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1442,12 +1639,12 @@
   generated_tests::Execute(device,
                            cast::createTestModel_relaxed_6,
                            cast::is_ignored_relaxed_6,
-                           cast::examples_int32_to_float32_relaxed);
+                           cast::get_examples_int32_to_float32_relaxed());
 }
 
 TEST_F(ValidationTest, cast_int32_to_float32_relaxed) {
   const Model model = cast::createTestModel_relaxed_6();
-  const std::vector<Request> requests = createRequests(cast::examples_int32_to_float32_relaxed);
+  const std::vector<Request> requests = createRequests(cast::get_examples_int32_to_float32_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1457,12 +1654,12 @@
   generated_tests::Execute(device,
                            cast::createTestModel_11,
                            cast::is_ignored_11,
-                           cast::examples_int32_to_int32);
+                           cast::get_examples_int32_to_int32());
 }
 
 TEST_F(ValidationTest, cast_int32_to_int32) {
   const Model model = cast::createTestModel_11();
-  const std::vector<Request> requests = createRequests(cast::examples_int32_to_int32);
+  const std::vector<Request> requests = createRequests(cast::get_examples_int32_to_int32());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1472,12 +1669,12 @@
   generated_tests::Execute(device,
                            cast::createTestModel_12,
                            cast::is_ignored_12,
-                           cast::examples_int32_to_quant8);
+                           cast::get_examples_int32_to_quant8());
 }
 
 TEST_F(ValidationTest, cast_int32_to_quant8) {
   const Model model = cast::createTestModel_12();
-  const std::vector<Request> requests = createRequests(cast::examples_int32_to_quant8);
+  const std::vector<Request> requests = createRequests(cast::get_examples_int32_to_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1487,12 +1684,12 @@
   generated_tests::Execute(device,
                            cast::createTestModel_13,
                            cast::is_ignored_13,
-                           cast::examples_quant8_to_float16);
+                           cast::get_examples_quant8_to_float16());
 }
 
 TEST_F(ValidationTest, cast_quant8_to_float16) {
   const Model model = cast::createTestModel_13();
-  const std::vector<Request> requests = createRequests(cast::examples_quant8_to_float16);
+  const std::vector<Request> requests = createRequests(cast::get_examples_quant8_to_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1502,12 +1699,12 @@
   generated_tests::Execute(device,
                            cast::createTestModel_14,
                            cast::is_ignored_14,
-                           cast::examples_quant8_to_float32);
+                           cast::get_examples_quant8_to_float32());
 }
 
 TEST_F(ValidationTest, cast_quant8_to_float32) {
   const Model model = cast::createTestModel_14();
-  const std::vector<Request> requests = createRequests(cast::examples_quant8_to_float32);
+  const std::vector<Request> requests = createRequests(cast::get_examples_quant8_to_float32());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1517,12 +1714,12 @@
   generated_tests::Execute(device,
                            cast::createTestModel_relaxed_7,
                            cast::is_ignored_relaxed_7,
-                           cast::examples_quant8_to_float32_relaxed);
+                           cast::get_examples_quant8_to_float32_relaxed());
 }
 
 TEST_F(ValidationTest, cast_quant8_to_float32_relaxed) {
   const Model model = cast::createTestModel_relaxed_7();
-  const std::vector<Request> requests = createRequests(cast::examples_quant8_to_float32_relaxed);
+  const std::vector<Request> requests = createRequests(cast::get_examples_quant8_to_float32_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1532,12 +1729,12 @@
   generated_tests::Execute(device,
                            cast::createTestModel_15,
                            cast::is_ignored_15,
-                           cast::examples_quant8_to_int32);
+                           cast::get_examples_quant8_to_int32());
 }
 
 TEST_F(ValidationTest, cast_quant8_to_int32) {
   const Model model = cast::createTestModel_15();
-  const std::vector<Request> requests = createRequests(cast::examples_quant8_to_int32);
+  const std::vector<Request> requests = createRequests(cast::get_examples_quant8_to_int32());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1547,12 +1744,12 @@
   generated_tests::Execute(device,
                            cast::createTestModel_16,
                            cast::is_ignored_16,
-                           cast::examples_quant8_to_quant8);
+                           cast::get_examples_quant8_to_quant8());
 }
 
 TEST_F(ValidationTest, cast_quant8_to_quant8) {
   const Model model = cast::createTestModel_16();
-  const std::vector<Request> requests = createRequests(cast::examples_quant8_to_quant8);
+  const std::vector<Request> requests = createRequests(cast::get_examples_quant8_to_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1570,12 +1767,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_dim4_axis0,
                            channel_shuffle::is_ignored_dim4_axis0,
-                           channel_shuffle::examples_dim4_axis0);
+                           channel_shuffle::get_examples_dim4_axis0());
 }
 
 TEST_F(ValidationTest, channel_shuffle_dim4_axis0) {
   const Model model = channel_shuffle::createTestModel_dim4_axis0();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_dim4_axis0);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_dim4_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1585,12 +1782,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_dim4_axis0_neg,
                            channel_shuffle::is_ignored_dim4_axis0_neg,
-                           channel_shuffle::examples_dim4_axis0_neg);
+                           channel_shuffle::get_examples_dim4_axis0_neg());
 }
 
 TEST_F(ValidationTest, channel_shuffle_dim4_axis0_neg) {
   const Model model = channel_shuffle::createTestModel_dim4_axis0_neg();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_dim4_axis0_neg);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_dim4_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1600,12 +1797,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_dim4_axis1,
                            channel_shuffle::is_ignored_dim4_axis1,
-                           channel_shuffle::examples_dim4_axis1);
+                           channel_shuffle::get_examples_dim4_axis1());
 }
 
 TEST_F(ValidationTest, channel_shuffle_dim4_axis1) {
   const Model model = channel_shuffle::createTestModel_dim4_axis1();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_dim4_axis1);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_dim4_axis1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1615,12 +1812,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_dim4_axis1_neg,
                            channel_shuffle::is_ignored_dim4_axis1_neg,
-                           channel_shuffle::examples_dim4_axis1_neg);
+                           channel_shuffle::get_examples_dim4_axis1_neg());
 }
 
 TEST_F(ValidationTest, channel_shuffle_dim4_axis1_neg) {
   const Model model = channel_shuffle::createTestModel_dim4_axis1_neg();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_dim4_axis1_neg);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_dim4_axis1_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1630,12 +1827,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_dim4_axis2,
                            channel_shuffle::is_ignored_dim4_axis2,
-                           channel_shuffle::examples_dim4_axis2);
+                           channel_shuffle::get_examples_dim4_axis2());
 }
 
 TEST_F(ValidationTest, channel_shuffle_dim4_axis2) {
   const Model model = channel_shuffle::createTestModel_dim4_axis2();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_dim4_axis2);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_dim4_axis2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1645,12 +1842,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_dim4_axis2_neg,
                            channel_shuffle::is_ignored_dim4_axis2_neg,
-                           channel_shuffle::examples_dim4_axis2_neg);
+                           channel_shuffle::get_examples_dim4_axis2_neg());
 }
 
 TEST_F(ValidationTest, channel_shuffle_dim4_axis2_neg) {
   const Model model = channel_shuffle::createTestModel_dim4_axis2_neg();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_dim4_axis2_neg);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_dim4_axis2_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1660,12 +1857,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_dim4_axis3,
                            channel_shuffle::is_ignored_dim4_axis3,
-                           channel_shuffle::examples_dim4_axis3);
+                           channel_shuffle::get_examples_dim4_axis3());
 }
 
 TEST_F(ValidationTest, channel_shuffle_dim4_axis3) {
   const Model model = channel_shuffle::createTestModel_dim4_axis3();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_dim4_axis3);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_dim4_axis3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1675,12 +1872,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_dim4_axis3_neg,
                            channel_shuffle::is_ignored_dim4_axis3_neg,
-                           channel_shuffle::examples_dim4_axis3_neg);
+                           channel_shuffle::get_examples_dim4_axis3_neg());
 }
 
 TEST_F(ValidationTest, channel_shuffle_dim4_axis3_neg) {
   const Model model = channel_shuffle::createTestModel_dim4_axis3_neg();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_dim4_axis3_neg);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_dim4_axis3_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1690,12 +1887,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_dim3_axis0,
                            channel_shuffle::is_ignored_dim3_axis0,
-                           channel_shuffle::examples_dim3_axis0);
+                           channel_shuffle::get_examples_dim3_axis0());
 }
 
 TEST_F(ValidationTest, channel_shuffle_dim3_axis0) {
   const Model model = channel_shuffle::createTestModel_dim3_axis0();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_dim3_axis0);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_dim3_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1705,12 +1902,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_dim3_axis0_neg,
                            channel_shuffle::is_ignored_dim3_axis0_neg,
-                           channel_shuffle::examples_dim3_axis0_neg);
+                           channel_shuffle::get_examples_dim3_axis0_neg());
 }
 
 TEST_F(ValidationTest, channel_shuffle_dim3_axis0_neg) {
   const Model model = channel_shuffle::createTestModel_dim3_axis0_neg();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_dim3_axis0_neg);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_dim3_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1720,12 +1917,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_dim3_axis1,
                            channel_shuffle::is_ignored_dim3_axis1,
-                           channel_shuffle::examples_dim3_axis1);
+                           channel_shuffle::get_examples_dim3_axis1());
 }
 
 TEST_F(ValidationTest, channel_shuffle_dim3_axis1) {
   const Model model = channel_shuffle::createTestModel_dim3_axis1();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_dim3_axis1);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_dim3_axis1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1735,12 +1932,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_dim3_axis1_neg,
                            channel_shuffle::is_ignored_dim3_axis1_neg,
-                           channel_shuffle::examples_dim3_axis1_neg);
+                           channel_shuffle::get_examples_dim3_axis1_neg());
 }
 
 TEST_F(ValidationTest, channel_shuffle_dim3_axis1_neg) {
   const Model model = channel_shuffle::createTestModel_dim3_axis1_neg();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_dim3_axis1_neg);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_dim3_axis1_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1750,12 +1947,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_dim3_axis2,
                            channel_shuffle::is_ignored_dim3_axis2,
-                           channel_shuffle::examples_dim3_axis2);
+                           channel_shuffle::get_examples_dim3_axis2());
 }
 
 TEST_F(ValidationTest, channel_shuffle_dim3_axis2) {
   const Model model = channel_shuffle::createTestModel_dim3_axis2();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_dim3_axis2);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_dim3_axis2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1765,12 +1962,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_dim3_axis2_neg,
                            channel_shuffle::is_ignored_dim3_axis2_neg,
-                           channel_shuffle::examples_dim3_axis2_neg);
+                           channel_shuffle::get_examples_dim3_axis2_neg());
 }
 
 TEST_F(ValidationTest, channel_shuffle_dim3_axis2_neg) {
   const Model model = channel_shuffle::createTestModel_dim3_axis2_neg();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_dim3_axis2_neg);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_dim3_axis2_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1780,12 +1977,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_dim2_axis0,
                            channel_shuffle::is_ignored_dim2_axis0,
-                           channel_shuffle::examples_dim2_axis0);
+                           channel_shuffle::get_examples_dim2_axis0());
 }
 
 TEST_F(ValidationTest, channel_shuffle_dim2_axis0) {
   const Model model = channel_shuffle::createTestModel_dim2_axis0();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_dim2_axis0);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_dim2_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1795,12 +1992,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_dim2_axis0_neg,
                            channel_shuffle::is_ignored_dim2_axis0_neg,
-                           channel_shuffle::examples_dim2_axis0_neg);
+                           channel_shuffle::get_examples_dim2_axis0_neg());
 }
 
 TEST_F(ValidationTest, channel_shuffle_dim2_axis0_neg) {
   const Model model = channel_shuffle::createTestModel_dim2_axis0_neg();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_dim2_axis0_neg);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_dim2_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1810,12 +2007,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_dim2_axis1,
                            channel_shuffle::is_ignored_dim2_axis1,
-                           channel_shuffle::examples_dim2_axis1);
+                           channel_shuffle::get_examples_dim2_axis1());
 }
 
 TEST_F(ValidationTest, channel_shuffle_dim2_axis1) {
   const Model model = channel_shuffle::createTestModel_dim2_axis1();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_dim2_axis1);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_dim2_axis1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1825,12 +2022,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_dim2_axis1_neg,
                            channel_shuffle::is_ignored_dim2_axis1_neg,
-                           channel_shuffle::examples_dim2_axis1_neg);
+                           channel_shuffle::get_examples_dim2_axis1_neg());
 }
 
 TEST_F(ValidationTest, channel_shuffle_dim2_axis1_neg) {
   const Model model = channel_shuffle::createTestModel_dim2_axis1_neg();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_dim2_axis1_neg);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_dim2_axis1_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1840,12 +2037,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_dim1_axis0,
                            channel_shuffle::is_ignored_dim1_axis0,
-                           channel_shuffle::examples_dim1_axis0);
+                           channel_shuffle::get_examples_dim1_axis0());
 }
 
 TEST_F(ValidationTest, channel_shuffle_dim1_axis0) {
   const Model model = channel_shuffle::createTestModel_dim1_axis0();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_dim1_axis0);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_dim1_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1855,12 +2052,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_dim1_axis0_neg,
                            channel_shuffle::is_ignored_dim1_axis0_neg,
-                           channel_shuffle::examples_dim1_axis0_neg);
+                           channel_shuffle::get_examples_dim1_axis0_neg());
 }
 
 TEST_F(ValidationTest, channel_shuffle_dim1_axis0_neg) {
   const Model model = channel_shuffle::createTestModel_dim1_axis0_neg();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_dim1_axis0_neg);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_dim1_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1870,12 +2067,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_relaxed_dim4_axis0,
                            channel_shuffle::is_ignored_relaxed_dim4_axis0,
-                           channel_shuffle::examples_relaxed_dim4_axis0);
+                           channel_shuffle::get_examples_relaxed_dim4_axis0());
 }
 
 TEST_F(ValidationTest, channel_shuffle_relaxed_dim4_axis0) {
   const Model model = channel_shuffle::createTestModel_relaxed_dim4_axis0();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_relaxed_dim4_axis0);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_relaxed_dim4_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1885,12 +2082,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_relaxed_dim4_axis0_neg,
                            channel_shuffle::is_ignored_relaxed_dim4_axis0_neg,
-                           channel_shuffle::examples_relaxed_dim4_axis0_neg);
+                           channel_shuffle::get_examples_relaxed_dim4_axis0_neg());
 }
 
 TEST_F(ValidationTest, channel_shuffle_relaxed_dim4_axis0_neg) {
   const Model model = channel_shuffle::createTestModel_relaxed_dim4_axis0_neg();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_relaxed_dim4_axis0_neg);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_relaxed_dim4_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1900,12 +2097,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_relaxed_dim4_axis1,
                            channel_shuffle::is_ignored_relaxed_dim4_axis1,
-                           channel_shuffle::examples_relaxed_dim4_axis1);
+                           channel_shuffle::get_examples_relaxed_dim4_axis1());
 }
 
 TEST_F(ValidationTest, channel_shuffle_relaxed_dim4_axis1) {
   const Model model = channel_shuffle::createTestModel_relaxed_dim4_axis1();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_relaxed_dim4_axis1);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_relaxed_dim4_axis1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1915,12 +2112,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_relaxed_dim4_axis1_neg,
                            channel_shuffle::is_ignored_relaxed_dim4_axis1_neg,
-                           channel_shuffle::examples_relaxed_dim4_axis1_neg);
+                           channel_shuffle::get_examples_relaxed_dim4_axis1_neg());
 }
 
 TEST_F(ValidationTest, channel_shuffle_relaxed_dim4_axis1_neg) {
   const Model model = channel_shuffle::createTestModel_relaxed_dim4_axis1_neg();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_relaxed_dim4_axis1_neg);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_relaxed_dim4_axis1_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1930,12 +2127,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_relaxed_dim4_axis2,
                            channel_shuffle::is_ignored_relaxed_dim4_axis2,
-                           channel_shuffle::examples_relaxed_dim4_axis2);
+                           channel_shuffle::get_examples_relaxed_dim4_axis2());
 }
 
 TEST_F(ValidationTest, channel_shuffle_relaxed_dim4_axis2) {
   const Model model = channel_shuffle::createTestModel_relaxed_dim4_axis2();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_relaxed_dim4_axis2);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_relaxed_dim4_axis2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1945,12 +2142,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_relaxed_dim4_axis2_neg,
                            channel_shuffle::is_ignored_relaxed_dim4_axis2_neg,
-                           channel_shuffle::examples_relaxed_dim4_axis2_neg);
+                           channel_shuffle::get_examples_relaxed_dim4_axis2_neg());
 }
 
 TEST_F(ValidationTest, channel_shuffle_relaxed_dim4_axis2_neg) {
   const Model model = channel_shuffle::createTestModel_relaxed_dim4_axis2_neg();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_relaxed_dim4_axis2_neg);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_relaxed_dim4_axis2_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1960,12 +2157,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_relaxed_dim4_axis3,
                            channel_shuffle::is_ignored_relaxed_dim4_axis3,
-                           channel_shuffle::examples_relaxed_dim4_axis3);
+                           channel_shuffle::get_examples_relaxed_dim4_axis3());
 }
 
 TEST_F(ValidationTest, channel_shuffle_relaxed_dim4_axis3) {
   const Model model = channel_shuffle::createTestModel_relaxed_dim4_axis3();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_relaxed_dim4_axis3);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_relaxed_dim4_axis3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1975,12 +2172,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_relaxed_dim4_axis3_neg,
                            channel_shuffle::is_ignored_relaxed_dim4_axis3_neg,
-                           channel_shuffle::examples_relaxed_dim4_axis3_neg);
+                           channel_shuffle::get_examples_relaxed_dim4_axis3_neg());
 }
 
 TEST_F(ValidationTest, channel_shuffle_relaxed_dim4_axis3_neg) {
   const Model model = channel_shuffle::createTestModel_relaxed_dim4_axis3_neg();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_relaxed_dim4_axis3_neg);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_relaxed_dim4_axis3_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -1990,12 +2187,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_relaxed_dim3_axis0,
                            channel_shuffle::is_ignored_relaxed_dim3_axis0,
-                           channel_shuffle::examples_relaxed_dim3_axis0);
+                           channel_shuffle::get_examples_relaxed_dim3_axis0());
 }
 
 TEST_F(ValidationTest, channel_shuffle_relaxed_dim3_axis0) {
   const Model model = channel_shuffle::createTestModel_relaxed_dim3_axis0();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_relaxed_dim3_axis0);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_relaxed_dim3_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2005,12 +2202,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_relaxed_dim3_axis0_neg,
                            channel_shuffle::is_ignored_relaxed_dim3_axis0_neg,
-                           channel_shuffle::examples_relaxed_dim3_axis0_neg);
+                           channel_shuffle::get_examples_relaxed_dim3_axis0_neg());
 }
 
 TEST_F(ValidationTest, channel_shuffle_relaxed_dim3_axis0_neg) {
   const Model model = channel_shuffle::createTestModel_relaxed_dim3_axis0_neg();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_relaxed_dim3_axis0_neg);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_relaxed_dim3_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2020,12 +2217,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_relaxed_dim3_axis1,
                            channel_shuffle::is_ignored_relaxed_dim3_axis1,
-                           channel_shuffle::examples_relaxed_dim3_axis1);
+                           channel_shuffle::get_examples_relaxed_dim3_axis1());
 }
 
 TEST_F(ValidationTest, channel_shuffle_relaxed_dim3_axis1) {
   const Model model = channel_shuffle::createTestModel_relaxed_dim3_axis1();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_relaxed_dim3_axis1);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_relaxed_dim3_axis1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2035,12 +2232,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_relaxed_dim3_axis1_neg,
                            channel_shuffle::is_ignored_relaxed_dim3_axis1_neg,
-                           channel_shuffle::examples_relaxed_dim3_axis1_neg);
+                           channel_shuffle::get_examples_relaxed_dim3_axis1_neg());
 }
 
 TEST_F(ValidationTest, channel_shuffle_relaxed_dim3_axis1_neg) {
   const Model model = channel_shuffle::createTestModel_relaxed_dim3_axis1_neg();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_relaxed_dim3_axis1_neg);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_relaxed_dim3_axis1_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2050,12 +2247,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_relaxed_dim3_axis2,
                            channel_shuffle::is_ignored_relaxed_dim3_axis2,
-                           channel_shuffle::examples_relaxed_dim3_axis2);
+                           channel_shuffle::get_examples_relaxed_dim3_axis2());
 }
 
 TEST_F(ValidationTest, channel_shuffle_relaxed_dim3_axis2) {
   const Model model = channel_shuffle::createTestModel_relaxed_dim3_axis2();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_relaxed_dim3_axis2);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_relaxed_dim3_axis2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2065,12 +2262,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_relaxed_dim3_axis2_neg,
                            channel_shuffle::is_ignored_relaxed_dim3_axis2_neg,
-                           channel_shuffle::examples_relaxed_dim3_axis2_neg);
+                           channel_shuffle::get_examples_relaxed_dim3_axis2_neg());
 }
 
 TEST_F(ValidationTest, channel_shuffle_relaxed_dim3_axis2_neg) {
   const Model model = channel_shuffle::createTestModel_relaxed_dim3_axis2_neg();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_relaxed_dim3_axis2_neg);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_relaxed_dim3_axis2_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2080,12 +2277,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_relaxed_dim2_axis0,
                            channel_shuffle::is_ignored_relaxed_dim2_axis0,
-                           channel_shuffle::examples_relaxed_dim2_axis0);
+                           channel_shuffle::get_examples_relaxed_dim2_axis0());
 }
 
 TEST_F(ValidationTest, channel_shuffle_relaxed_dim2_axis0) {
   const Model model = channel_shuffle::createTestModel_relaxed_dim2_axis0();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_relaxed_dim2_axis0);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_relaxed_dim2_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2095,12 +2292,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_relaxed_dim2_axis0_neg,
                            channel_shuffle::is_ignored_relaxed_dim2_axis0_neg,
-                           channel_shuffle::examples_relaxed_dim2_axis0_neg);
+                           channel_shuffle::get_examples_relaxed_dim2_axis0_neg());
 }
 
 TEST_F(ValidationTest, channel_shuffle_relaxed_dim2_axis0_neg) {
   const Model model = channel_shuffle::createTestModel_relaxed_dim2_axis0_neg();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_relaxed_dim2_axis0_neg);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_relaxed_dim2_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2110,12 +2307,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_relaxed_dim2_axis1,
                            channel_shuffle::is_ignored_relaxed_dim2_axis1,
-                           channel_shuffle::examples_relaxed_dim2_axis1);
+                           channel_shuffle::get_examples_relaxed_dim2_axis1());
 }
 
 TEST_F(ValidationTest, channel_shuffle_relaxed_dim2_axis1) {
   const Model model = channel_shuffle::createTestModel_relaxed_dim2_axis1();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_relaxed_dim2_axis1);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_relaxed_dim2_axis1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2125,12 +2322,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_relaxed_dim2_axis1_neg,
                            channel_shuffle::is_ignored_relaxed_dim2_axis1_neg,
-                           channel_shuffle::examples_relaxed_dim2_axis1_neg);
+                           channel_shuffle::get_examples_relaxed_dim2_axis1_neg());
 }
 
 TEST_F(ValidationTest, channel_shuffle_relaxed_dim2_axis1_neg) {
   const Model model = channel_shuffle::createTestModel_relaxed_dim2_axis1_neg();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_relaxed_dim2_axis1_neg);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_relaxed_dim2_axis1_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2140,12 +2337,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_relaxed_dim1_axis0,
                            channel_shuffle::is_ignored_relaxed_dim1_axis0,
-                           channel_shuffle::examples_relaxed_dim1_axis0);
+                           channel_shuffle::get_examples_relaxed_dim1_axis0());
 }
 
 TEST_F(ValidationTest, channel_shuffle_relaxed_dim1_axis0) {
   const Model model = channel_shuffle::createTestModel_relaxed_dim1_axis0();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_relaxed_dim1_axis0);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_relaxed_dim1_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2155,12 +2352,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_relaxed_dim1_axis0_neg,
                            channel_shuffle::is_ignored_relaxed_dim1_axis0_neg,
-                           channel_shuffle::examples_relaxed_dim1_axis0_neg);
+                           channel_shuffle::get_examples_relaxed_dim1_axis0_neg());
 }
 
 TEST_F(ValidationTest, channel_shuffle_relaxed_dim1_axis0_neg) {
   const Model model = channel_shuffle::createTestModel_relaxed_dim1_axis0_neg();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_relaxed_dim1_axis0_neg);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_relaxed_dim1_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2170,12 +2367,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_quant8_dim4_axis0,
                            channel_shuffle::is_ignored_quant8_dim4_axis0,
-                           channel_shuffle::examples_quant8_dim4_axis0);
+                           channel_shuffle::get_examples_quant8_dim4_axis0());
 }
 
 TEST_F(ValidationTest, channel_shuffle_quant8_dim4_axis0) {
   const Model model = channel_shuffle::createTestModel_quant8_dim4_axis0();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_quant8_dim4_axis0);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_quant8_dim4_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2185,12 +2382,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_quant8_dim4_axis0_neg,
                            channel_shuffle::is_ignored_quant8_dim4_axis0_neg,
-                           channel_shuffle::examples_quant8_dim4_axis0_neg);
+                           channel_shuffle::get_examples_quant8_dim4_axis0_neg());
 }
 
 TEST_F(ValidationTest, channel_shuffle_quant8_dim4_axis0_neg) {
   const Model model = channel_shuffle::createTestModel_quant8_dim4_axis0_neg();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_quant8_dim4_axis0_neg);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_quant8_dim4_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2200,12 +2397,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_quant8_dim4_axis1,
                            channel_shuffle::is_ignored_quant8_dim4_axis1,
-                           channel_shuffle::examples_quant8_dim4_axis1);
+                           channel_shuffle::get_examples_quant8_dim4_axis1());
 }
 
 TEST_F(ValidationTest, channel_shuffle_quant8_dim4_axis1) {
   const Model model = channel_shuffle::createTestModel_quant8_dim4_axis1();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_quant8_dim4_axis1);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_quant8_dim4_axis1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2215,12 +2412,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_quant8_dim4_axis1_neg,
                            channel_shuffle::is_ignored_quant8_dim4_axis1_neg,
-                           channel_shuffle::examples_quant8_dim4_axis1_neg);
+                           channel_shuffle::get_examples_quant8_dim4_axis1_neg());
 }
 
 TEST_F(ValidationTest, channel_shuffle_quant8_dim4_axis1_neg) {
   const Model model = channel_shuffle::createTestModel_quant8_dim4_axis1_neg();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_quant8_dim4_axis1_neg);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_quant8_dim4_axis1_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2230,12 +2427,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_quant8_dim4_axis2,
                            channel_shuffle::is_ignored_quant8_dim4_axis2,
-                           channel_shuffle::examples_quant8_dim4_axis2);
+                           channel_shuffle::get_examples_quant8_dim4_axis2());
 }
 
 TEST_F(ValidationTest, channel_shuffle_quant8_dim4_axis2) {
   const Model model = channel_shuffle::createTestModel_quant8_dim4_axis2();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_quant8_dim4_axis2);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_quant8_dim4_axis2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2245,12 +2442,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_quant8_dim4_axis2_neg,
                            channel_shuffle::is_ignored_quant8_dim4_axis2_neg,
-                           channel_shuffle::examples_quant8_dim4_axis2_neg);
+                           channel_shuffle::get_examples_quant8_dim4_axis2_neg());
 }
 
 TEST_F(ValidationTest, channel_shuffle_quant8_dim4_axis2_neg) {
   const Model model = channel_shuffle::createTestModel_quant8_dim4_axis2_neg();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_quant8_dim4_axis2_neg);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_quant8_dim4_axis2_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2260,12 +2457,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_quant8_dim4_axis3,
                            channel_shuffle::is_ignored_quant8_dim4_axis3,
-                           channel_shuffle::examples_quant8_dim4_axis3);
+                           channel_shuffle::get_examples_quant8_dim4_axis3());
 }
 
 TEST_F(ValidationTest, channel_shuffle_quant8_dim4_axis3) {
   const Model model = channel_shuffle::createTestModel_quant8_dim4_axis3();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_quant8_dim4_axis3);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_quant8_dim4_axis3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2275,12 +2472,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_quant8_dim4_axis3_neg,
                            channel_shuffle::is_ignored_quant8_dim4_axis3_neg,
-                           channel_shuffle::examples_quant8_dim4_axis3_neg);
+                           channel_shuffle::get_examples_quant8_dim4_axis3_neg());
 }
 
 TEST_F(ValidationTest, channel_shuffle_quant8_dim4_axis3_neg) {
   const Model model = channel_shuffle::createTestModel_quant8_dim4_axis3_neg();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_quant8_dim4_axis3_neg);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_quant8_dim4_axis3_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2290,12 +2487,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_quant8_dim3_axis0,
                            channel_shuffle::is_ignored_quant8_dim3_axis0,
-                           channel_shuffle::examples_quant8_dim3_axis0);
+                           channel_shuffle::get_examples_quant8_dim3_axis0());
 }
 
 TEST_F(ValidationTest, channel_shuffle_quant8_dim3_axis0) {
   const Model model = channel_shuffle::createTestModel_quant8_dim3_axis0();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_quant8_dim3_axis0);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_quant8_dim3_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2305,12 +2502,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_quant8_dim3_axis0_neg,
                            channel_shuffle::is_ignored_quant8_dim3_axis0_neg,
-                           channel_shuffle::examples_quant8_dim3_axis0_neg);
+                           channel_shuffle::get_examples_quant8_dim3_axis0_neg());
 }
 
 TEST_F(ValidationTest, channel_shuffle_quant8_dim3_axis0_neg) {
   const Model model = channel_shuffle::createTestModel_quant8_dim3_axis0_neg();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_quant8_dim3_axis0_neg);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_quant8_dim3_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2320,12 +2517,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_quant8_dim3_axis1,
                            channel_shuffle::is_ignored_quant8_dim3_axis1,
-                           channel_shuffle::examples_quant8_dim3_axis1);
+                           channel_shuffle::get_examples_quant8_dim3_axis1());
 }
 
 TEST_F(ValidationTest, channel_shuffle_quant8_dim3_axis1) {
   const Model model = channel_shuffle::createTestModel_quant8_dim3_axis1();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_quant8_dim3_axis1);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_quant8_dim3_axis1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2335,12 +2532,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_quant8_dim3_axis1_neg,
                            channel_shuffle::is_ignored_quant8_dim3_axis1_neg,
-                           channel_shuffle::examples_quant8_dim3_axis1_neg);
+                           channel_shuffle::get_examples_quant8_dim3_axis1_neg());
 }
 
 TEST_F(ValidationTest, channel_shuffle_quant8_dim3_axis1_neg) {
   const Model model = channel_shuffle::createTestModel_quant8_dim3_axis1_neg();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_quant8_dim3_axis1_neg);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_quant8_dim3_axis1_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2350,12 +2547,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_quant8_dim3_axis2,
                            channel_shuffle::is_ignored_quant8_dim3_axis2,
-                           channel_shuffle::examples_quant8_dim3_axis2);
+                           channel_shuffle::get_examples_quant8_dim3_axis2());
 }
 
 TEST_F(ValidationTest, channel_shuffle_quant8_dim3_axis2) {
   const Model model = channel_shuffle::createTestModel_quant8_dim3_axis2();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_quant8_dim3_axis2);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_quant8_dim3_axis2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2365,12 +2562,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_quant8_dim3_axis2_neg,
                            channel_shuffle::is_ignored_quant8_dim3_axis2_neg,
-                           channel_shuffle::examples_quant8_dim3_axis2_neg);
+                           channel_shuffle::get_examples_quant8_dim3_axis2_neg());
 }
 
 TEST_F(ValidationTest, channel_shuffle_quant8_dim3_axis2_neg) {
   const Model model = channel_shuffle::createTestModel_quant8_dim3_axis2_neg();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_quant8_dim3_axis2_neg);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_quant8_dim3_axis2_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2380,12 +2577,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_quant8_dim2_axis0,
                            channel_shuffle::is_ignored_quant8_dim2_axis0,
-                           channel_shuffle::examples_quant8_dim2_axis0);
+                           channel_shuffle::get_examples_quant8_dim2_axis0());
 }
 
 TEST_F(ValidationTest, channel_shuffle_quant8_dim2_axis0) {
   const Model model = channel_shuffle::createTestModel_quant8_dim2_axis0();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_quant8_dim2_axis0);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_quant8_dim2_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2395,12 +2592,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_quant8_dim2_axis0_neg,
                            channel_shuffle::is_ignored_quant8_dim2_axis0_neg,
-                           channel_shuffle::examples_quant8_dim2_axis0_neg);
+                           channel_shuffle::get_examples_quant8_dim2_axis0_neg());
 }
 
 TEST_F(ValidationTest, channel_shuffle_quant8_dim2_axis0_neg) {
   const Model model = channel_shuffle::createTestModel_quant8_dim2_axis0_neg();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_quant8_dim2_axis0_neg);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_quant8_dim2_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2410,12 +2607,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_quant8_dim2_axis1,
                            channel_shuffle::is_ignored_quant8_dim2_axis1,
-                           channel_shuffle::examples_quant8_dim2_axis1);
+                           channel_shuffle::get_examples_quant8_dim2_axis1());
 }
 
 TEST_F(ValidationTest, channel_shuffle_quant8_dim2_axis1) {
   const Model model = channel_shuffle::createTestModel_quant8_dim2_axis1();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_quant8_dim2_axis1);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_quant8_dim2_axis1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2425,12 +2622,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_quant8_dim2_axis1_neg,
                            channel_shuffle::is_ignored_quant8_dim2_axis1_neg,
-                           channel_shuffle::examples_quant8_dim2_axis1_neg);
+                           channel_shuffle::get_examples_quant8_dim2_axis1_neg());
 }
 
 TEST_F(ValidationTest, channel_shuffle_quant8_dim2_axis1_neg) {
   const Model model = channel_shuffle::createTestModel_quant8_dim2_axis1_neg();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_quant8_dim2_axis1_neg);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_quant8_dim2_axis1_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2440,12 +2637,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_quant8_dim1_axis0,
                            channel_shuffle::is_ignored_quant8_dim1_axis0,
-                           channel_shuffle::examples_quant8_dim1_axis0);
+                           channel_shuffle::get_examples_quant8_dim1_axis0());
 }
 
 TEST_F(ValidationTest, channel_shuffle_quant8_dim1_axis0) {
   const Model model = channel_shuffle::createTestModel_quant8_dim1_axis0();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_quant8_dim1_axis0);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_quant8_dim1_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2455,12 +2652,12 @@
   generated_tests::Execute(device,
                            channel_shuffle::createTestModel_quant8_dim1_axis0_neg,
                            channel_shuffle::is_ignored_quant8_dim1_axis0_neg,
-                           channel_shuffle::examples_quant8_dim1_axis0_neg);
+                           channel_shuffle::get_examples_quant8_dim1_axis0_neg());
 }
 
 TEST_F(ValidationTest, channel_shuffle_quant8_dim1_axis0_neg) {
   const Model model = channel_shuffle::createTestModel_quant8_dim1_axis0_neg();
-  const std::vector<Request> requests = createRequests(channel_shuffle::examples_quant8_dim1_axis0_neg);
+  const std::vector<Request> requests = createRequests(channel_shuffle::get_examples_quant8_dim1_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2478,12 +2675,12 @@
   generated_tests::Execute(device,
                            concat_float16_1::createTestModel,
                            concat_float16_1::is_ignored,
-                           concat_float16_1::examples);
+                           concat_float16_1::get_examples());
 }
 
 TEST_F(ValidationTest, concat_float16_1) {
   const Model model = concat_float16_1::createTestModel();
-  const std::vector<Request> requests = createRequests(concat_float16_1::examples);
+  const std::vector<Request> requests = createRequests(concat_float16_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2501,12 +2698,12 @@
   generated_tests::Execute(device,
                            concat_float16_2::createTestModel,
                            concat_float16_2::is_ignored,
-                           concat_float16_2::examples);
+                           concat_float16_2::get_examples());
 }
 
 TEST_F(ValidationTest, concat_float16_2) {
   const Model model = concat_float16_2::createTestModel();
-  const std::vector<Request> requests = createRequests(concat_float16_2::examples);
+  const std::vector<Request> requests = createRequests(concat_float16_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2524,12 +2721,50 @@
   generated_tests::Execute(device,
                            concat_float16_3::createTestModel,
                            concat_float16_3::is_ignored,
-                           concat_float16_3::examples);
+                           concat_float16_3::get_examples());
 }
 
 TEST_F(ValidationTest, concat_float16_3) {
   const Model model = concat_float16_3::createTestModel();
-  const std::vector<Request> requests = createRequests(concat_float16_3::examples);
+  const std::vector<Request> requests = createRequests(concat_float16_3::get_examples());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+// Generated from: concat_mixed_quant.mod.py.
+namespace concat_mixed_quant {
+// Generated concat_mixed_quant test
+#include "examples/concat_mixed_quant.example.cpp"
+// Generated model constructor
+#include "vts_models/concat_mixed_quant.model.cpp"
+} // namespace concat_mixed_quant
+
+TEST_F(NeuralnetworksHidlTest, concat_mixed_quant_quant8) {
+  generated_tests::Execute(device,
+                           concat_mixed_quant::createTestModel_quant8,
+                           concat_mixed_quant::is_ignored_quant8,
+                           concat_mixed_quant::get_examples_quant8());
+}
+
+TEST_F(ValidationTest, concat_mixed_quant_quant8) {
+  const Model model = concat_mixed_quant::createTestModel_quant8();
+  const std::vector<Request> requests = createRequests(concat_mixed_quant::get_examples_quant8());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, concat_mixed_quant_quant8_2) {
+  generated_tests::Execute(device,
+                           concat_mixed_quant::createTestModel_quant8_2,
+                           concat_mixed_quant::is_ignored_quant8_2,
+                           concat_mixed_quant::get_examples_quant8_2());
+}
+
+TEST_F(ValidationTest, concat_mixed_quant_quant8_2) {
+  const Model model = concat_mixed_quant::createTestModel_quant8_2();
+  const std::vector<Request> requests = createRequests(concat_mixed_quant::get_examples_quant8_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2547,12 +2782,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_nhwc,
                            conv2d_v1_2::is_ignored_nhwc,
-                           conv2d_v1_2::examples_nhwc);
+                           conv2d_v1_2::get_examples_nhwc());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_nhwc) {
   const Model model = conv2d_v1_2::createTestModel_nhwc();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_nhwc);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_nhwc());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2562,12 +2797,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_nhwc_relaxed,
                            conv2d_v1_2::is_ignored_nhwc_relaxed,
-                           conv2d_v1_2::examples_nhwc_relaxed);
+                           conv2d_v1_2::get_examples_nhwc_relaxed());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_nhwc_relaxed) {
   const Model model = conv2d_v1_2::createTestModel_nhwc_relaxed();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_nhwc_relaxed);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_nhwc_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2577,12 +2812,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_nhwc_quant8,
                            conv2d_v1_2::is_ignored_nhwc_quant8,
-                           conv2d_v1_2::examples_nhwc_quant8);
+                           conv2d_v1_2::get_examples_nhwc_quant8());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_nhwc_quant8) {
   const Model model = conv2d_v1_2::createTestModel_nhwc_quant8();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_nhwc_quant8);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_nhwc_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2592,12 +2827,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_nhwc_weight_as_input,
                            conv2d_v1_2::is_ignored_nhwc_weight_as_input,
-                           conv2d_v1_2::examples_nhwc_weight_as_input);
+                           conv2d_v1_2::get_examples_nhwc_weight_as_input());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_nhwc_weight_as_input) {
   const Model model = conv2d_v1_2::createTestModel_nhwc_weight_as_input();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_nhwc_weight_as_input);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_nhwc_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2607,12 +2842,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_nhwc_weight_as_input_relaxed,
                            conv2d_v1_2::is_ignored_nhwc_weight_as_input_relaxed,
-                           conv2d_v1_2::examples_nhwc_weight_as_input_relaxed);
+                           conv2d_v1_2::get_examples_nhwc_weight_as_input_relaxed());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_nhwc_weight_as_input_relaxed) {
   const Model model = conv2d_v1_2::createTestModel_nhwc_weight_as_input_relaxed();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_nhwc_weight_as_input_relaxed);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_nhwc_weight_as_input_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2622,12 +2857,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_nhwc_weight_as_input_quant8,
                            conv2d_v1_2::is_ignored_nhwc_weight_as_input_quant8,
-                           conv2d_v1_2::examples_nhwc_weight_as_input_quant8);
+                           conv2d_v1_2::get_examples_nhwc_weight_as_input_quant8());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_nhwc_weight_as_input_quant8) {
   const Model model = conv2d_v1_2::createTestModel_nhwc_weight_as_input_quant8();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_nhwc_weight_as_input_quant8);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_nhwc_weight_as_input_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2637,12 +2872,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_nchw,
                            conv2d_v1_2::is_ignored_nchw,
-                           conv2d_v1_2::examples_nchw);
+                           conv2d_v1_2::get_examples_nchw());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_nchw) {
   const Model model = conv2d_v1_2::createTestModel_nchw();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_nchw);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_nchw());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2652,12 +2887,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_nchw_relaxed,
                            conv2d_v1_2::is_ignored_nchw_relaxed,
-                           conv2d_v1_2::examples_nchw_relaxed);
+                           conv2d_v1_2::get_examples_nchw_relaxed());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_nchw_relaxed) {
   const Model model = conv2d_v1_2::createTestModel_nchw_relaxed();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_nchw_relaxed);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_nchw_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2667,12 +2902,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_nchw_quant8,
                            conv2d_v1_2::is_ignored_nchw_quant8,
-                           conv2d_v1_2::examples_nchw_quant8);
+                           conv2d_v1_2::get_examples_nchw_quant8());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_nchw_quant8) {
   const Model model = conv2d_v1_2::createTestModel_nchw_quant8();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_nchw_quant8);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_nchw_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2682,12 +2917,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_nchw_weight_as_input,
                            conv2d_v1_2::is_ignored_nchw_weight_as_input,
-                           conv2d_v1_2::examples_nchw_weight_as_input);
+                           conv2d_v1_2::get_examples_nchw_weight_as_input());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_nchw_weight_as_input) {
   const Model model = conv2d_v1_2::createTestModel_nchw_weight_as_input();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_nchw_weight_as_input);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_nchw_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2697,12 +2932,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_nchw_weight_as_input_relaxed,
                            conv2d_v1_2::is_ignored_nchw_weight_as_input_relaxed,
-                           conv2d_v1_2::examples_nchw_weight_as_input_relaxed);
+                           conv2d_v1_2::get_examples_nchw_weight_as_input_relaxed());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_nchw_weight_as_input_relaxed) {
   const Model model = conv2d_v1_2::createTestModel_nchw_weight_as_input_relaxed();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_nchw_weight_as_input_relaxed);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_nchw_weight_as_input_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2712,12 +2947,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_nchw_weight_as_input_quant8,
                            conv2d_v1_2::is_ignored_nchw_weight_as_input_quant8,
-                           conv2d_v1_2::examples_nchw_weight_as_input_quant8);
+                           conv2d_v1_2::get_examples_nchw_weight_as_input_quant8());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_nchw_weight_as_input_quant8) {
   const Model model = conv2d_v1_2::createTestModel_nchw_weight_as_input_quant8();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_nchw_weight_as_input_quant8);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_nchw_weight_as_input_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2727,12 +2962,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_nhwc_2,
                            conv2d_v1_2::is_ignored_nhwc_2,
-                           conv2d_v1_2::examples_nhwc_2);
+                           conv2d_v1_2::get_examples_nhwc_2());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_nhwc_2) {
   const Model model = conv2d_v1_2::createTestModel_nhwc_2();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_nhwc_2);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_nhwc_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2742,12 +2977,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_nhwc_relaxed_2,
                            conv2d_v1_2::is_ignored_nhwc_relaxed_2,
-                           conv2d_v1_2::examples_nhwc_relaxed_2);
+                           conv2d_v1_2::get_examples_nhwc_relaxed_2());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_nhwc_relaxed_2) {
   const Model model = conv2d_v1_2::createTestModel_nhwc_relaxed_2();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_nhwc_relaxed_2);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_nhwc_relaxed_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2757,12 +2992,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_nhwc_quant8_2,
                            conv2d_v1_2::is_ignored_nhwc_quant8_2,
-                           conv2d_v1_2::examples_nhwc_quant8_2);
+                           conv2d_v1_2::get_examples_nhwc_quant8_2());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_nhwc_quant8_2) {
   const Model model = conv2d_v1_2::createTestModel_nhwc_quant8_2();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_nhwc_quant8_2);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_nhwc_quant8_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2772,12 +3007,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_nhwc_weight_as_input_2,
                            conv2d_v1_2::is_ignored_nhwc_weight_as_input_2,
-                           conv2d_v1_2::examples_nhwc_weight_as_input_2);
+                           conv2d_v1_2::get_examples_nhwc_weight_as_input_2());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_nhwc_weight_as_input_2) {
   const Model model = conv2d_v1_2::createTestModel_nhwc_weight_as_input_2();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_nhwc_weight_as_input_2);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_nhwc_weight_as_input_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2787,12 +3022,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_nhwc_weight_as_input_relaxed_2,
                            conv2d_v1_2::is_ignored_nhwc_weight_as_input_relaxed_2,
-                           conv2d_v1_2::examples_nhwc_weight_as_input_relaxed_2);
+                           conv2d_v1_2::get_examples_nhwc_weight_as_input_relaxed_2());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_nhwc_weight_as_input_relaxed_2) {
   const Model model = conv2d_v1_2::createTestModel_nhwc_weight_as_input_relaxed_2();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_nhwc_weight_as_input_relaxed_2);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_nhwc_weight_as_input_relaxed_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2802,12 +3037,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_nhwc_weight_as_input_quant8_2,
                            conv2d_v1_2::is_ignored_nhwc_weight_as_input_quant8_2,
-                           conv2d_v1_2::examples_nhwc_weight_as_input_quant8_2);
+                           conv2d_v1_2::get_examples_nhwc_weight_as_input_quant8_2());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_nhwc_weight_as_input_quant8_2) {
   const Model model = conv2d_v1_2::createTestModel_nhwc_weight_as_input_quant8_2();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_nhwc_weight_as_input_quant8_2);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_nhwc_weight_as_input_quant8_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2817,12 +3052,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_nchw_2,
                            conv2d_v1_2::is_ignored_nchw_2,
-                           conv2d_v1_2::examples_nchw_2);
+                           conv2d_v1_2::get_examples_nchw_2());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_nchw_2) {
   const Model model = conv2d_v1_2::createTestModel_nchw_2();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_nchw_2);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_nchw_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2832,12 +3067,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_nchw_relaxed_2,
                            conv2d_v1_2::is_ignored_nchw_relaxed_2,
-                           conv2d_v1_2::examples_nchw_relaxed_2);
+                           conv2d_v1_2::get_examples_nchw_relaxed_2());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_nchw_relaxed_2) {
   const Model model = conv2d_v1_2::createTestModel_nchw_relaxed_2();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_nchw_relaxed_2);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_nchw_relaxed_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2847,12 +3082,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_nchw_quant8_2,
                            conv2d_v1_2::is_ignored_nchw_quant8_2,
-                           conv2d_v1_2::examples_nchw_quant8_2);
+                           conv2d_v1_2::get_examples_nchw_quant8_2());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_nchw_quant8_2) {
   const Model model = conv2d_v1_2::createTestModel_nchw_quant8_2();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_nchw_quant8_2);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_nchw_quant8_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2862,12 +3097,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_nchw_weight_as_input_2,
                            conv2d_v1_2::is_ignored_nchw_weight_as_input_2,
-                           conv2d_v1_2::examples_nchw_weight_as_input_2);
+                           conv2d_v1_2::get_examples_nchw_weight_as_input_2());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_nchw_weight_as_input_2) {
   const Model model = conv2d_v1_2::createTestModel_nchw_weight_as_input_2();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_nchw_weight_as_input_2);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_nchw_weight_as_input_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2877,12 +3112,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_nchw_weight_as_input_relaxed_2,
                            conv2d_v1_2::is_ignored_nchw_weight_as_input_relaxed_2,
-                           conv2d_v1_2::examples_nchw_weight_as_input_relaxed_2);
+                           conv2d_v1_2::get_examples_nchw_weight_as_input_relaxed_2());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_nchw_weight_as_input_relaxed_2) {
   const Model model = conv2d_v1_2::createTestModel_nchw_weight_as_input_relaxed_2();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_nchw_weight_as_input_relaxed_2);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_nchw_weight_as_input_relaxed_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2892,12 +3127,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_nchw_weight_as_input_quant8_2,
                            conv2d_v1_2::is_ignored_nchw_weight_as_input_quant8_2,
-                           conv2d_v1_2::examples_nchw_weight_as_input_quant8_2);
+                           conv2d_v1_2::get_examples_nchw_weight_as_input_quant8_2());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_nchw_weight_as_input_quant8_2) {
   const Model model = conv2d_v1_2::createTestModel_nchw_weight_as_input_quant8_2();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_nchw_weight_as_input_quant8_2);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_nchw_weight_as_input_quant8_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2907,12 +3142,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_channel_nhwc,
                            conv2d_v1_2::is_ignored_channel_nhwc,
-                           conv2d_v1_2::examples_channel_nhwc);
+                           conv2d_v1_2::get_examples_channel_nhwc());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_channel_nhwc) {
   const Model model = conv2d_v1_2::createTestModel_channel_nhwc();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_channel_nhwc);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_channel_nhwc());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2922,12 +3157,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_channel_nhwc_relaxed,
                            conv2d_v1_2::is_ignored_channel_nhwc_relaxed,
-                           conv2d_v1_2::examples_channel_nhwc_relaxed);
+                           conv2d_v1_2::get_examples_channel_nhwc_relaxed());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_channel_nhwc_relaxed) {
   const Model model = conv2d_v1_2::createTestModel_channel_nhwc_relaxed();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_channel_nhwc_relaxed);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_channel_nhwc_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2937,12 +3172,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_channel_nhwc_quant8,
                            conv2d_v1_2::is_ignored_channel_nhwc_quant8,
-                           conv2d_v1_2::examples_channel_nhwc_quant8);
+                           conv2d_v1_2::get_examples_channel_nhwc_quant8());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_channel_nhwc_quant8) {
   const Model model = conv2d_v1_2::createTestModel_channel_nhwc_quant8();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_channel_nhwc_quant8);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_channel_nhwc_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2952,12 +3187,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_channel_nhwc_weight_as_input,
                            conv2d_v1_2::is_ignored_channel_nhwc_weight_as_input,
-                           conv2d_v1_2::examples_channel_nhwc_weight_as_input);
+                           conv2d_v1_2::get_examples_channel_nhwc_weight_as_input());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_channel_nhwc_weight_as_input) {
   const Model model = conv2d_v1_2::createTestModel_channel_nhwc_weight_as_input();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_channel_nhwc_weight_as_input);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_channel_nhwc_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2967,12 +3202,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_channel_nhwc_weight_as_input_relaxed,
                            conv2d_v1_2::is_ignored_channel_nhwc_weight_as_input_relaxed,
-                           conv2d_v1_2::examples_channel_nhwc_weight_as_input_relaxed);
+                           conv2d_v1_2::get_examples_channel_nhwc_weight_as_input_relaxed());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_channel_nhwc_weight_as_input_relaxed) {
   const Model model = conv2d_v1_2::createTestModel_channel_nhwc_weight_as_input_relaxed();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_channel_nhwc_weight_as_input_relaxed);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_channel_nhwc_weight_as_input_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2982,12 +3217,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_channel_nhwc_weight_as_input_quant8,
                            conv2d_v1_2::is_ignored_channel_nhwc_weight_as_input_quant8,
-                           conv2d_v1_2::examples_channel_nhwc_weight_as_input_quant8);
+                           conv2d_v1_2::get_examples_channel_nhwc_weight_as_input_quant8());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_channel_nhwc_weight_as_input_quant8) {
   const Model model = conv2d_v1_2::createTestModel_channel_nhwc_weight_as_input_quant8();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_channel_nhwc_weight_as_input_quant8);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_channel_nhwc_weight_as_input_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -2997,12 +3232,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_channel_nchw,
                            conv2d_v1_2::is_ignored_channel_nchw,
-                           conv2d_v1_2::examples_channel_nchw);
+                           conv2d_v1_2::get_examples_channel_nchw());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_channel_nchw) {
   const Model model = conv2d_v1_2::createTestModel_channel_nchw();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_channel_nchw);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_channel_nchw());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3012,12 +3247,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_channel_nchw_relaxed,
                            conv2d_v1_2::is_ignored_channel_nchw_relaxed,
-                           conv2d_v1_2::examples_channel_nchw_relaxed);
+                           conv2d_v1_2::get_examples_channel_nchw_relaxed());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_channel_nchw_relaxed) {
   const Model model = conv2d_v1_2::createTestModel_channel_nchw_relaxed();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_channel_nchw_relaxed);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_channel_nchw_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3027,12 +3262,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_channel_nchw_quant8,
                            conv2d_v1_2::is_ignored_channel_nchw_quant8,
-                           conv2d_v1_2::examples_channel_nchw_quant8);
+                           conv2d_v1_2::get_examples_channel_nchw_quant8());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_channel_nchw_quant8) {
   const Model model = conv2d_v1_2::createTestModel_channel_nchw_quant8();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_channel_nchw_quant8);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_channel_nchw_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3042,12 +3277,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_channel_nchw_weight_as_input,
                            conv2d_v1_2::is_ignored_channel_nchw_weight_as_input,
-                           conv2d_v1_2::examples_channel_nchw_weight_as_input);
+                           conv2d_v1_2::get_examples_channel_nchw_weight_as_input());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_channel_nchw_weight_as_input) {
   const Model model = conv2d_v1_2::createTestModel_channel_nchw_weight_as_input();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_channel_nchw_weight_as_input);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_channel_nchw_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3057,12 +3292,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_channel_nchw_weight_as_input_relaxed,
                            conv2d_v1_2::is_ignored_channel_nchw_weight_as_input_relaxed,
-                           conv2d_v1_2::examples_channel_nchw_weight_as_input_relaxed);
+                           conv2d_v1_2::get_examples_channel_nchw_weight_as_input_relaxed());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_channel_nchw_weight_as_input_relaxed) {
   const Model model = conv2d_v1_2::createTestModel_channel_nchw_weight_as_input_relaxed();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_channel_nchw_weight_as_input_relaxed);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_channel_nchw_weight_as_input_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3072,12 +3307,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_channel_nchw_weight_as_input_quant8,
                            conv2d_v1_2::is_ignored_channel_nchw_weight_as_input_quant8,
-                           conv2d_v1_2::examples_channel_nchw_weight_as_input_quant8);
+                           conv2d_v1_2::get_examples_channel_nchw_weight_as_input_quant8());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_channel_nchw_weight_as_input_quant8) {
   const Model model = conv2d_v1_2::createTestModel_channel_nchw_weight_as_input_quant8();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_channel_nchw_weight_as_input_quant8);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_channel_nchw_weight_as_input_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3087,12 +3322,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_large_nhwc,
                            conv2d_v1_2::is_ignored_large_nhwc,
-                           conv2d_v1_2::examples_large_nhwc);
+                           conv2d_v1_2::get_examples_large_nhwc());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_large_nhwc) {
   const Model model = conv2d_v1_2::createTestModel_large_nhwc();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_large_nhwc);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_large_nhwc());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3102,12 +3337,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_large_nhwc_relaxed,
                            conv2d_v1_2::is_ignored_large_nhwc_relaxed,
-                           conv2d_v1_2::examples_large_nhwc_relaxed);
+                           conv2d_v1_2::get_examples_large_nhwc_relaxed());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_large_nhwc_relaxed) {
   const Model model = conv2d_v1_2::createTestModel_large_nhwc_relaxed();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_large_nhwc_relaxed);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_large_nhwc_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3117,12 +3352,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_large_nhwc_quant8,
                            conv2d_v1_2::is_ignored_large_nhwc_quant8,
-                           conv2d_v1_2::examples_large_nhwc_quant8);
+                           conv2d_v1_2::get_examples_large_nhwc_quant8());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_large_nhwc_quant8) {
   const Model model = conv2d_v1_2::createTestModel_large_nhwc_quant8();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_large_nhwc_quant8);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_large_nhwc_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3132,12 +3367,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_large_nhwc_weight_as_input,
                            conv2d_v1_2::is_ignored_large_nhwc_weight_as_input,
-                           conv2d_v1_2::examples_large_nhwc_weight_as_input);
+                           conv2d_v1_2::get_examples_large_nhwc_weight_as_input());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_large_nhwc_weight_as_input) {
   const Model model = conv2d_v1_2::createTestModel_large_nhwc_weight_as_input();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_large_nhwc_weight_as_input);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_large_nhwc_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3147,12 +3382,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_large_nhwc_weight_as_input_relaxed,
                            conv2d_v1_2::is_ignored_large_nhwc_weight_as_input_relaxed,
-                           conv2d_v1_2::examples_large_nhwc_weight_as_input_relaxed);
+                           conv2d_v1_2::get_examples_large_nhwc_weight_as_input_relaxed());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_large_nhwc_weight_as_input_relaxed) {
   const Model model = conv2d_v1_2::createTestModel_large_nhwc_weight_as_input_relaxed();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_large_nhwc_weight_as_input_relaxed);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_large_nhwc_weight_as_input_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3162,12 +3397,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_large_nhwc_weight_as_input_quant8,
                            conv2d_v1_2::is_ignored_large_nhwc_weight_as_input_quant8,
-                           conv2d_v1_2::examples_large_nhwc_weight_as_input_quant8);
+                           conv2d_v1_2::get_examples_large_nhwc_weight_as_input_quant8());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_large_nhwc_weight_as_input_quant8) {
   const Model model = conv2d_v1_2::createTestModel_large_nhwc_weight_as_input_quant8();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_large_nhwc_weight_as_input_quant8);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_large_nhwc_weight_as_input_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3177,12 +3412,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_large_nchw,
                            conv2d_v1_2::is_ignored_large_nchw,
-                           conv2d_v1_2::examples_large_nchw);
+                           conv2d_v1_2::get_examples_large_nchw());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_large_nchw) {
   const Model model = conv2d_v1_2::createTestModel_large_nchw();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_large_nchw);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_large_nchw());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3192,12 +3427,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_large_nchw_relaxed,
                            conv2d_v1_2::is_ignored_large_nchw_relaxed,
-                           conv2d_v1_2::examples_large_nchw_relaxed);
+                           conv2d_v1_2::get_examples_large_nchw_relaxed());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_large_nchw_relaxed) {
   const Model model = conv2d_v1_2::createTestModel_large_nchw_relaxed();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_large_nchw_relaxed);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_large_nchw_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3207,12 +3442,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_large_nchw_quant8,
                            conv2d_v1_2::is_ignored_large_nchw_quant8,
-                           conv2d_v1_2::examples_large_nchw_quant8);
+                           conv2d_v1_2::get_examples_large_nchw_quant8());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_large_nchw_quant8) {
   const Model model = conv2d_v1_2::createTestModel_large_nchw_quant8();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_large_nchw_quant8);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_large_nchw_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3222,12 +3457,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_large_nchw_weight_as_input,
                            conv2d_v1_2::is_ignored_large_nchw_weight_as_input,
-                           conv2d_v1_2::examples_large_nchw_weight_as_input);
+                           conv2d_v1_2::get_examples_large_nchw_weight_as_input());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_large_nchw_weight_as_input) {
   const Model model = conv2d_v1_2::createTestModel_large_nchw_weight_as_input();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_large_nchw_weight_as_input);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_large_nchw_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3237,12 +3472,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_large_nchw_weight_as_input_relaxed,
                            conv2d_v1_2::is_ignored_large_nchw_weight_as_input_relaxed,
-                           conv2d_v1_2::examples_large_nchw_weight_as_input_relaxed);
+                           conv2d_v1_2::get_examples_large_nchw_weight_as_input_relaxed());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_large_nchw_weight_as_input_relaxed) {
   const Model model = conv2d_v1_2::createTestModel_large_nchw_weight_as_input_relaxed();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_large_nchw_weight_as_input_relaxed);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_large_nchw_weight_as_input_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3252,12 +3487,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_large_nchw_weight_as_input_quant8,
                            conv2d_v1_2::is_ignored_large_nchw_weight_as_input_quant8,
-                           conv2d_v1_2::examples_large_nchw_weight_as_input_quant8);
+                           conv2d_v1_2::get_examples_large_nchw_weight_as_input_quant8());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_large_nchw_weight_as_input_quant8) {
   const Model model = conv2d_v1_2::createTestModel_large_nchw_weight_as_input_quant8();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_large_nchw_weight_as_input_quant8);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_large_nchw_weight_as_input_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3267,12 +3502,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_1_H3_W2_SAME_nhwc,
                            conv2d_v1_2::is_ignored_1_H3_W2_SAME_nhwc,
-                           conv2d_v1_2::examples_1_H3_W2_SAME_nhwc);
+                           conv2d_v1_2::get_examples_1_H3_W2_SAME_nhwc());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_1_H3_W2_SAME_nhwc) {
   const Model model = conv2d_v1_2::createTestModel_1_H3_W2_SAME_nhwc();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_1_H3_W2_SAME_nhwc);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_1_H3_W2_SAME_nhwc());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3282,12 +3517,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_1_H3_W2_SAME_nhwc_relaxed,
                            conv2d_v1_2::is_ignored_1_H3_W2_SAME_nhwc_relaxed,
-                           conv2d_v1_2::examples_1_H3_W2_SAME_nhwc_relaxed);
+                           conv2d_v1_2::get_examples_1_H3_W2_SAME_nhwc_relaxed());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_1_H3_W2_SAME_nhwc_relaxed) {
   const Model model = conv2d_v1_2::createTestModel_1_H3_W2_SAME_nhwc_relaxed();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_1_H3_W2_SAME_nhwc_relaxed);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_1_H3_W2_SAME_nhwc_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3297,12 +3532,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_1_H3_W2_SAME_nchw,
                            conv2d_v1_2::is_ignored_1_H3_W2_SAME_nchw,
-                           conv2d_v1_2::examples_1_H3_W2_SAME_nchw);
+                           conv2d_v1_2::get_examples_1_H3_W2_SAME_nchw());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_1_H3_W2_SAME_nchw) {
   const Model model = conv2d_v1_2::createTestModel_1_H3_W2_SAME_nchw();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_1_H3_W2_SAME_nchw);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_1_H3_W2_SAME_nchw());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3312,12 +3547,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_1_H3_W2_SAME_nchw_relaxed,
                            conv2d_v1_2::is_ignored_1_H3_W2_SAME_nchw_relaxed,
-                           conv2d_v1_2::examples_1_H3_W2_SAME_nchw_relaxed);
+                           conv2d_v1_2::get_examples_1_H3_W2_SAME_nchw_relaxed());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_1_H3_W2_SAME_nchw_relaxed) {
   const Model model = conv2d_v1_2::createTestModel_1_H3_W2_SAME_nchw_relaxed();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_1_H3_W2_SAME_nchw_relaxed);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_1_H3_W2_SAME_nchw_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3327,12 +3562,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_1_H3_W2_VALID_nhwc,
                            conv2d_v1_2::is_ignored_1_H3_W2_VALID_nhwc,
-                           conv2d_v1_2::examples_1_H3_W2_VALID_nhwc);
+                           conv2d_v1_2::get_examples_1_H3_W2_VALID_nhwc());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_1_H3_W2_VALID_nhwc) {
   const Model model = conv2d_v1_2::createTestModel_1_H3_W2_VALID_nhwc();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_1_H3_W2_VALID_nhwc);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_1_H3_W2_VALID_nhwc());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3342,12 +3577,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_1_H3_W2_VALID_nhwc_relaxed,
                            conv2d_v1_2::is_ignored_1_H3_W2_VALID_nhwc_relaxed,
-                           conv2d_v1_2::examples_1_H3_W2_VALID_nhwc_relaxed);
+                           conv2d_v1_2::get_examples_1_H3_W2_VALID_nhwc_relaxed());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_1_H3_W2_VALID_nhwc_relaxed) {
   const Model model = conv2d_v1_2::createTestModel_1_H3_W2_VALID_nhwc_relaxed();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_1_H3_W2_VALID_nhwc_relaxed);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_1_H3_W2_VALID_nhwc_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3357,12 +3592,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_1_H3_W2_VALID_nchw,
                            conv2d_v1_2::is_ignored_1_H3_W2_VALID_nchw,
-                           conv2d_v1_2::examples_1_H3_W2_VALID_nchw);
+                           conv2d_v1_2::get_examples_1_H3_W2_VALID_nchw());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_1_H3_W2_VALID_nchw) {
   const Model model = conv2d_v1_2::createTestModel_1_H3_W2_VALID_nchw();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_1_H3_W2_VALID_nchw);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_1_H3_W2_VALID_nchw());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3372,12 +3607,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_1_H3_W2_VALID_nchw_relaxed,
                            conv2d_v1_2::is_ignored_1_H3_W2_VALID_nchw_relaxed,
-                           conv2d_v1_2::examples_1_H3_W2_VALID_nchw_relaxed);
+                           conv2d_v1_2::get_examples_1_H3_W2_VALID_nchw_relaxed());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_1_H3_W2_VALID_nchw_relaxed) {
   const Model model = conv2d_v1_2::createTestModel_1_H3_W2_VALID_nchw_relaxed();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_1_H3_W2_VALID_nchw_relaxed);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_1_H3_W2_VALID_nchw_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3387,12 +3622,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_3_H3_W2_SAME_nhwc,
                            conv2d_v1_2::is_ignored_3_H3_W2_SAME_nhwc,
-                           conv2d_v1_2::examples_3_H3_W2_SAME_nhwc);
+                           conv2d_v1_2::get_examples_3_H3_W2_SAME_nhwc());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_3_H3_W2_SAME_nhwc) {
   const Model model = conv2d_v1_2::createTestModel_3_H3_W2_SAME_nhwc();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_3_H3_W2_SAME_nhwc);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_3_H3_W2_SAME_nhwc());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3402,12 +3637,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_3_H3_W2_SAME_nhwc_relaxed,
                            conv2d_v1_2::is_ignored_3_H3_W2_SAME_nhwc_relaxed,
-                           conv2d_v1_2::examples_3_H3_W2_SAME_nhwc_relaxed);
+                           conv2d_v1_2::get_examples_3_H3_W2_SAME_nhwc_relaxed());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_3_H3_W2_SAME_nhwc_relaxed) {
   const Model model = conv2d_v1_2::createTestModel_3_H3_W2_SAME_nhwc_relaxed();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_3_H3_W2_SAME_nhwc_relaxed);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_3_H3_W2_SAME_nhwc_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3417,12 +3652,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_3_H3_W2_SAME_nchw,
                            conv2d_v1_2::is_ignored_3_H3_W2_SAME_nchw,
-                           conv2d_v1_2::examples_3_H3_W2_SAME_nchw);
+                           conv2d_v1_2::get_examples_3_H3_W2_SAME_nchw());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_3_H3_W2_SAME_nchw) {
   const Model model = conv2d_v1_2::createTestModel_3_H3_W2_SAME_nchw();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_3_H3_W2_SAME_nchw);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_3_H3_W2_SAME_nchw());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3432,12 +3667,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_3_H3_W2_SAME_nchw_relaxed,
                            conv2d_v1_2::is_ignored_3_H3_W2_SAME_nchw_relaxed,
-                           conv2d_v1_2::examples_3_H3_W2_SAME_nchw_relaxed);
+                           conv2d_v1_2::get_examples_3_H3_W2_SAME_nchw_relaxed());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_3_H3_W2_SAME_nchw_relaxed) {
   const Model model = conv2d_v1_2::createTestModel_3_H3_W2_SAME_nchw_relaxed();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_3_H3_W2_SAME_nchw_relaxed);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_3_H3_W2_SAME_nchw_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3447,12 +3682,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_3_H3_W2_VALID_nhwc,
                            conv2d_v1_2::is_ignored_3_H3_W2_VALID_nhwc,
-                           conv2d_v1_2::examples_3_H3_W2_VALID_nhwc);
+                           conv2d_v1_2::get_examples_3_H3_W2_VALID_nhwc());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_3_H3_W2_VALID_nhwc) {
   const Model model = conv2d_v1_2::createTestModel_3_H3_W2_VALID_nhwc();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_3_H3_W2_VALID_nhwc);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_3_H3_W2_VALID_nhwc());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3462,12 +3697,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_3_H3_W2_VALID_nhwc_relaxed,
                            conv2d_v1_2::is_ignored_3_H3_W2_VALID_nhwc_relaxed,
-                           conv2d_v1_2::examples_3_H3_W2_VALID_nhwc_relaxed);
+                           conv2d_v1_2::get_examples_3_H3_W2_VALID_nhwc_relaxed());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_3_H3_W2_VALID_nhwc_relaxed) {
   const Model model = conv2d_v1_2::createTestModel_3_H3_W2_VALID_nhwc_relaxed();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_3_H3_W2_VALID_nhwc_relaxed);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_3_H3_W2_VALID_nhwc_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3477,12 +3712,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_3_H3_W2_VALID_nchw,
                            conv2d_v1_2::is_ignored_3_H3_W2_VALID_nchw,
-                           conv2d_v1_2::examples_3_H3_W2_VALID_nchw);
+                           conv2d_v1_2::get_examples_3_H3_W2_VALID_nchw());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_3_H3_W2_VALID_nchw) {
   const Model model = conv2d_v1_2::createTestModel_3_H3_W2_VALID_nchw();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_3_H3_W2_VALID_nchw);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_3_H3_W2_VALID_nchw());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3492,12 +3727,12 @@
   generated_tests::Execute(device,
                            conv2d_v1_2::createTestModel_3_H3_W2_VALID_nchw_relaxed,
                            conv2d_v1_2::is_ignored_3_H3_W2_VALID_nchw_relaxed,
-                           conv2d_v1_2::examples_3_H3_W2_VALID_nchw_relaxed);
+                           conv2d_v1_2::get_examples_3_H3_W2_VALID_nchw_relaxed());
 }
 
 TEST_F(ValidationTest, conv2d_v1_2_3_H3_W2_VALID_nchw_relaxed) {
   const Model model = conv2d_v1_2::createTestModel_3_H3_W2_VALID_nchw_relaxed();
-  const std::vector<Request> requests = createRequests(conv2d_v1_2::examples_3_H3_W2_VALID_nchw_relaxed);
+  const std::vector<Request> requests = createRequests(conv2d_v1_2::get_examples_3_H3_W2_VALID_nchw_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3515,12 +3750,12 @@
   generated_tests::Execute(device,
                            depth_to_space_v1_2::createTestModel_nhwc,
                            depth_to_space_v1_2::is_ignored_nhwc,
-                           depth_to_space_v1_2::examples_nhwc);
+                           depth_to_space_v1_2::get_examples_nhwc());
 }
 
 TEST_F(ValidationTest, depth_to_space_v1_2_nhwc) {
   const Model model = depth_to_space_v1_2::createTestModel_nhwc();
-  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::examples_nhwc);
+  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::get_examples_nhwc());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3530,12 +3765,27 @@
   generated_tests::Execute(device,
                            depth_to_space_v1_2::createTestModel_nhwc_relaxed,
                            depth_to_space_v1_2::is_ignored_nhwc_relaxed,
-                           depth_to_space_v1_2::examples_nhwc_relaxed);
+                           depth_to_space_v1_2::get_examples_nhwc_relaxed());
 }
 
 TEST_F(ValidationTest, depth_to_space_v1_2_nhwc_relaxed) {
   const Model model = depth_to_space_v1_2::createTestModel_nhwc_relaxed();
-  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::examples_nhwc_relaxed);
+  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::get_examples_nhwc_relaxed());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, depth_to_space_v1_2_nhwc_float16) {
+  generated_tests::Execute(device,
+                           depth_to_space_v1_2::createTestModel_nhwc_float16,
+                           depth_to_space_v1_2::is_ignored_nhwc_float16,
+                           depth_to_space_v1_2::get_examples_nhwc_float16());
+}
+
+TEST_F(ValidationTest, depth_to_space_v1_2_nhwc_float16) {
+  const Model model = depth_to_space_v1_2::createTestModel_nhwc_float16();
+  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::get_examples_nhwc_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3545,12 +3795,12 @@
   generated_tests::Execute(device,
                            depth_to_space_v1_2::createTestModel_nhwc_quant8,
                            depth_to_space_v1_2::is_ignored_nhwc_quant8,
-                           depth_to_space_v1_2::examples_nhwc_quant8);
+                           depth_to_space_v1_2::get_examples_nhwc_quant8());
 }
 
 TEST_F(ValidationTest, depth_to_space_v1_2_nhwc_quant8) {
   const Model model = depth_to_space_v1_2::createTestModel_nhwc_quant8();
-  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::examples_nhwc_quant8);
+  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::get_examples_nhwc_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3560,12 +3810,12 @@
   generated_tests::Execute(device,
                            depth_to_space_v1_2::createTestModel_nchw,
                            depth_to_space_v1_2::is_ignored_nchw,
-                           depth_to_space_v1_2::examples_nchw);
+                           depth_to_space_v1_2::get_examples_nchw());
 }
 
 TEST_F(ValidationTest, depth_to_space_v1_2_nchw) {
   const Model model = depth_to_space_v1_2::createTestModel_nchw();
-  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::examples_nchw);
+  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::get_examples_nchw());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3575,12 +3825,27 @@
   generated_tests::Execute(device,
                            depth_to_space_v1_2::createTestModel_nchw_relaxed,
                            depth_to_space_v1_2::is_ignored_nchw_relaxed,
-                           depth_to_space_v1_2::examples_nchw_relaxed);
+                           depth_to_space_v1_2::get_examples_nchw_relaxed());
 }
 
 TEST_F(ValidationTest, depth_to_space_v1_2_nchw_relaxed) {
   const Model model = depth_to_space_v1_2::createTestModel_nchw_relaxed();
-  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::examples_nchw_relaxed);
+  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::get_examples_nchw_relaxed());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, depth_to_space_v1_2_nchw_float16) {
+  generated_tests::Execute(device,
+                           depth_to_space_v1_2::createTestModel_nchw_float16,
+                           depth_to_space_v1_2::is_ignored_nchw_float16,
+                           depth_to_space_v1_2::get_examples_nchw_float16());
+}
+
+TEST_F(ValidationTest, depth_to_space_v1_2_nchw_float16) {
+  const Model model = depth_to_space_v1_2::createTestModel_nchw_float16();
+  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::get_examples_nchw_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3590,12 +3855,12 @@
   generated_tests::Execute(device,
                            depth_to_space_v1_2::createTestModel_nchw_quant8,
                            depth_to_space_v1_2::is_ignored_nchw_quant8,
-                           depth_to_space_v1_2::examples_nchw_quant8);
+                           depth_to_space_v1_2::get_examples_nchw_quant8());
 }
 
 TEST_F(ValidationTest, depth_to_space_v1_2_nchw_quant8) {
   const Model model = depth_to_space_v1_2::createTestModel_nchw_quant8();
-  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::examples_nchw_quant8);
+  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::get_examples_nchw_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3605,12 +3870,12 @@
   generated_tests::Execute(device,
                            depth_to_space_v1_2::createTestModel_nhwc_2,
                            depth_to_space_v1_2::is_ignored_nhwc_2,
-                           depth_to_space_v1_2::examples_nhwc_2);
+                           depth_to_space_v1_2::get_examples_nhwc_2());
 }
 
 TEST_F(ValidationTest, depth_to_space_v1_2_nhwc_2) {
   const Model model = depth_to_space_v1_2::createTestModel_nhwc_2();
-  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::examples_nhwc_2);
+  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::get_examples_nhwc_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3620,12 +3885,27 @@
   generated_tests::Execute(device,
                            depth_to_space_v1_2::createTestModel_nhwc_relaxed_2,
                            depth_to_space_v1_2::is_ignored_nhwc_relaxed_2,
-                           depth_to_space_v1_2::examples_nhwc_relaxed_2);
+                           depth_to_space_v1_2::get_examples_nhwc_relaxed_2());
 }
 
 TEST_F(ValidationTest, depth_to_space_v1_2_nhwc_relaxed_2) {
   const Model model = depth_to_space_v1_2::createTestModel_nhwc_relaxed_2();
-  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::examples_nhwc_relaxed_2);
+  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::get_examples_nhwc_relaxed_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, depth_to_space_v1_2_nhwc_float16_2) {
+  generated_tests::Execute(device,
+                           depth_to_space_v1_2::createTestModel_nhwc_float16_2,
+                           depth_to_space_v1_2::is_ignored_nhwc_float16_2,
+                           depth_to_space_v1_2::get_examples_nhwc_float16_2());
+}
+
+TEST_F(ValidationTest, depth_to_space_v1_2_nhwc_float16_2) {
+  const Model model = depth_to_space_v1_2::createTestModel_nhwc_float16_2();
+  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::get_examples_nhwc_float16_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3635,12 +3915,12 @@
   generated_tests::Execute(device,
                            depth_to_space_v1_2::createTestModel_nhwc_quant8_2,
                            depth_to_space_v1_2::is_ignored_nhwc_quant8_2,
-                           depth_to_space_v1_2::examples_nhwc_quant8_2);
+                           depth_to_space_v1_2::get_examples_nhwc_quant8_2());
 }
 
 TEST_F(ValidationTest, depth_to_space_v1_2_nhwc_quant8_2) {
   const Model model = depth_to_space_v1_2::createTestModel_nhwc_quant8_2();
-  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::examples_nhwc_quant8_2);
+  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::get_examples_nhwc_quant8_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3650,12 +3930,12 @@
   generated_tests::Execute(device,
                            depth_to_space_v1_2::createTestModel_nchw_2,
                            depth_to_space_v1_2::is_ignored_nchw_2,
-                           depth_to_space_v1_2::examples_nchw_2);
+                           depth_to_space_v1_2::get_examples_nchw_2());
 }
 
 TEST_F(ValidationTest, depth_to_space_v1_2_nchw_2) {
   const Model model = depth_to_space_v1_2::createTestModel_nchw_2();
-  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::examples_nchw_2);
+  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::get_examples_nchw_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3665,12 +3945,27 @@
   generated_tests::Execute(device,
                            depth_to_space_v1_2::createTestModel_nchw_relaxed_2,
                            depth_to_space_v1_2::is_ignored_nchw_relaxed_2,
-                           depth_to_space_v1_2::examples_nchw_relaxed_2);
+                           depth_to_space_v1_2::get_examples_nchw_relaxed_2());
 }
 
 TEST_F(ValidationTest, depth_to_space_v1_2_nchw_relaxed_2) {
   const Model model = depth_to_space_v1_2::createTestModel_nchw_relaxed_2();
-  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::examples_nchw_relaxed_2);
+  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::get_examples_nchw_relaxed_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, depth_to_space_v1_2_nchw_float16_2) {
+  generated_tests::Execute(device,
+                           depth_to_space_v1_2::createTestModel_nchw_float16_2,
+                           depth_to_space_v1_2::is_ignored_nchw_float16_2,
+                           depth_to_space_v1_2::get_examples_nchw_float16_2());
+}
+
+TEST_F(ValidationTest, depth_to_space_v1_2_nchw_float16_2) {
+  const Model model = depth_to_space_v1_2::createTestModel_nchw_float16_2();
+  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::get_examples_nchw_float16_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3680,12 +3975,12 @@
   generated_tests::Execute(device,
                            depth_to_space_v1_2::createTestModel_nchw_quant8_2,
                            depth_to_space_v1_2::is_ignored_nchw_quant8_2,
-                           depth_to_space_v1_2::examples_nchw_quant8_2);
+                           depth_to_space_v1_2::get_examples_nchw_quant8_2());
 }
 
 TEST_F(ValidationTest, depth_to_space_v1_2_nchw_quant8_2) {
   const Model model = depth_to_space_v1_2::createTestModel_nchw_quant8_2();
-  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::examples_nchw_quant8_2);
+  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::get_examples_nchw_quant8_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3695,12 +3990,12 @@
   generated_tests::Execute(device,
                            depth_to_space_v1_2::createTestModel_nhwc_3,
                            depth_to_space_v1_2::is_ignored_nhwc_3,
-                           depth_to_space_v1_2::examples_nhwc_3);
+                           depth_to_space_v1_2::get_examples_nhwc_3());
 }
 
 TEST_F(ValidationTest, depth_to_space_v1_2_nhwc_3) {
   const Model model = depth_to_space_v1_2::createTestModel_nhwc_3();
-  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::examples_nhwc_3);
+  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::get_examples_nhwc_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3710,12 +4005,27 @@
   generated_tests::Execute(device,
                            depth_to_space_v1_2::createTestModel_nhwc_relaxed_3,
                            depth_to_space_v1_2::is_ignored_nhwc_relaxed_3,
-                           depth_to_space_v1_2::examples_nhwc_relaxed_3);
+                           depth_to_space_v1_2::get_examples_nhwc_relaxed_3());
 }
 
 TEST_F(ValidationTest, depth_to_space_v1_2_nhwc_relaxed_3) {
   const Model model = depth_to_space_v1_2::createTestModel_nhwc_relaxed_3();
-  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::examples_nhwc_relaxed_3);
+  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::get_examples_nhwc_relaxed_3());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, depth_to_space_v1_2_nhwc_float16_3) {
+  generated_tests::Execute(device,
+                           depth_to_space_v1_2::createTestModel_nhwc_float16_3,
+                           depth_to_space_v1_2::is_ignored_nhwc_float16_3,
+                           depth_to_space_v1_2::get_examples_nhwc_float16_3());
+}
+
+TEST_F(ValidationTest, depth_to_space_v1_2_nhwc_float16_3) {
+  const Model model = depth_to_space_v1_2::createTestModel_nhwc_float16_3();
+  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::get_examples_nhwc_float16_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3725,12 +4035,12 @@
   generated_tests::Execute(device,
                            depth_to_space_v1_2::createTestModel_nhwc_quant8_3,
                            depth_to_space_v1_2::is_ignored_nhwc_quant8_3,
-                           depth_to_space_v1_2::examples_nhwc_quant8_3);
+                           depth_to_space_v1_2::get_examples_nhwc_quant8_3());
 }
 
 TEST_F(ValidationTest, depth_to_space_v1_2_nhwc_quant8_3) {
   const Model model = depth_to_space_v1_2::createTestModel_nhwc_quant8_3();
-  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::examples_nhwc_quant8_3);
+  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::get_examples_nhwc_quant8_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3740,12 +4050,12 @@
   generated_tests::Execute(device,
                            depth_to_space_v1_2::createTestModel_nchw_3,
                            depth_to_space_v1_2::is_ignored_nchw_3,
-                           depth_to_space_v1_2::examples_nchw_3);
+                           depth_to_space_v1_2::get_examples_nchw_3());
 }
 
 TEST_F(ValidationTest, depth_to_space_v1_2_nchw_3) {
   const Model model = depth_to_space_v1_2::createTestModel_nchw_3();
-  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::examples_nchw_3);
+  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::get_examples_nchw_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3755,12 +4065,27 @@
   generated_tests::Execute(device,
                            depth_to_space_v1_2::createTestModel_nchw_relaxed_3,
                            depth_to_space_v1_2::is_ignored_nchw_relaxed_3,
-                           depth_to_space_v1_2::examples_nchw_relaxed_3);
+                           depth_to_space_v1_2::get_examples_nchw_relaxed_3());
 }
 
 TEST_F(ValidationTest, depth_to_space_v1_2_nchw_relaxed_3) {
   const Model model = depth_to_space_v1_2::createTestModel_nchw_relaxed_3();
-  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::examples_nchw_relaxed_3);
+  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::get_examples_nchw_relaxed_3());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, depth_to_space_v1_2_nchw_float16_3) {
+  generated_tests::Execute(device,
+                           depth_to_space_v1_2::createTestModel_nchw_float16_3,
+                           depth_to_space_v1_2::is_ignored_nchw_float16_3,
+                           depth_to_space_v1_2::get_examples_nchw_float16_3());
+}
+
+TEST_F(ValidationTest, depth_to_space_v1_2_nchw_float16_3) {
+  const Model model = depth_to_space_v1_2::createTestModel_nchw_float16_3();
+  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::get_examples_nchw_float16_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3770,12 +4095,12 @@
   generated_tests::Execute(device,
                            depth_to_space_v1_2::createTestModel_nchw_quant8_3,
                            depth_to_space_v1_2::is_ignored_nchw_quant8_3,
-                           depth_to_space_v1_2::examples_nchw_quant8_3);
+                           depth_to_space_v1_2::get_examples_nchw_quant8_3());
 }
 
 TEST_F(ValidationTest, depth_to_space_v1_2_nchw_quant8_3) {
   const Model model = depth_to_space_v1_2::createTestModel_nchw_quant8_3();
-  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::examples_nchw_quant8_3);
+  const std::vector<Request> requests = createRequests(depth_to_space_v1_2::get_examples_nchw_quant8_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3793,12 +4118,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_nhwc,
                            depthwise_conv2d_v1_2::is_ignored_nhwc,
-                           depthwise_conv2d_v1_2::examples_nhwc);
+                           depthwise_conv2d_v1_2::get_examples_nhwc());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_nhwc) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_nhwc();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_nhwc);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_nhwc());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3808,12 +4133,27 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_nhwc_relaxed,
                            depthwise_conv2d_v1_2::is_ignored_nhwc_relaxed,
-                           depthwise_conv2d_v1_2::examples_nhwc_relaxed);
+                           depthwise_conv2d_v1_2::get_examples_nhwc_relaxed());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_nhwc_relaxed) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_nhwc_relaxed();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_nhwc_relaxed);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_nhwc_relaxed());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_v1_2_nhwc_float16) {
+  generated_tests::Execute(device,
+                           depthwise_conv2d_v1_2::createTestModel_nhwc_float16,
+                           depthwise_conv2d_v1_2::is_ignored_nhwc_float16,
+                           depthwise_conv2d_v1_2::get_examples_nhwc_float16());
+}
+
+TEST_F(ValidationTest, depthwise_conv2d_v1_2_nhwc_float16) {
+  const Model model = depthwise_conv2d_v1_2::createTestModel_nhwc_float16();
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_nhwc_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3823,12 +4163,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_nhwc_quant8,
                            depthwise_conv2d_v1_2::is_ignored_nhwc_quant8,
-                           depthwise_conv2d_v1_2::examples_nhwc_quant8);
+                           depthwise_conv2d_v1_2::get_examples_nhwc_quant8());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_nhwc_quant8) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_nhwc_quant8();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_nhwc_quant8);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_nhwc_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3838,12 +4178,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_nhwc_weight_as_input,
                            depthwise_conv2d_v1_2::is_ignored_nhwc_weight_as_input,
-                           depthwise_conv2d_v1_2::examples_nhwc_weight_as_input);
+                           depthwise_conv2d_v1_2::get_examples_nhwc_weight_as_input());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_nhwc_weight_as_input) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_nhwc_weight_as_input();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_nhwc_weight_as_input);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_nhwc_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3853,12 +4193,27 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_nhwc_weight_as_input_relaxed,
                            depthwise_conv2d_v1_2::is_ignored_nhwc_weight_as_input_relaxed,
-                           depthwise_conv2d_v1_2::examples_nhwc_weight_as_input_relaxed);
+                           depthwise_conv2d_v1_2::get_examples_nhwc_weight_as_input_relaxed());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_nhwc_weight_as_input_relaxed) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_nhwc_weight_as_input_relaxed();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_nhwc_weight_as_input_relaxed);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_nhwc_weight_as_input_relaxed());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_v1_2_nhwc_weight_as_input_float16) {
+  generated_tests::Execute(device,
+                           depthwise_conv2d_v1_2::createTestModel_nhwc_weight_as_input_float16,
+                           depthwise_conv2d_v1_2::is_ignored_nhwc_weight_as_input_float16,
+                           depthwise_conv2d_v1_2::get_examples_nhwc_weight_as_input_float16());
+}
+
+TEST_F(ValidationTest, depthwise_conv2d_v1_2_nhwc_weight_as_input_float16) {
+  const Model model = depthwise_conv2d_v1_2::createTestModel_nhwc_weight_as_input_float16();
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_nhwc_weight_as_input_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3868,12 +4223,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_nhwc_weight_as_input_quant8,
                            depthwise_conv2d_v1_2::is_ignored_nhwc_weight_as_input_quant8,
-                           depthwise_conv2d_v1_2::examples_nhwc_weight_as_input_quant8);
+                           depthwise_conv2d_v1_2::get_examples_nhwc_weight_as_input_quant8());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_nhwc_weight_as_input_quant8) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_nhwc_weight_as_input_quant8();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_nhwc_weight_as_input_quant8);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_nhwc_weight_as_input_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3883,12 +4238,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_nchw,
                            depthwise_conv2d_v1_2::is_ignored_nchw,
-                           depthwise_conv2d_v1_2::examples_nchw);
+                           depthwise_conv2d_v1_2::get_examples_nchw());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_nchw) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_nchw();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_nchw);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_nchw());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3898,12 +4253,27 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_nchw_relaxed,
                            depthwise_conv2d_v1_2::is_ignored_nchw_relaxed,
-                           depthwise_conv2d_v1_2::examples_nchw_relaxed);
+                           depthwise_conv2d_v1_2::get_examples_nchw_relaxed());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_nchw_relaxed) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_nchw_relaxed();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_nchw_relaxed);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_nchw_relaxed());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_v1_2_nchw_float16) {
+  generated_tests::Execute(device,
+                           depthwise_conv2d_v1_2::createTestModel_nchw_float16,
+                           depthwise_conv2d_v1_2::is_ignored_nchw_float16,
+                           depthwise_conv2d_v1_2::get_examples_nchw_float16());
+}
+
+TEST_F(ValidationTest, depthwise_conv2d_v1_2_nchw_float16) {
+  const Model model = depthwise_conv2d_v1_2::createTestModel_nchw_float16();
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_nchw_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3913,12 +4283,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_nchw_quant8,
                            depthwise_conv2d_v1_2::is_ignored_nchw_quant8,
-                           depthwise_conv2d_v1_2::examples_nchw_quant8);
+                           depthwise_conv2d_v1_2::get_examples_nchw_quant8());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_nchw_quant8) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_nchw_quant8();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_nchw_quant8);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_nchw_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3928,12 +4298,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_nchw_weight_as_input,
                            depthwise_conv2d_v1_2::is_ignored_nchw_weight_as_input,
-                           depthwise_conv2d_v1_2::examples_nchw_weight_as_input);
+                           depthwise_conv2d_v1_2::get_examples_nchw_weight_as_input());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_nchw_weight_as_input) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_nchw_weight_as_input();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_nchw_weight_as_input);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_nchw_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3943,12 +4313,27 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_nchw_weight_as_input_relaxed,
                            depthwise_conv2d_v1_2::is_ignored_nchw_weight_as_input_relaxed,
-                           depthwise_conv2d_v1_2::examples_nchw_weight_as_input_relaxed);
+                           depthwise_conv2d_v1_2::get_examples_nchw_weight_as_input_relaxed());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_nchw_weight_as_input_relaxed) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_nchw_weight_as_input_relaxed();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_nchw_weight_as_input_relaxed);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_nchw_weight_as_input_relaxed());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_v1_2_nchw_weight_as_input_float16) {
+  generated_tests::Execute(device,
+                           depthwise_conv2d_v1_2::createTestModel_nchw_weight_as_input_float16,
+                           depthwise_conv2d_v1_2::is_ignored_nchw_weight_as_input_float16,
+                           depthwise_conv2d_v1_2::get_examples_nchw_weight_as_input_float16());
+}
+
+TEST_F(ValidationTest, depthwise_conv2d_v1_2_nchw_weight_as_input_float16) {
+  const Model model = depthwise_conv2d_v1_2::createTestModel_nchw_weight_as_input_float16();
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_nchw_weight_as_input_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3958,12 +4343,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_nchw_weight_as_input_quant8,
                            depthwise_conv2d_v1_2::is_ignored_nchw_weight_as_input_quant8,
-                           depthwise_conv2d_v1_2::examples_nchw_weight_as_input_quant8);
+                           depthwise_conv2d_v1_2::get_examples_nchw_weight_as_input_quant8());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_nchw_weight_as_input_quant8) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_nchw_weight_as_input_quant8();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_nchw_weight_as_input_quant8);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_nchw_weight_as_input_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3973,12 +4358,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_nhwc_2,
                            depthwise_conv2d_v1_2::is_ignored_nhwc_2,
-                           depthwise_conv2d_v1_2::examples_nhwc_2);
+                           depthwise_conv2d_v1_2::get_examples_nhwc_2());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_nhwc_2) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_nhwc_2();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_nhwc_2);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_nhwc_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -3988,12 +4373,27 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_nhwc_relaxed_2,
                            depthwise_conv2d_v1_2::is_ignored_nhwc_relaxed_2,
-                           depthwise_conv2d_v1_2::examples_nhwc_relaxed_2);
+                           depthwise_conv2d_v1_2::get_examples_nhwc_relaxed_2());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_nhwc_relaxed_2) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_nhwc_relaxed_2();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_nhwc_relaxed_2);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_nhwc_relaxed_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_v1_2_nhwc_float16_2) {
+  generated_tests::Execute(device,
+                           depthwise_conv2d_v1_2::createTestModel_nhwc_float16_2,
+                           depthwise_conv2d_v1_2::is_ignored_nhwc_float16_2,
+                           depthwise_conv2d_v1_2::get_examples_nhwc_float16_2());
+}
+
+TEST_F(ValidationTest, depthwise_conv2d_v1_2_nhwc_float16_2) {
+  const Model model = depthwise_conv2d_v1_2::createTestModel_nhwc_float16_2();
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_nhwc_float16_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4003,12 +4403,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_nhwc_quant8_2,
                            depthwise_conv2d_v1_2::is_ignored_nhwc_quant8_2,
-                           depthwise_conv2d_v1_2::examples_nhwc_quant8_2);
+                           depthwise_conv2d_v1_2::get_examples_nhwc_quant8_2());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_nhwc_quant8_2) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_nhwc_quant8_2();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_nhwc_quant8_2);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_nhwc_quant8_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4018,12 +4418,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_nhwc_weight_as_input_2,
                            depthwise_conv2d_v1_2::is_ignored_nhwc_weight_as_input_2,
-                           depthwise_conv2d_v1_2::examples_nhwc_weight_as_input_2);
+                           depthwise_conv2d_v1_2::get_examples_nhwc_weight_as_input_2());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_nhwc_weight_as_input_2) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_nhwc_weight_as_input_2();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_nhwc_weight_as_input_2);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_nhwc_weight_as_input_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4033,12 +4433,27 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_nhwc_weight_as_input_relaxed_2,
                            depthwise_conv2d_v1_2::is_ignored_nhwc_weight_as_input_relaxed_2,
-                           depthwise_conv2d_v1_2::examples_nhwc_weight_as_input_relaxed_2);
+                           depthwise_conv2d_v1_2::get_examples_nhwc_weight_as_input_relaxed_2());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_nhwc_weight_as_input_relaxed_2) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_nhwc_weight_as_input_relaxed_2();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_nhwc_weight_as_input_relaxed_2);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_nhwc_weight_as_input_relaxed_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_v1_2_nhwc_weight_as_input_float16_2) {
+  generated_tests::Execute(device,
+                           depthwise_conv2d_v1_2::createTestModel_nhwc_weight_as_input_float16_2,
+                           depthwise_conv2d_v1_2::is_ignored_nhwc_weight_as_input_float16_2,
+                           depthwise_conv2d_v1_2::get_examples_nhwc_weight_as_input_float16_2());
+}
+
+TEST_F(ValidationTest, depthwise_conv2d_v1_2_nhwc_weight_as_input_float16_2) {
+  const Model model = depthwise_conv2d_v1_2::createTestModel_nhwc_weight_as_input_float16_2();
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_nhwc_weight_as_input_float16_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4048,12 +4463,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_nhwc_weight_as_input_quant8_2,
                            depthwise_conv2d_v1_2::is_ignored_nhwc_weight_as_input_quant8_2,
-                           depthwise_conv2d_v1_2::examples_nhwc_weight_as_input_quant8_2);
+                           depthwise_conv2d_v1_2::get_examples_nhwc_weight_as_input_quant8_2());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_nhwc_weight_as_input_quant8_2) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_nhwc_weight_as_input_quant8_2();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_nhwc_weight_as_input_quant8_2);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_nhwc_weight_as_input_quant8_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4063,12 +4478,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_nchw_2,
                            depthwise_conv2d_v1_2::is_ignored_nchw_2,
-                           depthwise_conv2d_v1_2::examples_nchw_2);
+                           depthwise_conv2d_v1_2::get_examples_nchw_2());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_nchw_2) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_nchw_2();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_nchw_2);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_nchw_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4078,12 +4493,27 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_nchw_relaxed_2,
                            depthwise_conv2d_v1_2::is_ignored_nchw_relaxed_2,
-                           depthwise_conv2d_v1_2::examples_nchw_relaxed_2);
+                           depthwise_conv2d_v1_2::get_examples_nchw_relaxed_2());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_nchw_relaxed_2) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_nchw_relaxed_2();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_nchw_relaxed_2);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_nchw_relaxed_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_v1_2_nchw_float16_2) {
+  generated_tests::Execute(device,
+                           depthwise_conv2d_v1_2::createTestModel_nchw_float16_2,
+                           depthwise_conv2d_v1_2::is_ignored_nchw_float16_2,
+                           depthwise_conv2d_v1_2::get_examples_nchw_float16_2());
+}
+
+TEST_F(ValidationTest, depthwise_conv2d_v1_2_nchw_float16_2) {
+  const Model model = depthwise_conv2d_v1_2::createTestModel_nchw_float16_2();
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_nchw_float16_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4093,12 +4523,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_nchw_quant8_2,
                            depthwise_conv2d_v1_2::is_ignored_nchw_quant8_2,
-                           depthwise_conv2d_v1_2::examples_nchw_quant8_2);
+                           depthwise_conv2d_v1_2::get_examples_nchw_quant8_2());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_nchw_quant8_2) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_nchw_quant8_2();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_nchw_quant8_2);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_nchw_quant8_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4108,12 +4538,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_nchw_weight_as_input_2,
                            depthwise_conv2d_v1_2::is_ignored_nchw_weight_as_input_2,
-                           depthwise_conv2d_v1_2::examples_nchw_weight_as_input_2);
+                           depthwise_conv2d_v1_2::get_examples_nchw_weight_as_input_2());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_nchw_weight_as_input_2) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_nchw_weight_as_input_2();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_nchw_weight_as_input_2);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_nchw_weight_as_input_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4123,12 +4553,27 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_nchw_weight_as_input_relaxed_2,
                            depthwise_conv2d_v1_2::is_ignored_nchw_weight_as_input_relaxed_2,
-                           depthwise_conv2d_v1_2::examples_nchw_weight_as_input_relaxed_2);
+                           depthwise_conv2d_v1_2::get_examples_nchw_weight_as_input_relaxed_2());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_nchw_weight_as_input_relaxed_2) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_nchw_weight_as_input_relaxed_2();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_nchw_weight_as_input_relaxed_2);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_nchw_weight_as_input_relaxed_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_v1_2_nchw_weight_as_input_float16_2) {
+  generated_tests::Execute(device,
+                           depthwise_conv2d_v1_2::createTestModel_nchw_weight_as_input_float16_2,
+                           depthwise_conv2d_v1_2::is_ignored_nchw_weight_as_input_float16_2,
+                           depthwise_conv2d_v1_2::get_examples_nchw_weight_as_input_float16_2());
+}
+
+TEST_F(ValidationTest, depthwise_conv2d_v1_2_nchw_weight_as_input_float16_2) {
+  const Model model = depthwise_conv2d_v1_2::createTestModel_nchw_weight_as_input_float16_2();
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_nchw_weight_as_input_float16_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4138,12 +4583,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_nchw_weight_as_input_quant8_2,
                            depthwise_conv2d_v1_2::is_ignored_nchw_weight_as_input_quant8_2,
-                           depthwise_conv2d_v1_2::examples_nchw_weight_as_input_quant8_2);
+                           depthwise_conv2d_v1_2::get_examples_nchw_weight_as_input_quant8_2());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_nchw_weight_as_input_quant8_2) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_nchw_weight_as_input_quant8_2();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_nchw_weight_as_input_quant8_2);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_nchw_weight_as_input_quant8_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4153,12 +4598,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_large_nhwc,
                            depthwise_conv2d_v1_2::is_ignored_large_nhwc,
-                           depthwise_conv2d_v1_2::examples_large_nhwc);
+                           depthwise_conv2d_v1_2::get_examples_large_nhwc());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_large_nhwc) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_large_nhwc();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_large_nhwc);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_large_nhwc());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4168,12 +4613,27 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_large_nhwc_relaxed,
                            depthwise_conv2d_v1_2::is_ignored_large_nhwc_relaxed,
-                           depthwise_conv2d_v1_2::examples_large_nhwc_relaxed);
+                           depthwise_conv2d_v1_2::get_examples_large_nhwc_relaxed());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_large_nhwc_relaxed) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_large_nhwc_relaxed();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_large_nhwc_relaxed);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_large_nhwc_relaxed());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_v1_2_large_nhwc_float16) {
+  generated_tests::Execute(device,
+                           depthwise_conv2d_v1_2::createTestModel_large_nhwc_float16,
+                           depthwise_conv2d_v1_2::is_ignored_large_nhwc_float16,
+                           depthwise_conv2d_v1_2::get_examples_large_nhwc_float16());
+}
+
+TEST_F(ValidationTest, depthwise_conv2d_v1_2_large_nhwc_float16) {
+  const Model model = depthwise_conv2d_v1_2::createTestModel_large_nhwc_float16();
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_large_nhwc_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4183,12 +4643,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_large_nhwc_quant8,
                            depthwise_conv2d_v1_2::is_ignored_large_nhwc_quant8,
-                           depthwise_conv2d_v1_2::examples_large_nhwc_quant8);
+                           depthwise_conv2d_v1_2::get_examples_large_nhwc_quant8());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_large_nhwc_quant8) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_large_nhwc_quant8();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_large_nhwc_quant8);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_large_nhwc_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4198,12 +4658,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_large_nhwc_weight_as_input,
                            depthwise_conv2d_v1_2::is_ignored_large_nhwc_weight_as_input,
-                           depthwise_conv2d_v1_2::examples_large_nhwc_weight_as_input);
+                           depthwise_conv2d_v1_2::get_examples_large_nhwc_weight_as_input());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_large_nhwc_weight_as_input) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_large_nhwc_weight_as_input();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_large_nhwc_weight_as_input);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_large_nhwc_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4213,12 +4673,27 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_large_nhwc_weight_as_input_relaxed,
                            depthwise_conv2d_v1_2::is_ignored_large_nhwc_weight_as_input_relaxed,
-                           depthwise_conv2d_v1_2::examples_large_nhwc_weight_as_input_relaxed);
+                           depthwise_conv2d_v1_2::get_examples_large_nhwc_weight_as_input_relaxed());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_large_nhwc_weight_as_input_relaxed) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_large_nhwc_weight_as_input_relaxed();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_large_nhwc_weight_as_input_relaxed);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_large_nhwc_weight_as_input_relaxed());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_v1_2_large_nhwc_weight_as_input_float16) {
+  generated_tests::Execute(device,
+                           depthwise_conv2d_v1_2::createTestModel_large_nhwc_weight_as_input_float16,
+                           depthwise_conv2d_v1_2::is_ignored_large_nhwc_weight_as_input_float16,
+                           depthwise_conv2d_v1_2::get_examples_large_nhwc_weight_as_input_float16());
+}
+
+TEST_F(ValidationTest, depthwise_conv2d_v1_2_large_nhwc_weight_as_input_float16) {
+  const Model model = depthwise_conv2d_v1_2::createTestModel_large_nhwc_weight_as_input_float16();
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_large_nhwc_weight_as_input_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4228,12 +4703,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_large_nhwc_weight_as_input_quant8,
                            depthwise_conv2d_v1_2::is_ignored_large_nhwc_weight_as_input_quant8,
-                           depthwise_conv2d_v1_2::examples_large_nhwc_weight_as_input_quant8);
+                           depthwise_conv2d_v1_2::get_examples_large_nhwc_weight_as_input_quant8());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_large_nhwc_weight_as_input_quant8) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_large_nhwc_weight_as_input_quant8();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_large_nhwc_weight_as_input_quant8);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_large_nhwc_weight_as_input_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4243,12 +4718,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_large_nchw,
                            depthwise_conv2d_v1_2::is_ignored_large_nchw,
-                           depthwise_conv2d_v1_2::examples_large_nchw);
+                           depthwise_conv2d_v1_2::get_examples_large_nchw());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_large_nchw) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_large_nchw();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_large_nchw);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_large_nchw());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4258,12 +4733,27 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_large_nchw_relaxed,
                            depthwise_conv2d_v1_2::is_ignored_large_nchw_relaxed,
-                           depthwise_conv2d_v1_2::examples_large_nchw_relaxed);
+                           depthwise_conv2d_v1_2::get_examples_large_nchw_relaxed());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_large_nchw_relaxed) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_large_nchw_relaxed();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_large_nchw_relaxed);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_large_nchw_relaxed());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_v1_2_large_nchw_float16) {
+  generated_tests::Execute(device,
+                           depthwise_conv2d_v1_2::createTestModel_large_nchw_float16,
+                           depthwise_conv2d_v1_2::is_ignored_large_nchw_float16,
+                           depthwise_conv2d_v1_2::get_examples_large_nchw_float16());
+}
+
+TEST_F(ValidationTest, depthwise_conv2d_v1_2_large_nchw_float16) {
+  const Model model = depthwise_conv2d_v1_2::createTestModel_large_nchw_float16();
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_large_nchw_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4273,12 +4763,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_large_nchw_quant8,
                            depthwise_conv2d_v1_2::is_ignored_large_nchw_quant8,
-                           depthwise_conv2d_v1_2::examples_large_nchw_quant8);
+                           depthwise_conv2d_v1_2::get_examples_large_nchw_quant8());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_large_nchw_quant8) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_large_nchw_quant8();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_large_nchw_quant8);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_large_nchw_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4288,12 +4778,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_large_nchw_weight_as_input,
                            depthwise_conv2d_v1_2::is_ignored_large_nchw_weight_as_input,
-                           depthwise_conv2d_v1_2::examples_large_nchw_weight_as_input);
+                           depthwise_conv2d_v1_2::get_examples_large_nchw_weight_as_input());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_large_nchw_weight_as_input) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_large_nchw_weight_as_input();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_large_nchw_weight_as_input);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_large_nchw_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4303,12 +4793,27 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_large_nchw_weight_as_input_relaxed,
                            depthwise_conv2d_v1_2::is_ignored_large_nchw_weight_as_input_relaxed,
-                           depthwise_conv2d_v1_2::examples_large_nchw_weight_as_input_relaxed);
+                           depthwise_conv2d_v1_2::get_examples_large_nchw_weight_as_input_relaxed());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_large_nchw_weight_as_input_relaxed) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_large_nchw_weight_as_input_relaxed();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_large_nchw_weight_as_input_relaxed);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_large_nchw_weight_as_input_relaxed());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_v1_2_large_nchw_weight_as_input_float16) {
+  generated_tests::Execute(device,
+                           depthwise_conv2d_v1_2::createTestModel_large_nchw_weight_as_input_float16,
+                           depthwise_conv2d_v1_2::is_ignored_large_nchw_weight_as_input_float16,
+                           depthwise_conv2d_v1_2::get_examples_large_nchw_weight_as_input_float16());
+}
+
+TEST_F(ValidationTest, depthwise_conv2d_v1_2_large_nchw_weight_as_input_float16) {
+  const Model model = depthwise_conv2d_v1_2::createTestModel_large_nchw_weight_as_input_float16();
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_large_nchw_weight_as_input_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4318,12 +4823,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_large_nchw_weight_as_input_quant8,
                            depthwise_conv2d_v1_2::is_ignored_large_nchw_weight_as_input_quant8,
-                           depthwise_conv2d_v1_2::examples_large_nchw_weight_as_input_quant8);
+                           depthwise_conv2d_v1_2::get_examples_large_nchw_weight_as_input_quant8());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_large_nchw_weight_as_input_quant8) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_large_nchw_weight_as_input_quant8();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_large_nchw_weight_as_input_quant8);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_large_nchw_weight_as_input_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4333,12 +4838,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_large_nhwc_2,
                            depthwise_conv2d_v1_2::is_ignored_large_nhwc_2,
-                           depthwise_conv2d_v1_2::examples_large_nhwc_2);
+                           depthwise_conv2d_v1_2::get_examples_large_nhwc_2());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_large_nhwc_2) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_large_nhwc_2();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_large_nhwc_2);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_large_nhwc_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4348,12 +4853,27 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_large_nhwc_relaxed_2,
                            depthwise_conv2d_v1_2::is_ignored_large_nhwc_relaxed_2,
-                           depthwise_conv2d_v1_2::examples_large_nhwc_relaxed_2);
+                           depthwise_conv2d_v1_2::get_examples_large_nhwc_relaxed_2());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_large_nhwc_relaxed_2) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_large_nhwc_relaxed_2();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_large_nhwc_relaxed_2);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_large_nhwc_relaxed_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_v1_2_large_nhwc_float16_2) {
+  generated_tests::Execute(device,
+                           depthwise_conv2d_v1_2::createTestModel_large_nhwc_float16_2,
+                           depthwise_conv2d_v1_2::is_ignored_large_nhwc_float16_2,
+                           depthwise_conv2d_v1_2::get_examples_large_nhwc_float16_2());
+}
+
+TEST_F(ValidationTest, depthwise_conv2d_v1_2_large_nhwc_float16_2) {
+  const Model model = depthwise_conv2d_v1_2::createTestModel_large_nhwc_float16_2();
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_large_nhwc_float16_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4363,12 +4883,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_large_nhwc_quant8_2,
                            depthwise_conv2d_v1_2::is_ignored_large_nhwc_quant8_2,
-                           depthwise_conv2d_v1_2::examples_large_nhwc_quant8_2);
+                           depthwise_conv2d_v1_2::get_examples_large_nhwc_quant8_2());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_large_nhwc_quant8_2) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_large_nhwc_quant8_2();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_large_nhwc_quant8_2);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_large_nhwc_quant8_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4378,12 +4898,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_large_nhwc_weight_as_input_2,
                            depthwise_conv2d_v1_2::is_ignored_large_nhwc_weight_as_input_2,
-                           depthwise_conv2d_v1_2::examples_large_nhwc_weight_as_input_2);
+                           depthwise_conv2d_v1_2::get_examples_large_nhwc_weight_as_input_2());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_large_nhwc_weight_as_input_2) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_large_nhwc_weight_as_input_2();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_large_nhwc_weight_as_input_2);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_large_nhwc_weight_as_input_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4393,12 +4913,27 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_large_nhwc_weight_as_input_relaxed_2,
                            depthwise_conv2d_v1_2::is_ignored_large_nhwc_weight_as_input_relaxed_2,
-                           depthwise_conv2d_v1_2::examples_large_nhwc_weight_as_input_relaxed_2);
+                           depthwise_conv2d_v1_2::get_examples_large_nhwc_weight_as_input_relaxed_2());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_large_nhwc_weight_as_input_relaxed_2) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_large_nhwc_weight_as_input_relaxed_2();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_large_nhwc_weight_as_input_relaxed_2);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_large_nhwc_weight_as_input_relaxed_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_v1_2_large_nhwc_weight_as_input_float16_2) {
+  generated_tests::Execute(device,
+                           depthwise_conv2d_v1_2::createTestModel_large_nhwc_weight_as_input_float16_2,
+                           depthwise_conv2d_v1_2::is_ignored_large_nhwc_weight_as_input_float16_2,
+                           depthwise_conv2d_v1_2::get_examples_large_nhwc_weight_as_input_float16_2());
+}
+
+TEST_F(ValidationTest, depthwise_conv2d_v1_2_large_nhwc_weight_as_input_float16_2) {
+  const Model model = depthwise_conv2d_v1_2::createTestModel_large_nhwc_weight_as_input_float16_2();
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_large_nhwc_weight_as_input_float16_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4408,12 +4943,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_large_nhwc_weight_as_input_quant8_2,
                            depthwise_conv2d_v1_2::is_ignored_large_nhwc_weight_as_input_quant8_2,
-                           depthwise_conv2d_v1_2::examples_large_nhwc_weight_as_input_quant8_2);
+                           depthwise_conv2d_v1_2::get_examples_large_nhwc_weight_as_input_quant8_2());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_large_nhwc_weight_as_input_quant8_2) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_large_nhwc_weight_as_input_quant8_2();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_large_nhwc_weight_as_input_quant8_2);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_large_nhwc_weight_as_input_quant8_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4423,12 +4958,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_large_nchw_2,
                            depthwise_conv2d_v1_2::is_ignored_large_nchw_2,
-                           depthwise_conv2d_v1_2::examples_large_nchw_2);
+                           depthwise_conv2d_v1_2::get_examples_large_nchw_2());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_large_nchw_2) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_large_nchw_2();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_large_nchw_2);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_large_nchw_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4438,12 +4973,27 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_large_nchw_relaxed_2,
                            depthwise_conv2d_v1_2::is_ignored_large_nchw_relaxed_2,
-                           depthwise_conv2d_v1_2::examples_large_nchw_relaxed_2);
+                           depthwise_conv2d_v1_2::get_examples_large_nchw_relaxed_2());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_large_nchw_relaxed_2) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_large_nchw_relaxed_2();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_large_nchw_relaxed_2);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_large_nchw_relaxed_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_v1_2_large_nchw_float16_2) {
+  generated_tests::Execute(device,
+                           depthwise_conv2d_v1_2::createTestModel_large_nchw_float16_2,
+                           depthwise_conv2d_v1_2::is_ignored_large_nchw_float16_2,
+                           depthwise_conv2d_v1_2::get_examples_large_nchw_float16_2());
+}
+
+TEST_F(ValidationTest, depthwise_conv2d_v1_2_large_nchw_float16_2) {
+  const Model model = depthwise_conv2d_v1_2::createTestModel_large_nchw_float16_2();
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_large_nchw_float16_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4453,12 +5003,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_large_nchw_quant8_2,
                            depthwise_conv2d_v1_2::is_ignored_large_nchw_quant8_2,
-                           depthwise_conv2d_v1_2::examples_large_nchw_quant8_2);
+                           depthwise_conv2d_v1_2::get_examples_large_nchw_quant8_2());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_large_nchw_quant8_2) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_large_nchw_quant8_2();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_large_nchw_quant8_2);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_large_nchw_quant8_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4468,12 +5018,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_large_nchw_weight_as_input_2,
                            depthwise_conv2d_v1_2::is_ignored_large_nchw_weight_as_input_2,
-                           depthwise_conv2d_v1_2::examples_large_nchw_weight_as_input_2);
+                           depthwise_conv2d_v1_2::get_examples_large_nchw_weight_as_input_2());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_large_nchw_weight_as_input_2) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_large_nchw_weight_as_input_2();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_large_nchw_weight_as_input_2);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_large_nchw_weight_as_input_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4483,12 +5033,27 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_large_nchw_weight_as_input_relaxed_2,
                            depthwise_conv2d_v1_2::is_ignored_large_nchw_weight_as_input_relaxed_2,
-                           depthwise_conv2d_v1_2::examples_large_nchw_weight_as_input_relaxed_2);
+                           depthwise_conv2d_v1_2::get_examples_large_nchw_weight_as_input_relaxed_2());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_large_nchw_weight_as_input_relaxed_2) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_large_nchw_weight_as_input_relaxed_2();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_large_nchw_weight_as_input_relaxed_2);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_large_nchw_weight_as_input_relaxed_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_v1_2_large_nchw_weight_as_input_float16_2) {
+  generated_tests::Execute(device,
+                           depthwise_conv2d_v1_2::createTestModel_large_nchw_weight_as_input_float16_2,
+                           depthwise_conv2d_v1_2::is_ignored_large_nchw_weight_as_input_float16_2,
+                           depthwise_conv2d_v1_2::get_examples_large_nchw_weight_as_input_float16_2());
+}
+
+TEST_F(ValidationTest, depthwise_conv2d_v1_2_large_nchw_weight_as_input_float16_2) {
+  const Model model = depthwise_conv2d_v1_2::createTestModel_large_nchw_weight_as_input_float16_2();
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_large_nchw_weight_as_input_float16_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4498,12 +5063,12 @@
   generated_tests::Execute(device,
                            depthwise_conv2d_v1_2::createTestModel_large_nchw_weight_as_input_quant8_2,
                            depthwise_conv2d_v1_2::is_ignored_large_nchw_weight_as_input_quant8_2,
-                           depthwise_conv2d_v1_2::examples_large_nchw_weight_as_input_quant8_2);
+                           depthwise_conv2d_v1_2::get_examples_large_nchw_weight_as_input_quant8_2());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_large_nchw_weight_as_input_quant8_2) {
   const Model model = depthwise_conv2d_v1_2::createTestModel_large_nchw_weight_as_input_quant8_2();
-  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::examples_large_nchw_weight_as_input_quant8_2);
+  const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_large_nchw_weight_as_input_quant8_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4521,12 +5086,12 @@
   generated_tests::Execute(device,
                            div_broadcast_float16::createTestModel,
                            div_broadcast_float16::is_ignored,
-                           div_broadcast_float16::examples);
+                           div_broadcast_float16::get_examples());
 }
 
 TEST_F(ValidationTest, div_broadcast_float16) {
   const Model model = div_broadcast_float16::createTestModel();
-  const std::vector<Request> requests = createRequests(div_broadcast_float16::examples);
+  const std::vector<Request> requests = createRequests(div_broadcast_float16::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4544,12 +5109,12 @@
   generated_tests::Execute(device,
                            div_float16::createTestModel,
                            div_float16::is_ignored,
-                           div_float16::examples);
+                           div_float16::get_examples());
 }
 
 TEST_F(ValidationTest, div_float16) {
   const Model model = div_float16::createTestModel();
-  const std::vector<Request> requests = createRequests(div_float16::examples);
+  const std::vector<Request> requests = createRequests(div_float16::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4567,12 +5132,12 @@
   generated_tests::Execute(device,
                            expand_dims::createTestModel,
                            expand_dims::is_ignored,
-                           expand_dims::examples);
+                           expand_dims::get_examples());
 }
 
 TEST_F(ValidationTest, expand_dims) {
   const Model model = expand_dims::createTestModel();
-  const std::vector<Request> requests = createRequests(expand_dims::examples);
+  const std::vector<Request> requests = createRequests(expand_dims::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4582,12 +5147,12 @@
   generated_tests::Execute(device,
                            expand_dims::createTestModel_relaxed,
                            expand_dims::is_ignored_relaxed,
-                           expand_dims::examples_relaxed);
+                           expand_dims::get_examples_relaxed());
 }
 
 TEST_F(ValidationTest, expand_dims_relaxed) {
   const Model model = expand_dims::createTestModel_relaxed();
-  const std::vector<Request> requests = createRequests(expand_dims::examples_relaxed);
+  const std::vector<Request> requests = createRequests(expand_dims::get_examples_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4597,12 +5162,12 @@
   generated_tests::Execute(device,
                            expand_dims::createTestModel_quant8,
                            expand_dims::is_ignored_quant8,
-                           expand_dims::examples_quant8);
+                           expand_dims::get_examples_quant8());
 }
 
 TEST_F(ValidationTest, expand_dims_quant8) {
   const Model model = expand_dims::createTestModel_quant8();
-  const std::vector<Request> requests = createRequests(expand_dims::examples_quant8);
+  const std::vector<Request> requests = createRequests(expand_dims::get_examples_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4612,12 +5177,12 @@
   generated_tests::Execute(device,
                            expand_dims::createTestModel_int32,
                            expand_dims::is_ignored_int32,
-                           expand_dims::examples_int32);
+                           expand_dims::get_examples_int32());
 }
 
 TEST_F(ValidationTest, expand_dims_int32) {
   const Model model = expand_dims::createTestModel_int32();
-  const std::vector<Request> requests = createRequests(expand_dims::examples_int32);
+  const std::vector<Request> requests = createRequests(expand_dims::get_examples_int32());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4627,12 +5192,12 @@
   generated_tests::Execute(device,
                            expand_dims::createTestModel_2,
                            expand_dims::is_ignored_2,
-                           expand_dims::examples_2);
+                           expand_dims::get_examples_2());
 }
 
 TEST_F(ValidationTest, expand_dims_2) {
   const Model model = expand_dims::createTestModel_2();
-  const std::vector<Request> requests = createRequests(expand_dims::examples_2);
+  const std::vector<Request> requests = createRequests(expand_dims::get_examples_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4642,12 +5207,12 @@
   generated_tests::Execute(device,
                            expand_dims::createTestModel_relaxed_2,
                            expand_dims::is_ignored_relaxed_2,
-                           expand_dims::examples_relaxed_2);
+                           expand_dims::get_examples_relaxed_2());
 }
 
 TEST_F(ValidationTest, expand_dims_relaxed_2) {
   const Model model = expand_dims::createTestModel_relaxed_2();
-  const std::vector<Request> requests = createRequests(expand_dims::examples_relaxed_2);
+  const std::vector<Request> requests = createRequests(expand_dims::get_examples_relaxed_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4657,12 +5222,12 @@
   generated_tests::Execute(device,
                            expand_dims::createTestModel_quant8_2,
                            expand_dims::is_ignored_quant8_2,
-                           expand_dims::examples_quant8_2);
+                           expand_dims::get_examples_quant8_2());
 }
 
 TEST_F(ValidationTest, expand_dims_quant8_2) {
   const Model model = expand_dims::createTestModel_quant8_2();
-  const std::vector<Request> requests = createRequests(expand_dims::examples_quant8_2);
+  const std::vector<Request> requests = createRequests(expand_dims::get_examples_quant8_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4672,12 +5237,12 @@
   generated_tests::Execute(device,
                            expand_dims::createTestModel_int32_2,
                            expand_dims::is_ignored_int32_2,
-                           expand_dims::examples_int32_2);
+                           expand_dims::get_examples_int32_2());
 }
 
 TEST_F(ValidationTest, expand_dims_int32_2) {
   const Model model = expand_dims::createTestModel_int32_2();
-  const std::vector<Request> requests = createRequests(expand_dims::examples_int32_2);
+  const std::vector<Request> requests = createRequests(expand_dims::get_examples_int32_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4687,12 +5252,12 @@
   generated_tests::Execute(device,
                            expand_dims::createTestModel_3,
                            expand_dims::is_ignored_3,
-                           expand_dims::examples_3);
+                           expand_dims::get_examples_3());
 }
 
 TEST_F(ValidationTest, expand_dims_3) {
   const Model model = expand_dims::createTestModel_3();
-  const std::vector<Request> requests = createRequests(expand_dims::examples_3);
+  const std::vector<Request> requests = createRequests(expand_dims::get_examples_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4702,12 +5267,12 @@
   generated_tests::Execute(device,
                            expand_dims::createTestModel_relaxed_3,
                            expand_dims::is_ignored_relaxed_3,
-                           expand_dims::examples_relaxed_3);
+                           expand_dims::get_examples_relaxed_3());
 }
 
 TEST_F(ValidationTest, expand_dims_relaxed_3) {
   const Model model = expand_dims::createTestModel_relaxed_3();
-  const std::vector<Request> requests = createRequests(expand_dims::examples_relaxed_3);
+  const std::vector<Request> requests = createRequests(expand_dims::get_examples_relaxed_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4717,12 +5282,12 @@
   generated_tests::Execute(device,
                            expand_dims::createTestModel_quant8_3,
                            expand_dims::is_ignored_quant8_3,
-                           expand_dims::examples_quant8_3);
+                           expand_dims::get_examples_quant8_3());
 }
 
 TEST_F(ValidationTest, expand_dims_quant8_3) {
   const Model model = expand_dims::createTestModel_quant8_3();
-  const std::vector<Request> requests = createRequests(expand_dims::examples_quant8_3);
+  const std::vector<Request> requests = createRequests(expand_dims::get_examples_quant8_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4732,12 +5297,12 @@
   generated_tests::Execute(device,
                            expand_dims::createTestModel_int32_3,
                            expand_dims::is_ignored_int32_3,
-                           expand_dims::examples_int32_3);
+                           expand_dims::get_examples_int32_3());
 }
 
 TEST_F(ValidationTest, expand_dims_int32_3) {
   const Model model = expand_dims::createTestModel_int32_3();
-  const std::vector<Request> requests = createRequests(expand_dims::examples_int32_3);
+  const std::vector<Request> requests = createRequests(expand_dims::get_examples_int32_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4747,12 +5312,12 @@
   generated_tests::Execute(device,
                            expand_dims::createTestModel_4,
                            expand_dims::is_ignored_4,
-                           expand_dims::examples_4);
+                           expand_dims::get_examples_4());
 }
 
 TEST_F(ValidationTest, expand_dims_4) {
   const Model model = expand_dims::createTestModel_4();
-  const std::vector<Request> requests = createRequests(expand_dims::examples_4);
+  const std::vector<Request> requests = createRequests(expand_dims::get_examples_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4762,12 +5327,12 @@
   generated_tests::Execute(device,
                            expand_dims::createTestModel_relaxed_4,
                            expand_dims::is_ignored_relaxed_4,
-                           expand_dims::examples_relaxed_4);
+                           expand_dims::get_examples_relaxed_4());
 }
 
 TEST_F(ValidationTest, expand_dims_relaxed_4) {
   const Model model = expand_dims::createTestModel_relaxed_4();
-  const std::vector<Request> requests = createRequests(expand_dims::examples_relaxed_4);
+  const std::vector<Request> requests = createRequests(expand_dims::get_examples_relaxed_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4777,12 +5342,12 @@
   generated_tests::Execute(device,
                            expand_dims::createTestModel_quant8_4,
                            expand_dims::is_ignored_quant8_4,
-                           expand_dims::examples_quant8_4);
+                           expand_dims::get_examples_quant8_4());
 }
 
 TEST_F(ValidationTest, expand_dims_quant8_4) {
   const Model model = expand_dims::createTestModel_quant8_4();
-  const std::vector<Request> requests = createRequests(expand_dims::examples_quant8_4);
+  const std::vector<Request> requests = createRequests(expand_dims::get_examples_quant8_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4792,12 +5357,12 @@
   generated_tests::Execute(device,
                            expand_dims::createTestModel_int32_4,
                            expand_dims::is_ignored_int32_4,
-                           expand_dims::examples_int32_4);
+                           expand_dims::get_examples_int32_4());
 }
 
 TEST_F(ValidationTest, expand_dims_int32_4) {
   const Model model = expand_dims::createTestModel_int32_4();
-  const std::vector<Request> requests = createRequests(expand_dims::examples_int32_4);
+  const std::vector<Request> requests = createRequests(expand_dims::get_examples_int32_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4815,12 +5380,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel,
                            gather::is_ignored,
-                           gather::examples);
+                           gather::get_examples());
 }
 
 TEST_F(ValidationTest, gather) {
   const Model model = gather::createTestModel();
-  const std::vector<Request> requests = createRequests(gather::examples);
+  const std::vector<Request> requests = createRequests(gather::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4830,12 +5395,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_relaxed,
                            gather::is_ignored_relaxed,
-                           gather::examples_relaxed);
+                           gather::get_examples_relaxed());
 }
 
 TEST_F(ValidationTest, gather_relaxed) {
   const Model model = gather::createTestModel_relaxed();
-  const std::vector<Request> requests = createRequests(gather::examples_relaxed);
+  const std::vector<Request> requests = createRequests(gather::get_examples_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4845,12 +5410,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_quant8,
                            gather::is_ignored_quant8,
-                           gather::examples_quant8);
+                           gather::get_examples_quant8());
 }
 
 TEST_F(ValidationTest, gather_quant8) {
   const Model model = gather::createTestModel_quant8();
-  const std::vector<Request> requests = createRequests(gather::examples_quant8);
+  const std::vector<Request> requests = createRequests(gather::get_examples_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4860,12 +5425,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_int32,
                            gather::is_ignored_int32,
-                           gather::examples_int32);
+                           gather::get_examples_int32());
 }
 
 TEST_F(ValidationTest, gather_int32) {
   const Model model = gather::createTestModel_int32();
-  const std::vector<Request> requests = createRequests(gather::examples_int32);
+  const std::vector<Request> requests = createRequests(gather::get_examples_int32());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4875,12 +5440,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_float16,
                            gather::is_ignored_float16,
-                           gather::examples_float16);
+                           gather::get_examples_float16());
 }
 
 TEST_F(ValidationTest, gather_float16) {
   const Model model = gather::createTestModel_float16();
-  const std::vector<Request> requests = createRequests(gather::examples_float16);
+  const std::vector<Request> requests = createRequests(gather::get_examples_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4890,12 +5455,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_2,
                            gather::is_ignored_2,
-                           gather::examples_2);
+                           gather::get_examples_2());
 }
 
 TEST_F(ValidationTest, gather_2) {
   const Model model = gather::createTestModel_2();
-  const std::vector<Request> requests = createRequests(gather::examples_2);
+  const std::vector<Request> requests = createRequests(gather::get_examples_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4905,12 +5470,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_relaxed_2,
                            gather::is_ignored_relaxed_2,
-                           gather::examples_relaxed_2);
+                           gather::get_examples_relaxed_2());
 }
 
 TEST_F(ValidationTest, gather_relaxed_2) {
   const Model model = gather::createTestModel_relaxed_2();
-  const std::vector<Request> requests = createRequests(gather::examples_relaxed_2);
+  const std::vector<Request> requests = createRequests(gather::get_examples_relaxed_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4920,12 +5485,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_quant8_2,
                            gather::is_ignored_quant8_2,
-                           gather::examples_quant8_2);
+                           gather::get_examples_quant8_2());
 }
 
 TEST_F(ValidationTest, gather_quant8_2) {
   const Model model = gather::createTestModel_quant8_2();
-  const std::vector<Request> requests = createRequests(gather::examples_quant8_2);
+  const std::vector<Request> requests = createRequests(gather::get_examples_quant8_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4935,12 +5500,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_int32_2,
                            gather::is_ignored_int32_2,
-                           gather::examples_int32_2);
+                           gather::get_examples_int32_2());
 }
 
 TEST_F(ValidationTest, gather_int32_2) {
   const Model model = gather::createTestModel_int32_2();
-  const std::vector<Request> requests = createRequests(gather::examples_int32_2);
+  const std::vector<Request> requests = createRequests(gather::get_examples_int32_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4950,12 +5515,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_float16_2,
                            gather::is_ignored_float16_2,
-                           gather::examples_float16_2);
+                           gather::get_examples_float16_2());
 }
 
 TEST_F(ValidationTest, gather_float16_2) {
   const Model model = gather::createTestModel_float16_2();
-  const std::vector<Request> requests = createRequests(gather::examples_float16_2);
+  const std::vector<Request> requests = createRequests(gather::get_examples_float16_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4965,12 +5530,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_3,
                            gather::is_ignored_3,
-                           gather::examples_3);
+                           gather::get_examples_3());
 }
 
 TEST_F(ValidationTest, gather_3) {
   const Model model = gather::createTestModel_3();
-  const std::vector<Request> requests = createRequests(gather::examples_3);
+  const std::vector<Request> requests = createRequests(gather::get_examples_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4980,12 +5545,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_relaxed_3,
                            gather::is_ignored_relaxed_3,
-                           gather::examples_relaxed_3);
+                           gather::get_examples_relaxed_3());
 }
 
 TEST_F(ValidationTest, gather_relaxed_3) {
   const Model model = gather::createTestModel_relaxed_3();
-  const std::vector<Request> requests = createRequests(gather::examples_relaxed_3);
+  const std::vector<Request> requests = createRequests(gather::get_examples_relaxed_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -4995,12 +5560,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_quant8_3,
                            gather::is_ignored_quant8_3,
-                           gather::examples_quant8_3);
+                           gather::get_examples_quant8_3());
 }
 
 TEST_F(ValidationTest, gather_quant8_3) {
   const Model model = gather::createTestModel_quant8_3();
-  const std::vector<Request> requests = createRequests(gather::examples_quant8_3);
+  const std::vector<Request> requests = createRequests(gather::get_examples_quant8_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5010,12 +5575,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_int32_3,
                            gather::is_ignored_int32_3,
-                           gather::examples_int32_3);
+                           gather::get_examples_int32_3());
 }
 
 TEST_F(ValidationTest, gather_int32_3) {
   const Model model = gather::createTestModel_int32_3();
-  const std::vector<Request> requests = createRequests(gather::examples_int32_3);
+  const std::vector<Request> requests = createRequests(gather::get_examples_int32_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5025,12 +5590,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_float16_3,
                            gather::is_ignored_float16_3,
-                           gather::examples_float16_3);
+                           gather::get_examples_float16_3());
 }
 
 TEST_F(ValidationTest, gather_float16_3) {
   const Model model = gather::createTestModel_float16_3();
-  const std::vector<Request> requests = createRequests(gather::examples_float16_3);
+  const std::vector<Request> requests = createRequests(gather::get_examples_float16_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5040,12 +5605,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_4,
                            gather::is_ignored_4,
-                           gather::examples_4);
+                           gather::get_examples_4());
 }
 
 TEST_F(ValidationTest, gather_4) {
   const Model model = gather::createTestModel_4();
-  const std::vector<Request> requests = createRequests(gather::examples_4);
+  const std::vector<Request> requests = createRequests(gather::get_examples_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5055,12 +5620,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_relaxed_4,
                            gather::is_ignored_relaxed_4,
-                           gather::examples_relaxed_4);
+                           gather::get_examples_relaxed_4());
 }
 
 TEST_F(ValidationTest, gather_relaxed_4) {
   const Model model = gather::createTestModel_relaxed_4();
-  const std::vector<Request> requests = createRequests(gather::examples_relaxed_4);
+  const std::vector<Request> requests = createRequests(gather::get_examples_relaxed_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5070,12 +5635,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_quant8_4,
                            gather::is_ignored_quant8_4,
-                           gather::examples_quant8_4);
+                           gather::get_examples_quant8_4());
 }
 
 TEST_F(ValidationTest, gather_quant8_4) {
   const Model model = gather::createTestModel_quant8_4();
-  const std::vector<Request> requests = createRequests(gather::examples_quant8_4);
+  const std::vector<Request> requests = createRequests(gather::get_examples_quant8_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5085,12 +5650,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_int32_4,
                            gather::is_ignored_int32_4,
-                           gather::examples_int32_4);
+                           gather::get_examples_int32_4());
 }
 
 TEST_F(ValidationTest, gather_int32_4) {
   const Model model = gather::createTestModel_int32_4();
-  const std::vector<Request> requests = createRequests(gather::examples_int32_4);
+  const std::vector<Request> requests = createRequests(gather::get_examples_int32_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5100,12 +5665,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_float16_4,
                            gather::is_ignored_float16_4,
-                           gather::examples_float16_4);
+                           gather::get_examples_float16_4());
 }
 
 TEST_F(ValidationTest, gather_float16_4) {
   const Model model = gather::createTestModel_float16_4();
-  const std::vector<Request> requests = createRequests(gather::examples_float16_4);
+  const std::vector<Request> requests = createRequests(gather::get_examples_float16_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5115,12 +5680,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_5,
                            gather::is_ignored_5,
-                           gather::examples_5);
+                           gather::get_examples_5());
 }
 
 TEST_F(ValidationTest, gather_5) {
   const Model model = gather::createTestModel_5();
-  const std::vector<Request> requests = createRequests(gather::examples_5);
+  const std::vector<Request> requests = createRequests(gather::get_examples_5());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5130,12 +5695,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_relaxed_5,
                            gather::is_ignored_relaxed_5,
-                           gather::examples_relaxed_5);
+                           gather::get_examples_relaxed_5());
 }
 
 TEST_F(ValidationTest, gather_relaxed_5) {
   const Model model = gather::createTestModel_relaxed_5();
-  const std::vector<Request> requests = createRequests(gather::examples_relaxed_5);
+  const std::vector<Request> requests = createRequests(gather::get_examples_relaxed_5());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5145,12 +5710,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_quant8_5,
                            gather::is_ignored_quant8_5,
-                           gather::examples_quant8_5);
+                           gather::get_examples_quant8_5());
 }
 
 TEST_F(ValidationTest, gather_quant8_5) {
   const Model model = gather::createTestModel_quant8_5();
-  const std::vector<Request> requests = createRequests(gather::examples_quant8_5);
+  const std::vector<Request> requests = createRequests(gather::get_examples_quant8_5());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5160,12 +5725,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_int32_5,
                            gather::is_ignored_int32_5,
-                           gather::examples_int32_5);
+                           gather::get_examples_int32_5());
 }
 
 TEST_F(ValidationTest, gather_int32_5) {
   const Model model = gather::createTestModel_int32_5();
-  const std::vector<Request> requests = createRequests(gather::examples_int32_5);
+  const std::vector<Request> requests = createRequests(gather::get_examples_int32_5());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5175,12 +5740,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_float16_5,
                            gather::is_ignored_float16_5,
-                           gather::examples_float16_5);
+                           gather::get_examples_float16_5());
 }
 
 TEST_F(ValidationTest, gather_float16_5) {
   const Model model = gather::createTestModel_float16_5();
-  const std::vector<Request> requests = createRequests(gather::examples_float16_5);
+  const std::vector<Request> requests = createRequests(gather::get_examples_float16_5());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5190,12 +5755,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_6,
                            gather::is_ignored_6,
-                           gather::examples_6);
+                           gather::get_examples_6());
 }
 
 TEST_F(ValidationTest, gather_6) {
   const Model model = gather::createTestModel_6();
-  const std::vector<Request> requests = createRequests(gather::examples_6);
+  const std::vector<Request> requests = createRequests(gather::get_examples_6());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5205,12 +5770,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_relaxed_6,
                            gather::is_ignored_relaxed_6,
-                           gather::examples_relaxed_6);
+                           gather::get_examples_relaxed_6());
 }
 
 TEST_F(ValidationTest, gather_relaxed_6) {
   const Model model = gather::createTestModel_relaxed_6();
-  const std::vector<Request> requests = createRequests(gather::examples_relaxed_6);
+  const std::vector<Request> requests = createRequests(gather::get_examples_relaxed_6());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5220,12 +5785,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_quant8_6,
                            gather::is_ignored_quant8_6,
-                           gather::examples_quant8_6);
+                           gather::get_examples_quant8_6());
 }
 
 TEST_F(ValidationTest, gather_quant8_6) {
   const Model model = gather::createTestModel_quant8_6();
-  const std::vector<Request> requests = createRequests(gather::examples_quant8_6);
+  const std::vector<Request> requests = createRequests(gather::get_examples_quant8_6());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5235,12 +5800,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_int32_6,
                            gather::is_ignored_int32_6,
-                           gather::examples_int32_6);
+                           gather::get_examples_int32_6());
 }
 
 TEST_F(ValidationTest, gather_int32_6) {
   const Model model = gather::createTestModel_int32_6();
-  const std::vector<Request> requests = createRequests(gather::examples_int32_6);
+  const std::vector<Request> requests = createRequests(gather::get_examples_int32_6());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5250,12 +5815,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_float16_6,
                            gather::is_ignored_float16_6,
-                           gather::examples_float16_6);
+                           gather::get_examples_float16_6());
 }
 
 TEST_F(ValidationTest, gather_float16_6) {
   const Model model = gather::createTestModel_float16_6();
-  const std::vector<Request> requests = createRequests(gather::examples_float16_6);
+  const std::vector<Request> requests = createRequests(gather::get_examples_float16_6());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5265,12 +5830,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_7,
                            gather::is_ignored_7,
-                           gather::examples_7);
+                           gather::get_examples_7());
 }
 
 TEST_F(ValidationTest, gather_7) {
   const Model model = gather::createTestModel_7();
-  const std::vector<Request> requests = createRequests(gather::examples_7);
+  const std::vector<Request> requests = createRequests(gather::get_examples_7());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5280,12 +5845,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_relaxed_7,
                            gather::is_ignored_relaxed_7,
-                           gather::examples_relaxed_7);
+                           gather::get_examples_relaxed_7());
 }
 
 TEST_F(ValidationTest, gather_relaxed_7) {
   const Model model = gather::createTestModel_relaxed_7();
-  const std::vector<Request> requests = createRequests(gather::examples_relaxed_7);
+  const std::vector<Request> requests = createRequests(gather::get_examples_relaxed_7());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5295,12 +5860,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_quant8_7,
                            gather::is_ignored_quant8_7,
-                           gather::examples_quant8_7);
+                           gather::get_examples_quant8_7());
 }
 
 TEST_F(ValidationTest, gather_quant8_7) {
   const Model model = gather::createTestModel_quant8_7();
-  const std::vector<Request> requests = createRequests(gather::examples_quant8_7);
+  const std::vector<Request> requests = createRequests(gather::get_examples_quant8_7());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5310,12 +5875,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_int32_7,
                            gather::is_ignored_int32_7,
-                           gather::examples_int32_7);
+                           gather::get_examples_int32_7());
 }
 
 TEST_F(ValidationTest, gather_int32_7) {
   const Model model = gather::createTestModel_int32_7();
-  const std::vector<Request> requests = createRequests(gather::examples_int32_7);
+  const std::vector<Request> requests = createRequests(gather::get_examples_int32_7());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5325,12 +5890,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_float16_7,
                            gather::is_ignored_float16_7,
-                           gather::examples_float16_7);
+                           gather::get_examples_float16_7());
 }
 
 TEST_F(ValidationTest, gather_float16_7) {
   const Model model = gather::createTestModel_float16_7();
-  const std::vector<Request> requests = createRequests(gather::examples_float16_7);
+  const std::vector<Request> requests = createRequests(gather::get_examples_float16_7());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5340,12 +5905,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_8,
                            gather::is_ignored_8,
-                           gather::examples_8);
+                           gather::get_examples_8());
 }
 
 TEST_F(ValidationTest, gather_8) {
   const Model model = gather::createTestModel_8();
-  const std::vector<Request> requests = createRequests(gather::examples_8);
+  const std::vector<Request> requests = createRequests(gather::get_examples_8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5355,12 +5920,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_relaxed_8,
                            gather::is_ignored_relaxed_8,
-                           gather::examples_relaxed_8);
+                           gather::get_examples_relaxed_8());
 }
 
 TEST_F(ValidationTest, gather_relaxed_8) {
   const Model model = gather::createTestModel_relaxed_8();
-  const std::vector<Request> requests = createRequests(gather::examples_relaxed_8);
+  const std::vector<Request> requests = createRequests(gather::get_examples_relaxed_8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5370,12 +5935,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_quant8_8,
                            gather::is_ignored_quant8_8,
-                           gather::examples_quant8_8);
+                           gather::get_examples_quant8_8());
 }
 
 TEST_F(ValidationTest, gather_quant8_8) {
   const Model model = gather::createTestModel_quant8_8();
-  const std::vector<Request> requests = createRequests(gather::examples_quant8_8);
+  const std::vector<Request> requests = createRequests(gather::get_examples_quant8_8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5385,12 +5950,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_int32_8,
                            gather::is_ignored_int32_8,
-                           gather::examples_int32_8);
+                           gather::get_examples_int32_8());
 }
 
 TEST_F(ValidationTest, gather_int32_8) {
   const Model model = gather::createTestModel_int32_8();
-  const std::vector<Request> requests = createRequests(gather::examples_int32_8);
+  const std::vector<Request> requests = createRequests(gather::get_examples_int32_8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5400,12 +5965,12 @@
   generated_tests::Execute(device,
                            gather::createTestModel_float16_8,
                            gather::is_ignored_float16_8,
-                           gather::examples_float16_8);
+                           gather::get_examples_float16_8());
 }
 
 TEST_F(ValidationTest, gather_float16_8) {
   const Model model = gather::createTestModel_float16_8();
-  const std::vector<Request> requests = createRequests(gather::examples_float16_8);
+  const std::vector<Request> requests = createRequests(gather::get_examples_float16_8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5423,12 +5988,12 @@
   generated_tests::Execute(device,
                            gather_higher_rank::createTestModel,
                            gather_higher_rank::is_ignored,
-                           gather_higher_rank::examples);
+                           gather_higher_rank::get_examples());
 }
 
 TEST_F(ValidationTest, gather_higher_rank) {
   const Model model = gather_higher_rank::createTestModel();
-  const std::vector<Request> requests = createRequests(gather_higher_rank::examples);
+  const std::vector<Request> requests = createRequests(gather_higher_rank::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5438,12 +6003,12 @@
   generated_tests::Execute(device,
                            gather_higher_rank::createTestModel_relaxed,
                            gather_higher_rank::is_ignored_relaxed,
-                           gather_higher_rank::examples_relaxed);
+                           gather_higher_rank::get_examples_relaxed());
 }
 
 TEST_F(ValidationTest, gather_higher_rank_relaxed) {
   const Model model = gather_higher_rank::createTestModel_relaxed();
-  const std::vector<Request> requests = createRequests(gather_higher_rank::examples_relaxed);
+  const std::vector<Request> requests = createRequests(gather_higher_rank::get_examples_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5453,12 +6018,12 @@
   generated_tests::Execute(device,
                            gather_higher_rank::createTestModel_quant8,
                            gather_higher_rank::is_ignored_quant8,
-                           gather_higher_rank::examples_quant8);
+                           gather_higher_rank::get_examples_quant8());
 }
 
 TEST_F(ValidationTest, gather_higher_rank_quant8) {
   const Model model = gather_higher_rank::createTestModel_quant8();
-  const std::vector<Request> requests = createRequests(gather_higher_rank::examples_quant8);
+  const std::vector<Request> requests = createRequests(gather_higher_rank::get_examples_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5468,12 +6033,12 @@
   generated_tests::Execute(device,
                            gather_higher_rank::createTestModel_int32,
                            gather_higher_rank::is_ignored_int32,
-                           gather_higher_rank::examples_int32);
+                           gather_higher_rank::get_examples_int32());
 }
 
 TEST_F(ValidationTest, gather_higher_rank_int32) {
   const Model model = gather_higher_rank::createTestModel_int32();
-  const std::vector<Request> requests = createRequests(gather_higher_rank::examples_int32);
+  const std::vector<Request> requests = createRequests(gather_higher_rank::get_examples_int32());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5491,12 +6056,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nhwc_none,
                            grouped_conv2d::is_ignored_nhwc_none,
-                           grouped_conv2d::examples_nhwc_none);
+                           grouped_conv2d::get_examples_nhwc_none());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nhwc_none) {
   const Model model = grouped_conv2d::createTestModel_nhwc_none();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nhwc_none);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nhwc_none());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5506,12 +6071,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nhwc_none_weight_as_input,
                            grouped_conv2d::is_ignored_nhwc_none_weight_as_input,
-                           grouped_conv2d::examples_nhwc_none_weight_as_input);
+                           grouped_conv2d::get_examples_nhwc_none_weight_as_input());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nhwc_none_weight_as_input) {
   const Model model = grouped_conv2d::createTestModel_nhwc_none_weight_as_input();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nhwc_none_weight_as_input);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nhwc_none_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5521,12 +6086,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nhwc_none_relaxed,
                            grouped_conv2d::is_ignored_nhwc_none_relaxed,
-                           grouped_conv2d::examples_nhwc_none_relaxed);
+                           grouped_conv2d::get_examples_nhwc_none_relaxed());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nhwc_none_relaxed) {
   const Model model = grouped_conv2d::createTestModel_nhwc_none_relaxed();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nhwc_none_relaxed);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nhwc_none_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5536,12 +6101,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nhwc_none_relaxed_weight_as_input,
                            grouped_conv2d::is_ignored_nhwc_none_relaxed_weight_as_input,
-                           grouped_conv2d::examples_nhwc_none_relaxed_weight_as_input);
+                           grouped_conv2d::get_examples_nhwc_none_relaxed_weight_as_input());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nhwc_none_relaxed_weight_as_input) {
   const Model model = grouped_conv2d::createTestModel_nhwc_none_relaxed_weight_as_input();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nhwc_none_relaxed_weight_as_input);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nhwc_none_relaxed_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5551,12 +6116,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nhwc_none_quant8,
                            grouped_conv2d::is_ignored_nhwc_none_quant8,
-                           grouped_conv2d::examples_nhwc_none_quant8);
+                           grouped_conv2d::get_examples_nhwc_none_quant8());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nhwc_none_quant8) {
   const Model model = grouped_conv2d::createTestModel_nhwc_none_quant8();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nhwc_none_quant8);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nhwc_none_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5566,12 +6131,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nhwc_none_quant8_weight_as_input,
                            grouped_conv2d::is_ignored_nhwc_none_quant8_weight_as_input,
-                           grouped_conv2d::examples_nhwc_none_quant8_weight_as_input);
+                           grouped_conv2d::get_examples_nhwc_none_quant8_weight_as_input());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nhwc_none_quant8_weight_as_input) {
   const Model model = grouped_conv2d::createTestModel_nhwc_none_quant8_weight_as_input();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nhwc_none_quant8_weight_as_input);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nhwc_none_quant8_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5581,12 +6146,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nhwc_relu,
                            grouped_conv2d::is_ignored_nhwc_relu,
-                           grouped_conv2d::examples_nhwc_relu);
+                           grouped_conv2d::get_examples_nhwc_relu());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nhwc_relu) {
   const Model model = grouped_conv2d::createTestModel_nhwc_relu();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nhwc_relu);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nhwc_relu());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5596,12 +6161,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nhwc_relu_weight_as_input,
                            grouped_conv2d::is_ignored_nhwc_relu_weight_as_input,
-                           grouped_conv2d::examples_nhwc_relu_weight_as_input);
+                           grouped_conv2d::get_examples_nhwc_relu_weight_as_input());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nhwc_relu_weight_as_input) {
   const Model model = grouped_conv2d::createTestModel_nhwc_relu_weight_as_input();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nhwc_relu_weight_as_input);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nhwc_relu_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5611,12 +6176,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nhwc_relu_relaxed,
                            grouped_conv2d::is_ignored_nhwc_relu_relaxed,
-                           grouped_conv2d::examples_nhwc_relu_relaxed);
+                           grouped_conv2d::get_examples_nhwc_relu_relaxed());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nhwc_relu_relaxed) {
   const Model model = grouped_conv2d::createTestModel_nhwc_relu_relaxed();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nhwc_relu_relaxed);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nhwc_relu_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5626,12 +6191,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nhwc_relu_relaxed_weight_as_input,
                            grouped_conv2d::is_ignored_nhwc_relu_relaxed_weight_as_input,
-                           grouped_conv2d::examples_nhwc_relu_relaxed_weight_as_input);
+                           grouped_conv2d::get_examples_nhwc_relu_relaxed_weight_as_input());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nhwc_relu_relaxed_weight_as_input) {
   const Model model = grouped_conv2d::createTestModel_nhwc_relu_relaxed_weight_as_input();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nhwc_relu_relaxed_weight_as_input);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nhwc_relu_relaxed_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5641,12 +6206,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nhwc_relu_quant8,
                            grouped_conv2d::is_ignored_nhwc_relu_quant8,
-                           grouped_conv2d::examples_nhwc_relu_quant8);
+                           grouped_conv2d::get_examples_nhwc_relu_quant8());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nhwc_relu_quant8) {
   const Model model = grouped_conv2d::createTestModel_nhwc_relu_quant8();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nhwc_relu_quant8);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nhwc_relu_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5656,12 +6221,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nhwc_relu_quant8_weight_as_input,
                            grouped_conv2d::is_ignored_nhwc_relu_quant8_weight_as_input,
-                           grouped_conv2d::examples_nhwc_relu_quant8_weight_as_input);
+                           grouped_conv2d::get_examples_nhwc_relu_quant8_weight_as_input());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nhwc_relu_quant8_weight_as_input) {
   const Model model = grouped_conv2d::createTestModel_nhwc_relu_quant8_weight_as_input();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nhwc_relu_quant8_weight_as_input);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nhwc_relu_quant8_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5671,12 +6236,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nhwc_relu1,
                            grouped_conv2d::is_ignored_nhwc_relu1,
-                           grouped_conv2d::examples_nhwc_relu1);
+                           grouped_conv2d::get_examples_nhwc_relu1());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nhwc_relu1) {
   const Model model = grouped_conv2d::createTestModel_nhwc_relu1();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nhwc_relu1);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nhwc_relu1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5686,12 +6251,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nhwc_relu1_weight_as_input,
                            grouped_conv2d::is_ignored_nhwc_relu1_weight_as_input,
-                           grouped_conv2d::examples_nhwc_relu1_weight_as_input);
+                           grouped_conv2d::get_examples_nhwc_relu1_weight_as_input());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nhwc_relu1_weight_as_input) {
   const Model model = grouped_conv2d::createTestModel_nhwc_relu1_weight_as_input();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nhwc_relu1_weight_as_input);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nhwc_relu1_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5701,12 +6266,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nhwc_relu1_relaxed,
                            grouped_conv2d::is_ignored_nhwc_relu1_relaxed,
-                           grouped_conv2d::examples_nhwc_relu1_relaxed);
+                           grouped_conv2d::get_examples_nhwc_relu1_relaxed());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nhwc_relu1_relaxed) {
   const Model model = grouped_conv2d::createTestModel_nhwc_relu1_relaxed();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nhwc_relu1_relaxed);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nhwc_relu1_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5716,12 +6281,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nhwc_relu1_relaxed_weight_as_input,
                            grouped_conv2d::is_ignored_nhwc_relu1_relaxed_weight_as_input,
-                           grouped_conv2d::examples_nhwc_relu1_relaxed_weight_as_input);
+                           grouped_conv2d::get_examples_nhwc_relu1_relaxed_weight_as_input());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nhwc_relu1_relaxed_weight_as_input) {
   const Model model = grouped_conv2d::createTestModel_nhwc_relu1_relaxed_weight_as_input();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nhwc_relu1_relaxed_weight_as_input);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nhwc_relu1_relaxed_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5731,12 +6296,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nhwc_relu1_quant8,
                            grouped_conv2d::is_ignored_nhwc_relu1_quant8,
-                           grouped_conv2d::examples_nhwc_relu1_quant8);
+                           grouped_conv2d::get_examples_nhwc_relu1_quant8());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nhwc_relu1_quant8) {
   const Model model = grouped_conv2d::createTestModel_nhwc_relu1_quant8();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nhwc_relu1_quant8);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nhwc_relu1_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5746,12 +6311,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nhwc_relu1_quant8_weight_as_input,
                            grouped_conv2d::is_ignored_nhwc_relu1_quant8_weight_as_input,
-                           grouped_conv2d::examples_nhwc_relu1_quant8_weight_as_input);
+                           grouped_conv2d::get_examples_nhwc_relu1_quant8_weight_as_input());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nhwc_relu1_quant8_weight_as_input) {
   const Model model = grouped_conv2d::createTestModel_nhwc_relu1_quant8_weight_as_input();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nhwc_relu1_quant8_weight_as_input);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nhwc_relu1_quant8_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5761,12 +6326,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nhwc_relu6,
                            grouped_conv2d::is_ignored_nhwc_relu6,
-                           grouped_conv2d::examples_nhwc_relu6);
+                           grouped_conv2d::get_examples_nhwc_relu6());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nhwc_relu6) {
   const Model model = grouped_conv2d::createTestModel_nhwc_relu6();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nhwc_relu6);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nhwc_relu6());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5776,12 +6341,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nhwc_relu6_weight_as_input,
                            grouped_conv2d::is_ignored_nhwc_relu6_weight_as_input,
-                           grouped_conv2d::examples_nhwc_relu6_weight_as_input);
+                           grouped_conv2d::get_examples_nhwc_relu6_weight_as_input());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nhwc_relu6_weight_as_input) {
   const Model model = grouped_conv2d::createTestModel_nhwc_relu6_weight_as_input();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nhwc_relu6_weight_as_input);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nhwc_relu6_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5791,12 +6356,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nhwc_relu6_relaxed,
                            grouped_conv2d::is_ignored_nhwc_relu6_relaxed,
-                           grouped_conv2d::examples_nhwc_relu6_relaxed);
+                           grouped_conv2d::get_examples_nhwc_relu6_relaxed());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nhwc_relu6_relaxed) {
   const Model model = grouped_conv2d::createTestModel_nhwc_relu6_relaxed();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nhwc_relu6_relaxed);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nhwc_relu6_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5806,12 +6371,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nhwc_relu6_relaxed_weight_as_input,
                            grouped_conv2d::is_ignored_nhwc_relu6_relaxed_weight_as_input,
-                           grouped_conv2d::examples_nhwc_relu6_relaxed_weight_as_input);
+                           grouped_conv2d::get_examples_nhwc_relu6_relaxed_weight_as_input());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nhwc_relu6_relaxed_weight_as_input) {
   const Model model = grouped_conv2d::createTestModel_nhwc_relu6_relaxed_weight_as_input();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nhwc_relu6_relaxed_weight_as_input);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nhwc_relu6_relaxed_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5821,12 +6386,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nhwc_relu6_quant8,
                            grouped_conv2d::is_ignored_nhwc_relu6_quant8,
-                           grouped_conv2d::examples_nhwc_relu6_quant8);
+                           grouped_conv2d::get_examples_nhwc_relu6_quant8());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nhwc_relu6_quant8) {
   const Model model = grouped_conv2d::createTestModel_nhwc_relu6_quant8();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nhwc_relu6_quant8);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nhwc_relu6_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5836,12 +6401,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nhwc_relu6_quant8_weight_as_input,
                            grouped_conv2d::is_ignored_nhwc_relu6_quant8_weight_as_input,
-                           grouped_conv2d::examples_nhwc_relu6_quant8_weight_as_input);
+                           grouped_conv2d::get_examples_nhwc_relu6_quant8_weight_as_input());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nhwc_relu6_quant8_weight_as_input) {
   const Model model = grouped_conv2d::createTestModel_nhwc_relu6_quant8_weight_as_input();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nhwc_relu6_quant8_weight_as_input);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nhwc_relu6_quant8_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5851,12 +6416,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nchw_none,
                            grouped_conv2d::is_ignored_nchw_none,
-                           grouped_conv2d::examples_nchw_none);
+                           grouped_conv2d::get_examples_nchw_none());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nchw_none) {
   const Model model = grouped_conv2d::createTestModel_nchw_none();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nchw_none);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nchw_none());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5866,12 +6431,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nchw_none_weight_as_input,
                            grouped_conv2d::is_ignored_nchw_none_weight_as_input,
-                           grouped_conv2d::examples_nchw_none_weight_as_input);
+                           grouped_conv2d::get_examples_nchw_none_weight_as_input());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nchw_none_weight_as_input) {
   const Model model = grouped_conv2d::createTestModel_nchw_none_weight_as_input();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nchw_none_weight_as_input);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nchw_none_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5881,12 +6446,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nchw_none_relaxed,
                            grouped_conv2d::is_ignored_nchw_none_relaxed,
-                           grouped_conv2d::examples_nchw_none_relaxed);
+                           grouped_conv2d::get_examples_nchw_none_relaxed());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nchw_none_relaxed) {
   const Model model = grouped_conv2d::createTestModel_nchw_none_relaxed();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nchw_none_relaxed);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nchw_none_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5896,12 +6461,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nchw_none_relaxed_weight_as_input,
                            grouped_conv2d::is_ignored_nchw_none_relaxed_weight_as_input,
-                           grouped_conv2d::examples_nchw_none_relaxed_weight_as_input);
+                           grouped_conv2d::get_examples_nchw_none_relaxed_weight_as_input());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nchw_none_relaxed_weight_as_input) {
   const Model model = grouped_conv2d::createTestModel_nchw_none_relaxed_weight_as_input();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nchw_none_relaxed_weight_as_input);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nchw_none_relaxed_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5911,12 +6476,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nchw_none_quant8,
                            grouped_conv2d::is_ignored_nchw_none_quant8,
-                           grouped_conv2d::examples_nchw_none_quant8);
+                           grouped_conv2d::get_examples_nchw_none_quant8());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nchw_none_quant8) {
   const Model model = grouped_conv2d::createTestModel_nchw_none_quant8();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nchw_none_quant8);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nchw_none_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5926,12 +6491,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nchw_none_quant8_weight_as_input,
                            grouped_conv2d::is_ignored_nchw_none_quant8_weight_as_input,
-                           grouped_conv2d::examples_nchw_none_quant8_weight_as_input);
+                           grouped_conv2d::get_examples_nchw_none_quant8_weight_as_input());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nchw_none_quant8_weight_as_input) {
   const Model model = grouped_conv2d::createTestModel_nchw_none_quant8_weight_as_input();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nchw_none_quant8_weight_as_input);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nchw_none_quant8_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5941,12 +6506,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nchw_relu,
                            grouped_conv2d::is_ignored_nchw_relu,
-                           grouped_conv2d::examples_nchw_relu);
+                           grouped_conv2d::get_examples_nchw_relu());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nchw_relu) {
   const Model model = grouped_conv2d::createTestModel_nchw_relu();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nchw_relu);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nchw_relu());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5956,12 +6521,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nchw_relu_weight_as_input,
                            grouped_conv2d::is_ignored_nchw_relu_weight_as_input,
-                           grouped_conv2d::examples_nchw_relu_weight_as_input);
+                           grouped_conv2d::get_examples_nchw_relu_weight_as_input());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nchw_relu_weight_as_input) {
   const Model model = grouped_conv2d::createTestModel_nchw_relu_weight_as_input();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nchw_relu_weight_as_input);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nchw_relu_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5971,12 +6536,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nchw_relu_relaxed,
                            grouped_conv2d::is_ignored_nchw_relu_relaxed,
-                           grouped_conv2d::examples_nchw_relu_relaxed);
+                           grouped_conv2d::get_examples_nchw_relu_relaxed());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nchw_relu_relaxed) {
   const Model model = grouped_conv2d::createTestModel_nchw_relu_relaxed();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nchw_relu_relaxed);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nchw_relu_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -5986,12 +6551,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nchw_relu_relaxed_weight_as_input,
                            grouped_conv2d::is_ignored_nchw_relu_relaxed_weight_as_input,
-                           grouped_conv2d::examples_nchw_relu_relaxed_weight_as_input);
+                           grouped_conv2d::get_examples_nchw_relu_relaxed_weight_as_input());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nchw_relu_relaxed_weight_as_input) {
   const Model model = grouped_conv2d::createTestModel_nchw_relu_relaxed_weight_as_input();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nchw_relu_relaxed_weight_as_input);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nchw_relu_relaxed_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6001,12 +6566,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nchw_relu_quant8,
                            grouped_conv2d::is_ignored_nchw_relu_quant8,
-                           grouped_conv2d::examples_nchw_relu_quant8);
+                           grouped_conv2d::get_examples_nchw_relu_quant8());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nchw_relu_quant8) {
   const Model model = grouped_conv2d::createTestModel_nchw_relu_quant8();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nchw_relu_quant8);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nchw_relu_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6016,12 +6581,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nchw_relu_quant8_weight_as_input,
                            grouped_conv2d::is_ignored_nchw_relu_quant8_weight_as_input,
-                           grouped_conv2d::examples_nchw_relu_quant8_weight_as_input);
+                           grouped_conv2d::get_examples_nchw_relu_quant8_weight_as_input());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nchw_relu_quant8_weight_as_input) {
   const Model model = grouped_conv2d::createTestModel_nchw_relu_quant8_weight_as_input();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nchw_relu_quant8_weight_as_input);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nchw_relu_quant8_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6031,12 +6596,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nchw_relu1,
                            grouped_conv2d::is_ignored_nchw_relu1,
-                           grouped_conv2d::examples_nchw_relu1);
+                           grouped_conv2d::get_examples_nchw_relu1());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nchw_relu1) {
   const Model model = grouped_conv2d::createTestModel_nchw_relu1();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nchw_relu1);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nchw_relu1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6046,12 +6611,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nchw_relu1_weight_as_input,
                            grouped_conv2d::is_ignored_nchw_relu1_weight_as_input,
-                           grouped_conv2d::examples_nchw_relu1_weight_as_input);
+                           grouped_conv2d::get_examples_nchw_relu1_weight_as_input());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nchw_relu1_weight_as_input) {
   const Model model = grouped_conv2d::createTestModel_nchw_relu1_weight_as_input();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nchw_relu1_weight_as_input);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nchw_relu1_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6061,12 +6626,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nchw_relu1_relaxed,
                            grouped_conv2d::is_ignored_nchw_relu1_relaxed,
-                           grouped_conv2d::examples_nchw_relu1_relaxed);
+                           grouped_conv2d::get_examples_nchw_relu1_relaxed());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nchw_relu1_relaxed) {
   const Model model = grouped_conv2d::createTestModel_nchw_relu1_relaxed();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nchw_relu1_relaxed);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nchw_relu1_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6076,12 +6641,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nchw_relu1_relaxed_weight_as_input,
                            grouped_conv2d::is_ignored_nchw_relu1_relaxed_weight_as_input,
-                           grouped_conv2d::examples_nchw_relu1_relaxed_weight_as_input);
+                           grouped_conv2d::get_examples_nchw_relu1_relaxed_weight_as_input());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nchw_relu1_relaxed_weight_as_input) {
   const Model model = grouped_conv2d::createTestModel_nchw_relu1_relaxed_weight_as_input();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nchw_relu1_relaxed_weight_as_input);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nchw_relu1_relaxed_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6091,12 +6656,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nchw_relu1_quant8,
                            grouped_conv2d::is_ignored_nchw_relu1_quant8,
-                           grouped_conv2d::examples_nchw_relu1_quant8);
+                           grouped_conv2d::get_examples_nchw_relu1_quant8());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nchw_relu1_quant8) {
   const Model model = grouped_conv2d::createTestModel_nchw_relu1_quant8();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nchw_relu1_quant8);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nchw_relu1_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6106,12 +6671,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nchw_relu1_quant8_weight_as_input,
                            grouped_conv2d::is_ignored_nchw_relu1_quant8_weight_as_input,
-                           grouped_conv2d::examples_nchw_relu1_quant8_weight_as_input);
+                           grouped_conv2d::get_examples_nchw_relu1_quant8_weight_as_input());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nchw_relu1_quant8_weight_as_input) {
   const Model model = grouped_conv2d::createTestModel_nchw_relu1_quant8_weight_as_input();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nchw_relu1_quant8_weight_as_input);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nchw_relu1_quant8_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6121,12 +6686,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nchw_relu6,
                            grouped_conv2d::is_ignored_nchw_relu6,
-                           grouped_conv2d::examples_nchw_relu6);
+                           grouped_conv2d::get_examples_nchw_relu6());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nchw_relu6) {
   const Model model = grouped_conv2d::createTestModel_nchw_relu6();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nchw_relu6);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nchw_relu6());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6136,12 +6701,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nchw_relu6_weight_as_input,
                            grouped_conv2d::is_ignored_nchw_relu6_weight_as_input,
-                           grouped_conv2d::examples_nchw_relu6_weight_as_input);
+                           grouped_conv2d::get_examples_nchw_relu6_weight_as_input());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nchw_relu6_weight_as_input) {
   const Model model = grouped_conv2d::createTestModel_nchw_relu6_weight_as_input();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nchw_relu6_weight_as_input);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nchw_relu6_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6151,12 +6716,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nchw_relu6_relaxed,
                            grouped_conv2d::is_ignored_nchw_relu6_relaxed,
-                           grouped_conv2d::examples_nchw_relu6_relaxed);
+                           grouped_conv2d::get_examples_nchw_relu6_relaxed());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nchw_relu6_relaxed) {
   const Model model = grouped_conv2d::createTestModel_nchw_relu6_relaxed();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nchw_relu6_relaxed);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nchw_relu6_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6166,12 +6731,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nchw_relu6_relaxed_weight_as_input,
                            grouped_conv2d::is_ignored_nchw_relu6_relaxed_weight_as_input,
-                           grouped_conv2d::examples_nchw_relu6_relaxed_weight_as_input);
+                           grouped_conv2d::get_examples_nchw_relu6_relaxed_weight_as_input());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nchw_relu6_relaxed_weight_as_input) {
   const Model model = grouped_conv2d::createTestModel_nchw_relu6_relaxed_weight_as_input();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nchw_relu6_relaxed_weight_as_input);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nchw_relu6_relaxed_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6181,12 +6746,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nchw_relu6_quant8,
                            grouped_conv2d::is_ignored_nchw_relu6_quant8,
-                           grouped_conv2d::examples_nchw_relu6_quant8);
+                           grouped_conv2d::get_examples_nchw_relu6_quant8());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nchw_relu6_quant8) {
   const Model model = grouped_conv2d::createTestModel_nchw_relu6_quant8();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nchw_relu6_quant8);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nchw_relu6_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6196,12 +6761,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_nchw_relu6_quant8_weight_as_input,
                            grouped_conv2d::is_ignored_nchw_relu6_quant8_weight_as_input,
-                           grouped_conv2d::examples_nchw_relu6_quant8_weight_as_input);
+                           grouped_conv2d::get_examples_nchw_relu6_quant8_weight_as_input());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_nchw_relu6_quant8_weight_as_input) {
   const Model model = grouped_conv2d::createTestModel_nchw_relu6_quant8_weight_as_input();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_nchw_relu6_quant8_weight_as_input);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_nchw_relu6_quant8_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6211,12 +6776,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_large_nhwc,
                            grouped_conv2d::is_ignored_large_nhwc,
-                           grouped_conv2d::examples_large_nhwc);
+                           grouped_conv2d::get_examples_large_nhwc());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_large_nhwc) {
   const Model model = grouped_conv2d::createTestModel_large_nhwc();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_large_nhwc);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_large_nhwc());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6226,12 +6791,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_large_nhwc_weight_as_input,
                            grouped_conv2d::is_ignored_large_nhwc_weight_as_input,
-                           grouped_conv2d::examples_large_nhwc_weight_as_input);
+                           grouped_conv2d::get_examples_large_nhwc_weight_as_input());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_large_nhwc_weight_as_input) {
   const Model model = grouped_conv2d::createTestModel_large_nhwc_weight_as_input();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_large_nhwc_weight_as_input);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_large_nhwc_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6241,12 +6806,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_large_nhwc_relaxed,
                            grouped_conv2d::is_ignored_large_nhwc_relaxed,
-                           grouped_conv2d::examples_large_nhwc_relaxed);
+                           grouped_conv2d::get_examples_large_nhwc_relaxed());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_large_nhwc_relaxed) {
   const Model model = grouped_conv2d::createTestModel_large_nhwc_relaxed();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_large_nhwc_relaxed);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_large_nhwc_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6256,12 +6821,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_large_nhwc_relaxed_weight_as_input,
                            grouped_conv2d::is_ignored_large_nhwc_relaxed_weight_as_input,
-                           grouped_conv2d::examples_large_nhwc_relaxed_weight_as_input);
+                           grouped_conv2d::get_examples_large_nhwc_relaxed_weight_as_input());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_large_nhwc_relaxed_weight_as_input) {
   const Model model = grouped_conv2d::createTestModel_large_nhwc_relaxed_weight_as_input();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_large_nhwc_relaxed_weight_as_input);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_large_nhwc_relaxed_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6271,12 +6836,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_large_nhwc_quant8,
                            grouped_conv2d::is_ignored_large_nhwc_quant8,
-                           grouped_conv2d::examples_large_nhwc_quant8);
+                           grouped_conv2d::get_examples_large_nhwc_quant8());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_large_nhwc_quant8) {
   const Model model = grouped_conv2d::createTestModel_large_nhwc_quant8();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_large_nhwc_quant8);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_large_nhwc_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6286,12 +6851,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_large_nhwc_quant8_weight_as_input,
                            grouped_conv2d::is_ignored_large_nhwc_quant8_weight_as_input,
-                           grouped_conv2d::examples_large_nhwc_quant8_weight_as_input);
+                           grouped_conv2d::get_examples_large_nhwc_quant8_weight_as_input());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_large_nhwc_quant8_weight_as_input) {
   const Model model = grouped_conv2d::createTestModel_large_nhwc_quant8_weight_as_input();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_large_nhwc_quant8_weight_as_input);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_large_nhwc_quant8_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6301,12 +6866,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_large_nchw,
                            grouped_conv2d::is_ignored_large_nchw,
-                           grouped_conv2d::examples_large_nchw);
+                           grouped_conv2d::get_examples_large_nchw());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_large_nchw) {
   const Model model = grouped_conv2d::createTestModel_large_nchw();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_large_nchw);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_large_nchw());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6316,12 +6881,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_large_nchw_weight_as_input,
                            grouped_conv2d::is_ignored_large_nchw_weight_as_input,
-                           grouped_conv2d::examples_large_nchw_weight_as_input);
+                           grouped_conv2d::get_examples_large_nchw_weight_as_input());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_large_nchw_weight_as_input) {
   const Model model = grouped_conv2d::createTestModel_large_nchw_weight_as_input();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_large_nchw_weight_as_input);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_large_nchw_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6331,12 +6896,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_large_nchw_relaxed,
                            grouped_conv2d::is_ignored_large_nchw_relaxed,
-                           grouped_conv2d::examples_large_nchw_relaxed);
+                           grouped_conv2d::get_examples_large_nchw_relaxed());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_large_nchw_relaxed) {
   const Model model = grouped_conv2d::createTestModel_large_nchw_relaxed();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_large_nchw_relaxed);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_large_nchw_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6346,12 +6911,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_large_nchw_relaxed_weight_as_input,
                            grouped_conv2d::is_ignored_large_nchw_relaxed_weight_as_input,
-                           grouped_conv2d::examples_large_nchw_relaxed_weight_as_input);
+                           grouped_conv2d::get_examples_large_nchw_relaxed_weight_as_input());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_large_nchw_relaxed_weight_as_input) {
   const Model model = grouped_conv2d::createTestModel_large_nchw_relaxed_weight_as_input();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_large_nchw_relaxed_weight_as_input);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_large_nchw_relaxed_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6361,12 +6926,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_large_nchw_quant8,
                            grouped_conv2d::is_ignored_large_nchw_quant8,
-                           grouped_conv2d::examples_large_nchw_quant8);
+                           grouped_conv2d::get_examples_large_nchw_quant8());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_large_nchw_quant8) {
   const Model model = grouped_conv2d::createTestModel_large_nchw_quant8();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_large_nchw_quant8);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_large_nchw_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6376,12 +6941,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_large_nchw_quant8_weight_as_input,
                            grouped_conv2d::is_ignored_large_nchw_quant8_weight_as_input,
-                           grouped_conv2d::examples_large_nchw_quant8_weight_as_input);
+                           grouped_conv2d::get_examples_large_nchw_quant8_weight_as_input());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_large_nchw_quant8_weight_as_input) {
   const Model model = grouped_conv2d::createTestModel_large_nchw_quant8_weight_as_input();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_large_nchw_quant8_weight_as_input);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_large_nchw_quant8_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6391,12 +6956,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_channel_nhwc,
                            grouped_conv2d::is_ignored_channel_nhwc,
-                           grouped_conv2d::examples_channel_nhwc);
+                           grouped_conv2d::get_examples_channel_nhwc());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_channel_nhwc) {
   const Model model = grouped_conv2d::createTestModel_channel_nhwc();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_channel_nhwc);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_channel_nhwc());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6406,12 +6971,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_channel_nhwc_weight_as_input,
                            grouped_conv2d::is_ignored_channel_nhwc_weight_as_input,
-                           grouped_conv2d::examples_channel_nhwc_weight_as_input);
+                           grouped_conv2d::get_examples_channel_nhwc_weight_as_input());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_channel_nhwc_weight_as_input) {
   const Model model = grouped_conv2d::createTestModel_channel_nhwc_weight_as_input();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_channel_nhwc_weight_as_input);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_channel_nhwc_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6421,12 +6986,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_channel_nhwc_relaxed,
                            grouped_conv2d::is_ignored_channel_nhwc_relaxed,
-                           grouped_conv2d::examples_channel_nhwc_relaxed);
+                           grouped_conv2d::get_examples_channel_nhwc_relaxed());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_channel_nhwc_relaxed) {
   const Model model = grouped_conv2d::createTestModel_channel_nhwc_relaxed();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_channel_nhwc_relaxed);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_channel_nhwc_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6436,12 +7001,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_channel_nhwc_relaxed_weight_as_input,
                            grouped_conv2d::is_ignored_channel_nhwc_relaxed_weight_as_input,
-                           grouped_conv2d::examples_channel_nhwc_relaxed_weight_as_input);
+                           grouped_conv2d::get_examples_channel_nhwc_relaxed_weight_as_input());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_channel_nhwc_relaxed_weight_as_input) {
   const Model model = grouped_conv2d::createTestModel_channel_nhwc_relaxed_weight_as_input();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_channel_nhwc_relaxed_weight_as_input);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_channel_nhwc_relaxed_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6451,12 +7016,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_channel_nhwc_quant8,
                            grouped_conv2d::is_ignored_channel_nhwc_quant8,
-                           grouped_conv2d::examples_channel_nhwc_quant8);
+                           grouped_conv2d::get_examples_channel_nhwc_quant8());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_channel_nhwc_quant8) {
   const Model model = grouped_conv2d::createTestModel_channel_nhwc_quant8();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_channel_nhwc_quant8);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_channel_nhwc_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6466,12 +7031,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_channel_nhwc_quant8_weight_as_input,
                            grouped_conv2d::is_ignored_channel_nhwc_quant8_weight_as_input,
-                           grouped_conv2d::examples_channel_nhwc_quant8_weight_as_input);
+                           grouped_conv2d::get_examples_channel_nhwc_quant8_weight_as_input());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_channel_nhwc_quant8_weight_as_input) {
   const Model model = grouped_conv2d::createTestModel_channel_nhwc_quant8_weight_as_input();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_channel_nhwc_quant8_weight_as_input);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_channel_nhwc_quant8_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6481,12 +7046,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_channel_nchw,
                            grouped_conv2d::is_ignored_channel_nchw,
-                           grouped_conv2d::examples_channel_nchw);
+                           grouped_conv2d::get_examples_channel_nchw());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_channel_nchw) {
   const Model model = grouped_conv2d::createTestModel_channel_nchw();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_channel_nchw);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_channel_nchw());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6496,12 +7061,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_channel_nchw_weight_as_input,
                            grouped_conv2d::is_ignored_channel_nchw_weight_as_input,
-                           grouped_conv2d::examples_channel_nchw_weight_as_input);
+                           grouped_conv2d::get_examples_channel_nchw_weight_as_input());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_channel_nchw_weight_as_input) {
   const Model model = grouped_conv2d::createTestModel_channel_nchw_weight_as_input();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_channel_nchw_weight_as_input);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_channel_nchw_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6511,12 +7076,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_channel_nchw_relaxed,
                            grouped_conv2d::is_ignored_channel_nchw_relaxed,
-                           grouped_conv2d::examples_channel_nchw_relaxed);
+                           grouped_conv2d::get_examples_channel_nchw_relaxed());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_channel_nchw_relaxed) {
   const Model model = grouped_conv2d::createTestModel_channel_nchw_relaxed();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_channel_nchw_relaxed);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_channel_nchw_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6526,12 +7091,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_channel_nchw_relaxed_weight_as_input,
                            grouped_conv2d::is_ignored_channel_nchw_relaxed_weight_as_input,
-                           grouped_conv2d::examples_channel_nchw_relaxed_weight_as_input);
+                           grouped_conv2d::get_examples_channel_nchw_relaxed_weight_as_input());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_channel_nchw_relaxed_weight_as_input) {
   const Model model = grouped_conv2d::createTestModel_channel_nchw_relaxed_weight_as_input();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_channel_nchw_relaxed_weight_as_input);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_channel_nchw_relaxed_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6541,12 +7106,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_channel_nchw_quant8,
                            grouped_conv2d::is_ignored_channel_nchw_quant8,
-                           grouped_conv2d::examples_channel_nchw_quant8);
+                           grouped_conv2d::get_examples_channel_nchw_quant8());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_channel_nchw_quant8) {
   const Model model = grouped_conv2d::createTestModel_channel_nchw_quant8();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_channel_nchw_quant8);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_channel_nchw_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6556,12 +7121,12 @@
   generated_tests::Execute(device,
                            grouped_conv2d::createTestModel_channel_nchw_quant8_weight_as_input,
                            grouped_conv2d::is_ignored_channel_nchw_quant8_weight_as_input,
-                           grouped_conv2d::examples_channel_nchw_quant8_weight_as_input);
+                           grouped_conv2d::get_examples_channel_nchw_quant8_weight_as_input());
 }
 
 TEST_F(ValidationTest, grouped_conv2d_channel_nchw_quant8_weight_as_input) {
   const Model model = grouped_conv2d::createTestModel_channel_nchw_quant8_weight_as_input();
-  const std::vector<Request> requests = createRequests(grouped_conv2d::examples_channel_nchw_quant8_weight_as_input);
+  const std::vector<Request> requests = createRequests(grouped_conv2d::get_examples_channel_nchw_quant8_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6579,12 +7144,12 @@
   generated_tests::Execute(device,
                            heatmap_max_keypoint::createTestModel_nhwc,
                            heatmap_max_keypoint::is_ignored_nhwc,
-                           heatmap_max_keypoint::examples_nhwc);
+                           heatmap_max_keypoint::get_examples_nhwc());
 }
 
 TEST_F(ValidationTest, heatmap_max_keypoint_nhwc) {
   const Model model = heatmap_max_keypoint::createTestModel_nhwc();
-  const std::vector<Request> requests = createRequests(heatmap_max_keypoint::examples_nhwc);
+  const std::vector<Request> requests = createRequests(heatmap_max_keypoint::get_examples_nhwc());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6594,12 +7159,12 @@
   generated_tests::Execute(device,
                            heatmap_max_keypoint::createTestModel_nhwc_relaxed,
                            heatmap_max_keypoint::is_ignored_nhwc_relaxed,
-                           heatmap_max_keypoint::examples_nhwc_relaxed);
+                           heatmap_max_keypoint::get_examples_nhwc_relaxed());
 }
 
 TEST_F(ValidationTest, heatmap_max_keypoint_nhwc_relaxed) {
   const Model model = heatmap_max_keypoint::createTestModel_nhwc_relaxed();
-  const std::vector<Request> requests = createRequests(heatmap_max_keypoint::examples_nhwc_relaxed);
+  const std::vector<Request> requests = createRequests(heatmap_max_keypoint::get_examples_nhwc_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6609,12 +7174,12 @@
   generated_tests::Execute(device,
                            heatmap_max_keypoint::createTestModel_nchw,
                            heatmap_max_keypoint::is_ignored_nchw,
-                           heatmap_max_keypoint::examples_nchw);
+                           heatmap_max_keypoint::get_examples_nchw());
 }
 
 TEST_F(ValidationTest, heatmap_max_keypoint_nchw) {
   const Model model = heatmap_max_keypoint::createTestModel_nchw();
-  const std::vector<Request> requests = createRequests(heatmap_max_keypoint::examples_nchw);
+  const std::vector<Request> requests = createRequests(heatmap_max_keypoint::get_examples_nchw());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6624,12 +7189,12 @@
   generated_tests::Execute(device,
                            heatmap_max_keypoint::createTestModel_nchw_relaxed,
                            heatmap_max_keypoint::is_ignored_nchw_relaxed,
-                           heatmap_max_keypoint::examples_nchw_relaxed);
+                           heatmap_max_keypoint::get_examples_nchw_relaxed());
 }
 
 TEST_F(ValidationTest, heatmap_max_keypoint_nchw_relaxed) {
   const Model model = heatmap_max_keypoint::createTestModel_nchw_relaxed();
-  const std::vector<Request> requests = createRequests(heatmap_max_keypoint::examples_nchw_relaxed);
+  const std::vector<Request> requests = createRequests(heatmap_max_keypoint::get_examples_nchw_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6639,12 +7204,12 @@
   generated_tests::Execute(device,
                            heatmap_max_keypoint::createTestModel_nhwc_2,
                            heatmap_max_keypoint::is_ignored_nhwc_2,
-                           heatmap_max_keypoint::examples_nhwc_2);
+                           heatmap_max_keypoint::get_examples_nhwc_2());
 }
 
 TEST_F(ValidationTest, heatmap_max_keypoint_nhwc_2) {
   const Model model = heatmap_max_keypoint::createTestModel_nhwc_2();
-  const std::vector<Request> requests = createRequests(heatmap_max_keypoint::examples_nhwc_2);
+  const std::vector<Request> requests = createRequests(heatmap_max_keypoint::get_examples_nhwc_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6654,12 +7219,12 @@
   generated_tests::Execute(device,
                            heatmap_max_keypoint::createTestModel_nhwc_relaxed_2,
                            heatmap_max_keypoint::is_ignored_nhwc_relaxed_2,
-                           heatmap_max_keypoint::examples_nhwc_relaxed_2);
+                           heatmap_max_keypoint::get_examples_nhwc_relaxed_2());
 }
 
 TEST_F(ValidationTest, heatmap_max_keypoint_nhwc_relaxed_2) {
   const Model model = heatmap_max_keypoint::createTestModel_nhwc_relaxed_2();
-  const std::vector<Request> requests = createRequests(heatmap_max_keypoint::examples_nhwc_relaxed_2);
+  const std::vector<Request> requests = createRequests(heatmap_max_keypoint::get_examples_nhwc_relaxed_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6669,12 +7234,12 @@
   generated_tests::Execute(device,
                            heatmap_max_keypoint::createTestModel_nchw_2,
                            heatmap_max_keypoint::is_ignored_nchw_2,
-                           heatmap_max_keypoint::examples_nchw_2);
+                           heatmap_max_keypoint::get_examples_nchw_2());
 }
 
 TEST_F(ValidationTest, heatmap_max_keypoint_nchw_2) {
   const Model model = heatmap_max_keypoint::createTestModel_nchw_2();
-  const std::vector<Request> requests = createRequests(heatmap_max_keypoint::examples_nchw_2);
+  const std::vector<Request> requests = createRequests(heatmap_max_keypoint::get_examples_nchw_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6684,12 +7249,12 @@
   generated_tests::Execute(device,
                            heatmap_max_keypoint::createTestModel_nchw_relaxed_2,
                            heatmap_max_keypoint::is_ignored_nchw_relaxed_2,
-                           heatmap_max_keypoint::examples_nchw_relaxed_2);
+                           heatmap_max_keypoint::get_examples_nchw_relaxed_2());
 }
 
 TEST_F(ValidationTest, heatmap_max_keypoint_nchw_relaxed_2) {
   const Model model = heatmap_max_keypoint::createTestModel_nchw_relaxed_2();
-  const std::vector<Request> requests = createRequests(heatmap_max_keypoint::examples_nchw_relaxed_2);
+  const std::vector<Request> requests = createRequests(heatmap_max_keypoint::get_examples_nchw_relaxed_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6707,12 +7272,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_dim1_axis0,
                            l2_normalization_v1_2::is_ignored_dim1_axis0,
-                           l2_normalization_v1_2::examples_dim1_axis0);
+                           l2_normalization_v1_2::get_examples_dim1_axis0());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_dim1_axis0) {
   const Model model = l2_normalization_v1_2::createTestModel_dim1_axis0();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_dim1_axis0);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_dim1_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6722,12 +7287,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_dim2_axis1,
                            l2_normalization_v1_2::is_ignored_dim2_axis1,
-                           l2_normalization_v1_2::examples_dim2_axis1);
+                           l2_normalization_v1_2::get_examples_dim2_axis1());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_dim2_axis1) {
   const Model model = l2_normalization_v1_2::createTestModel_dim2_axis1();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_dim2_axis1);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_dim2_axis1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6737,12 +7302,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_dim3_axis2,
                            l2_normalization_v1_2::is_ignored_dim3_axis2,
-                           l2_normalization_v1_2::examples_dim3_axis2);
+                           l2_normalization_v1_2::get_examples_dim3_axis2());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_dim3_axis2) {
   const Model model = l2_normalization_v1_2::createTestModel_dim3_axis2();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_dim3_axis2);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_dim3_axis2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6752,12 +7317,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_relaxed_dim1_axis0,
                            l2_normalization_v1_2::is_ignored_relaxed_dim1_axis0,
-                           l2_normalization_v1_2::examples_relaxed_dim1_axis0);
+                           l2_normalization_v1_2::get_examples_relaxed_dim1_axis0());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_relaxed_dim1_axis0) {
   const Model model = l2_normalization_v1_2::createTestModel_relaxed_dim1_axis0();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_relaxed_dim1_axis0);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_relaxed_dim1_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6767,12 +7332,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_relaxed_dim2_axis1,
                            l2_normalization_v1_2::is_ignored_relaxed_dim2_axis1,
-                           l2_normalization_v1_2::examples_relaxed_dim2_axis1);
+                           l2_normalization_v1_2::get_examples_relaxed_dim2_axis1());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_relaxed_dim2_axis1) {
   const Model model = l2_normalization_v1_2::createTestModel_relaxed_dim2_axis1();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_relaxed_dim2_axis1);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_relaxed_dim2_axis1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6782,12 +7347,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_relaxed_dim3_axis2,
                            l2_normalization_v1_2::is_ignored_relaxed_dim3_axis2,
-                           l2_normalization_v1_2::examples_relaxed_dim3_axis2);
+                           l2_normalization_v1_2::get_examples_relaxed_dim3_axis2());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_relaxed_dim3_axis2) {
   const Model model = l2_normalization_v1_2::createTestModel_relaxed_dim3_axis2();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_relaxed_dim3_axis2);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_relaxed_dim3_axis2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6797,12 +7362,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_dim4_axis0,
                            l2_normalization_v1_2::is_ignored_axis_dim4_axis0,
-                           l2_normalization_v1_2::examples_axis_dim4_axis0);
+                           l2_normalization_v1_2::get_examples_axis_dim4_axis0());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_dim4_axis0) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_dim4_axis0();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_dim4_axis0);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_dim4_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6812,12 +7377,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_dim4_axis0_neg,
                            l2_normalization_v1_2::is_ignored_axis_dim4_axis0_neg,
-                           l2_normalization_v1_2::examples_axis_dim4_axis0_neg);
+                           l2_normalization_v1_2::get_examples_axis_dim4_axis0_neg());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_dim4_axis0_neg) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_dim4_axis0_neg();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_dim4_axis0_neg);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_dim4_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6827,12 +7392,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_dim4_axis1,
                            l2_normalization_v1_2::is_ignored_axis_dim4_axis1,
-                           l2_normalization_v1_2::examples_axis_dim4_axis1);
+                           l2_normalization_v1_2::get_examples_axis_dim4_axis1());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_dim4_axis1) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_dim4_axis1();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_dim4_axis1);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_dim4_axis1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6842,12 +7407,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_dim4_axis1_neg,
                            l2_normalization_v1_2::is_ignored_axis_dim4_axis1_neg,
-                           l2_normalization_v1_2::examples_axis_dim4_axis1_neg);
+                           l2_normalization_v1_2::get_examples_axis_dim4_axis1_neg());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_dim4_axis1_neg) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_dim4_axis1_neg();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_dim4_axis1_neg);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_dim4_axis1_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6857,12 +7422,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_dim4_axis2,
                            l2_normalization_v1_2::is_ignored_axis_dim4_axis2,
-                           l2_normalization_v1_2::examples_axis_dim4_axis2);
+                           l2_normalization_v1_2::get_examples_axis_dim4_axis2());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_dim4_axis2) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_dim4_axis2();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_dim4_axis2);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_dim4_axis2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6872,12 +7437,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_dim4_axis2_neg,
                            l2_normalization_v1_2::is_ignored_axis_dim4_axis2_neg,
-                           l2_normalization_v1_2::examples_axis_dim4_axis2_neg);
+                           l2_normalization_v1_2::get_examples_axis_dim4_axis2_neg());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_dim4_axis2_neg) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_dim4_axis2_neg();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_dim4_axis2_neg);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_dim4_axis2_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6887,12 +7452,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_dim4_axis3,
                            l2_normalization_v1_2::is_ignored_axis_dim4_axis3,
-                           l2_normalization_v1_2::examples_axis_dim4_axis3);
+                           l2_normalization_v1_2::get_examples_axis_dim4_axis3());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_dim4_axis3) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_dim4_axis3();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_dim4_axis3);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_dim4_axis3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6902,12 +7467,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_dim4_axis3_neg,
                            l2_normalization_v1_2::is_ignored_axis_dim4_axis3_neg,
-                           l2_normalization_v1_2::examples_axis_dim4_axis3_neg);
+                           l2_normalization_v1_2::get_examples_axis_dim4_axis3_neg());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_dim4_axis3_neg) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_dim4_axis3_neg();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_dim4_axis3_neg);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_dim4_axis3_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6917,12 +7482,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_dim3_axis0,
                            l2_normalization_v1_2::is_ignored_axis_dim3_axis0,
-                           l2_normalization_v1_2::examples_axis_dim3_axis0);
+                           l2_normalization_v1_2::get_examples_axis_dim3_axis0());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_dim3_axis0) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_dim3_axis0();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_dim3_axis0);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_dim3_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6932,12 +7497,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_dim3_axis0_neg,
                            l2_normalization_v1_2::is_ignored_axis_dim3_axis0_neg,
-                           l2_normalization_v1_2::examples_axis_dim3_axis0_neg);
+                           l2_normalization_v1_2::get_examples_axis_dim3_axis0_neg());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_dim3_axis0_neg) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_dim3_axis0_neg();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_dim3_axis0_neg);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_dim3_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6947,12 +7512,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_dim3_axis1,
                            l2_normalization_v1_2::is_ignored_axis_dim3_axis1,
-                           l2_normalization_v1_2::examples_axis_dim3_axis1);
+                           l2_normalization_v1_2::get_examples_axis_dim3_axis1());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_dim3_axis1) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_dim3_axis1();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_dim3_axis1);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_dim3_axis1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6962,12 +7527,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_dim3_axis1_neg,
                            l2_normalization_v1_2::is_ignored_axis_dim3_axis1_neg,
-                           l2_normalization_v1_2::examples_axis_dim3_axis1_neg);
+                           l2_normalization_v1_2::get_examples_axis_dim3_axis1_neg());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_dim3_axis1_neg) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_dim3_axis1_neg();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_dim3_axis1_neg);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_dim3_axis1_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6977,12 +7542,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_dim3_axis2,
                            l2_normalization_v1_2::is_ignored_axis_dim3_axis2,
-                           l2_normalization_v1_2::examples_axis_dim3_axis2);
+                           l2_normalization_v1_2::get_examples_axis_dim3_axis2());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_dim3_axis2) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_dim3_axis2();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_dim3_axis2);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_dim3_axis2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -6992,12 +7557,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_dim3_axis2_neg,
                            l2_normalization_v1_2::is_ignored_axis_dim3_axis2_neg,
-                           l2_normalization_v1_2::examples_axis_dim3_axis2_neg);
+                           l2_normalization_v1_2::get_examples_axis_dim3_axis2_neg());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_dim3_axis2_neg) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_dim3_axis2_neg();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_dim3_axis2_neg);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_dim3_axis2_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7007,12 +7572,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_dim2_axis0,
                            l2_normalization_v1_2::is_ignored_axis_dim2_axis0,
-                           l2_normalization_v1_2::examples_axis_dim2_axis0);
+                           l2_normalization_v1_2::get_examples_axis_dim2_axis0());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_dim2_axis0) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_dim2_axis0();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_dim2_axis0);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_dim2_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7022,12 +7587,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_dim2_axis0_neg,
                            l2_normalization_v1_2::is_ignored_axis_dim2_axis0_neg,
-                           l2_normalization_v1_2::examples_axis_dim2_axis0_neg);
+                           l2_normalization_v1_2::get_examples_axis_dim2_axis0_neg());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_dim2_axis0_neg) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_dim2_axis0_neg();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_dim2_axis0_neg);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_dim2_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7037,12 +7602,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_dim2_axis1,
                            l2_normalization_v1_2::is_ignored_axis_dim2_axis1,
-                           l2_normalization_v1_2::examples_axis_dim2_axis1);
+                           l2_normalization_v1_2::get_examples_axis_dim2_axis1());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_dim2_axis1) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_dim2_axis1();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_dim2_axis1);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_dim2_axis1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7052,12 +7617,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_dim2_axis1_neg,
                            l2_normalization_v1_2::is_ignored_axis_dim2_axis1_neg,
-                           l2_normalization_v1_2::examples_axis_dim2_axis1_neg);
+                           l2_normalization_v1_2::get_examples_axis_dim2_axis1_neg());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_dim2_axis1_neg) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_dim2_axis1_neg();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_dim2_axis1_neg);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_dim2_axis1_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7067,12 +7632,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_dim1_axis0,
                            l2_normalization_v1_2::is_ignored_axis_dim1_axis0,
-                           l2_normalization_v1_2::examples_axis_dim1_axis0);
+                           l2_normalization_v1_2::get_examples_axis_dim1_axis0());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_dim1_axis0) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_dim1_axis0();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_dim1_axis0);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_dim1_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7082,12 +7647,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_dim1_axis0_neg,
                            l2_normalization_v1_2::is_ignored_axis_dim1_axis0_neg,
-                           l2_normalization_v1_2::examples_axis_dim1_axis0_neg);
+                           l2_normalization_v1_2::get_examples_axis_dim1_axis0_neg());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_dim1_axis0_neg) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_dim1_axis0_neg();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_dim1_axis0_neg);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_dim1_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7097,12 +7662,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis0,
                            l2_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis0,
-                           l2_normalization_v1_2::examples_axis_relaxed_dim4_axis0);
+                           l2_normalization_v1_2::get_examples_axis_relaxed_dim4_axis0());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_relaxed_dim4_axis0) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis0();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_relaxed_dim4_axis0);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_relaxed_dim4_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7112,12 +7677,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis0_neg,
                            l2_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis0_neg,
-                           l2_normalization_v1_2::examples_axis_relaxed_dim4_axis0_neg);
+                           l2_normalization_v1_2::get_examples_axis_relaxed_dim4_axis0_neg());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_relaxed_dim4_axis0_neg) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis0_neg();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_relaxed_dim4_axis0_neg);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_relaxed_dim4_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7127,12 +7692,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis1,
                            l2_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis1,
-                           l2_normalization_v1_2::examples_axis_relaxed_dim4_axis1);
+                           l2_normalization_v1_2::get_examples_axis_relaxed_dim4_axis1());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_relaxed_dim4_axis1) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis1();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_relaxed_dim4_axis1);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_relaxed_dim4_axis1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7142,12 +7707,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis1_neg,
                            l2_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis1_neg,
-                           l2_normalization_v1_2::examples_axis_relaxed_dim4_axis1_neg);
+                           l2_normalization_v1_2::get_examples_axis_relaxed_dim4_axis1_neg());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_relaxed_dim4_axis1_neg) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis1_neg();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_relaxed_dim4_axis1_neg);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_relaxed_dim4_axis1_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7157,12 +7722,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis2,
                            l2_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis2,
-                           l2_normalization_v1_2::examples_axis_relaxed_dim4_axis2);
+                           l2_normalization_v1_2::get_examples_axis_relaxed_dim4_axis2());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_relaxed_dim4_axis2) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis2();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_relaxed_dim4_axis2);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_relaxed_dim4_axis2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7172,12 +7737,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis2_neg,
                            l2_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis2_neg,
-                           l2_normalization_v1_2::examples_axis_relaxed_dim4_axis2_neg);
+                           l2_normalization_v1_2::get_examples_axis_relaxed_dim4_axis2_neg());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_relaxed_dim4_axis2_neg) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis2_neg();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_relaxed_dim4_axis2_neg);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_relaxed_dim4_axis2_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7187,12 +7752,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis3,
                            l2_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis3,
-                           l2_normalization_v1_2::examples_axis_relaxed_dim4_axis3);
+                           l2_normalization_v1_2::get_examples_axis_relaxed_dim4_axis3());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_relaxed_dim4_axis3) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis3();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_relaxed_dim4_axis3);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_relaxed_dim4_axis3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7202,12 +7767,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis3_neg,
                            l2_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis3_neg,
-                           l2_normalization_v1_2::examples_axis_relaxed_dim4_axis3_neg);
+                           l2_normalization_v1_2::get_examples_axis_relaxed_dim4_axis3_neg());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_relaxed_dim4_axis3_neg) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis3_neg();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_relaxed_dim4_axis3_neg);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_relaxed_dim4_axis3_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7217,12 +7782,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis0,
                            l2_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis0,
-                           l2_normalization_v1_2::examples_axis_relaxed_dim3_axis0);
+                           l2_normalization_v1_2::get_examples_axis_relaxed_dim3_axis0());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_relaxed_dim3_axis0) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis0();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_relaxed_dim3_axis0);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_relaxed_dim3_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7232,12 +7797,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis0_neg,
                            l2_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis0_neg,
-                           l2_normalization_v1_2::examples_axis_relaxed_dim3_axis0_neg);
+                           l2_normalization_v1_2::get_examples_axis_relaxed_dim3_axis0_neg());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_relaxed_dim3_axis0_neg) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis0_neg();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_relaxed_dim3_axis0_neg);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_relaxed_dim3_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7247,12 +7812,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis1,
                            l2_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis1,
-                           l2_normalization_v1_2::examples_axis_relaxed_dim3_axis1);
+                           l2_normalization_v1_2::get_examples_axis_relaxed_dim3_axis1());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_relaxed_dim3_axis1) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis1();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_relaxed_dim3_axis1);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_relaxed_dim3_axis1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7262,12 +7827,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis1_neg,
                            l2_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis1_neg,
-                           l2_normalization_v1_2::examples_axis_relaxed_dim3_axis1_neg);
+                           l2_normalization_v1_2::get_examples_axis_relaxed_dim3_axis1_neg());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_relaxed_dim3_axis1_neg) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis1_neg();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_relaxed_dim3_axis1_neg);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_relaxed_dim3_axis1_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7277,12 +7842,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis2,
                            l2_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis2,
-                           l2_normalization_v1_2::examples_axis_relaxed_dim3_axis2);
+                           l2_normalization_v1_2::get_examples_axis_relaxed_dim3_axis2());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_relaxed_dim3_axis2) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis2();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_relaxed_dim3_axis2);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_relaxed_dim3_axis2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7292,12 +7857,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis2_neg,
                            l2_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis2_neg,
-                           l2_normalization_v1_2::examples_axis_relaxed_dim3_axis2_neg);
+                           l2_normalization_v1_2::get_examples_axis_relaxed_dim3_axis2_neg());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_relaxed_dim3_axis2_neg) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis2_neg();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_relaxed_dim3_axis2_neg);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_relaxed_dim3_axis2_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7307,12 +7872,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_relaxed_dim2_axis0,
                            l2_normalization_v1_2::is_ignored_axis_relaxed_dim2_axis0,
-                           l2_normalization_v1_2::examples_axis_relaxed_dim2_axis0);
+                           l2_normalization_v1_2::get_examples_axis_relaxed_dim2_axis0());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_relaxed_dim2_axis0) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_relaxed_dim2_axis0();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_relaxed_dim2_axis0);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_relaxed_dim2_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7322,12 +7887,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_relaxed_dim2_axis0_neg,
                            l2_normalization_v1_2::is_ignored_axis_relaxed_dim2_axis0_neg,
-                           l2_normalization_v1_2::examples_axis_relaxed_dim2_axis0_neg);
+                           l2_normalization_v1_2::get_examples_axis_relaxed_dim2_axis0_neg());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_relaxed_dim2_axis0_neg) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_relaxed_dim2_axis0_neg();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_relaxed_dim2_axis0_neg);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_relaxed_dim2_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7337,12 +7902,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_relaxed_dim2_axis1,
                            l2_normalization_v1_2::is_ignored_axis_relaxed_dim2_axis1,
-                           l2_normalization_v1_2::examples_axis_relaxed_dim2_axis1);
+                           l2_normalization_v1_2::get_examples_axis_relaxed_dim2_axis1());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_relaxed_dim2_axis1) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_relaxed_dim2_axis1();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_relaxed_dim2_axis1);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_relaxed_dim2_axis1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7352,12 +7917,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_relaxed_dim2_axis1_neg,
                            l2_normalization_v1_2::is_ignored_axis_relaxed_dim2_axis1_neg,
-                           l2_normalization_v1_2::examples_axis_relaxed_dim2_axis1_neg);
+                           l2_normalization_v1_2::get_examples_axis_relaxed_dim2_axis1_neg());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_relaxed_dim2_axis1_neg) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_relaxed_dim2_axis1_neg();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_relaxed_dim2_axis1_neg);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_relaxed_dim2_axis1_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7367,12 +7932,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_relaxed_dim1_axis0,
                            l2_normalization_v1_2::is_ignored_axis_relaxed_dim1_axis0,
-                           l2_normalization_v1_2::examples_axis_relaxed_dim1_axis0);
+                           l2_normalization_v1_2::get_examples_axis_relaxed_dim1_axis0());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_relaxed_dim1_axis0) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_relaxed_dim1_axis0();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_relaxed_dim1_axis0);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_relaxed_dim1_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7382,12 +7947,12 @@
   generated_tests::Execute(device,
                            l2_normalization_v1_2::createTestModel_axis_relaxed_dim1_axis0_neg,
                            l2_normalization_v1_2::is_ignored_axis_relaxed_dim1_axis0_neg,
-                           l2_normalization_v1_2::examples_axis_relaxed_dim1_axis0_neg);
+                           l2_normalization_v1_2::get_examples_axis_relaxed_dim1_axis0_neg());
 }
 
 TEST_F(ValidationTest, l2_normalization_v1_2_axis_relaxed_dim1_axis0_neg) {
   const Model model = l2_normalization_v1_2::createTestModel_axis_relaxed_dim1_axis0_neg();
-  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::examples_axis_relaxed_dim1_axis0_neg);
+  const std::vector<Request> requests = createRequests(l2_normalization_v1_2::get_examples_axis_relaxed_dim1_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7405,12 +7970,12 @@
   generated_tests::Execute(device,
                            l2_pool_v1_2::createTestModel_nhwc,
                            l2_pool_v1_2::is_ignored_nhwc,
-                           l2_pool_v1_2::examples_nhwc);
+                           l2_pool_v1_2::get_examples_nhwc());
 }
 
 TEST_F(ValidationTest, l2_pool_v1_2_nhwc) {
   const Model model = l2_pool_v1_2::createTestModel_nhwc();
-  const std::vector<Request> requests = createRequests(l2_pool_v1_2::examples_nhwc);
+  const std::vector<Request> requests = createRequests(l2_pool_v1_2::get_examples_nhwc());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7420,12 +7985,12 @@
   generated_tests::Execute(device,
                            l2_pool_v1_2::createTestModel_nhwc_relaxed,
                            l2_pool_v1_2::is_ignored_nhwc_relaxed,
-                           l2_pool_v1_2::examples_nhwc_relaxed);
+                           l2_pool_v1_2::get_examples_nhwc_relaxed());
 }
 
 TEST_F(ValidationTest, l2_pool_v1_2_nhwc_relaxed) {
   const Model model = l2_pool_v1_2::createTestModel_nhwc_relaxed();
-  const std::vector<Request> requests = createRequests(l2_pool_v1_2::examples_nhwc_relaxed);
+  const std::vector<Request> requests = createRequests(l2_pool_v1_2::get_examples_nhwc_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7435,12 +8000,12 @@
   generated_tests::Execute(device,
                            l2_pool_v1_2::createTestModel_nchw,
                            l2_pool_v1_2::is_ignored_nchw,
-                           l2_pool_v1_2::examples_nchw);
+                           l2_pool_v1_2::get_examples_nchw());
 }
 
 TEST_F(ValidationTest, l2_pool_v1_2_nchw) {
   const Model model = l2_pool_v1_2::createTestModel_nchw();
-  const std::vector<Request> requests = createRequests(l2_pool_v1_2::examples_nchw);
+  const std::vector<Request> requests = createRequests(l2_pool_v1_2::get_examples_nchw());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7450,12 +8015,12 @@
   generated_tests::Execute(device,
                            l2_pool_v1_2::createTestModel_nchw_relaxed,
                            l2_pool_v1_2::is_ignored_nchw_relaxed,
-                           l2_pool_v1_2::examples_nchw_relaxed);
+                           l2_pool_v1_2::get_examples_nchw_relaxed());
 }
 
 TEST_F(ValidationTest, l2_pool_v1_2_nchw_relaxed) {
   const Model model = l2_pool_v1_2::createTestModel_nchw_relaxed();
-  const std::vector<Request> requests = createRequests(l2_pool_v1_2::examples_nchw_relaxed);
+  const std::vector<Request> requests = createRequests(l2_pool_v1_2::get_examples_nchw_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7465,12 +8030,12 @@
   generated_tests::Execute(device,
                            l2_pool_v1_2::createTestModel_nhwc_2,
                            l2_pool_v1_2::is_ignored_nhwc_2,
-                           l2_pool_v1_2::examples_nhwc_2);
+                           l2_pool_v1_2::get_examples_nhwc_2());
 }
 
 TEST_F(ValidationTest, l2_pool_v1_2_nhwc_2) {
   const Model model = l2_pool_v1_2::createTestModel_nhwc_2();
-  const std::vector<Request> requests = createRequests(l2_pool_v1_2::examples_nhwc_2);
+  const std::vector<Request> requests = createRequests(l2_pool_v1_2::get_examples_nhwc_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7480,12 +8045,12 @@
   generated_tests::Execute(device,
                            l2_pool_v1_2::createTestModel_nhwc_relaxed_2,
                            l2_pool_v1_2::is_ignored_nhwc_relaxed_2,
-                           l2_pool_v1_2::examples_nhwc_relaxed_2);
+                           l2_pool_v1_2::get_examples_nhwc_relaxed_2());
 }
 
 TEST_F(ValidationTest, l2_pool_v1_2_nhwc_relaxed_2) {
   const Model model = l2_pool_v1_2::createTestModel_nhwc_relaxed_2();
-  const std::vector<Request> requests = createRequests(l2_pool_v1_2::examples_nhwc_relaxed_2);
+  const std::vector<Request> requests = createRequests(l2_pool_v1_2::get_examples_nhwc_relaxed_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7495,12 +8060,12 @@
   generated_tests::Execute(device,
                            l2_pool_v1_2::createTestModel_nchw_2,
                            l2_pool_v1_2::is_ignored_nchw_2,
-                           l2_pool_v1_2::examples_nchw_2);
+                           l2_pool_v1_2::get_examples_nchw_2());
 }
 
 TEST_F(ValidationTest, l2_pool_v1_2_nchw_2) {
   const Model model = l2_pool_v1_2::createTestModel_nchw_2();
-  const std::vector<Request> requests = createRequests(l2_pool_v1_2::examples_nchw_2);
+  const std::vector<Request> requests = createRequests(l2_pool_v1_2::get_examples_nchw_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7510,12 +8075,12 @@
   generated_tests::Execute(device,
                            l2_pool_v1_2::createTestModel_nchw_relaxed_2,
                            l2_pool_v1_2::is_ignored_nchw_relaxed_2,
-                           l2_pool_v1_2::examples_nchw_relaxed_2);
+                           l2_pool_v1_2::get_examples_nchw_relaxed_2());
 }
 
 TEST_F(ValidationTest, l2_pool_v1_2_nchw_relaxed_2) {
   const Model model = l2_pool_v1_2::createTestModel_nchw_relaxed_2();
-  const std::vector<Request> requests = createRequests(l2_pool_v1_2::examples_nchw_relaxed_2);
+  const std::vector<Request> requests = createRequests(l2_pool_v1_2::get_examples_nchw_relaxed_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7525,12 +8090,12 @@
   generated_tests::Execute(device,
                            l2_pool_v1_2::createTestModel_large_nhwc,
                            l2_pool_v1_2::is_ignored_large_nhwc,
-                           l2_pool_v1_2::examples_large_nhwc);
+                           l2_pool_v1_2::get_examples_large_nhwc());
 }
 
 TEST_F(ValidationTest, l2_pool_v1_2_large_nhwc) {
   const Model model = l2_pool_v1_2::createTestModel_large_nhwc();
-  const std::vector<Request> requests = createRequests(l2_pool_v1_2::examples_large_nhwc);
+  const std::vector<Request> requests = createRequests(l2_pool_v1_2::get_examples_large_nhwc());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7540,12 +8105,12 @@
   generated_tests::Execute(device,
                            l2_pool_v1_2::createTestModel_large_nhwc_relaxed,
                            l2_pool_v1_2::is_ignored_large_nhwc_relaxed,
-                           l2_pool_v1_2::examples_large_nhwc_relaxed);
+                           l2_pool_v1_2::get_examples_large_nhwc_relaxed());
 }
 
 TEST_F(ValidationTest, l2_pool_v1_2_large_nhwc_relaxed) {
   const Model model = l2_pool_v1_2::createTestModel_large_nhwc_relaxed();
-  const std::vector<Request> requests = createRequests(l2_pool_v1_2::examples_large_nhwc_relaxed);
+  const std::vector<Request> requests = createRequests(l2_pool_v1_2::get_examples_large_nhwc_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7555,12 +8120,12 @@
   generated_tests::Execute(device,
                            l2_pool_v1_2::createTestModel_large_nchw,
                            l2_pool_v1_2::is_ignored_large_nchw,
-                           l2_pool_v1_2::examples_large_nchw);
+                           l2_pool_v1_2::get_examples_large_nchw());
 }
 
 TEST_F(ValidationTest, l2_pool_v1_2_large_nchw) {
   const Model model = l2_pool_v1_2::createTestModel_large_nchw();
-  const std::vector<Request> requests = createRequests(l2_pool_v1_2::examples_large_nchw);
+  const std::vector<Request> requests = createRequests(l2_pool_v1_2::get_examples_large_nchw());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7570,12 +8135,12 @@
   generated_tests::Execute(device,
                            l2_pool_v1_2::createTestModel_large_nchw_relaxed,
                            l2_pool_v1_2::is_ignored_large_nchw_relaxed,
-                           l2_pool_v1_2::examples_large_nchw_relaxed);
+                           l2_pool_v1_2::get_examples_large_nchw_relaxed());
 }
 
 TEST_F(ValidationTest, l2_pool_v1_2_large_nchw_relaxed) {
   const Model model = l2_pool_v1_2::createTestModel_large_nchw_relaxed();
-  const std::vector<Request> requests = createRequests(l2_pool_v1_2::examples_large_nchw_relaxed);
+  const std::vector<Request> requests = createRequests(l2_pool_v1_2::get_examples_large_nchw_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7593,12 +8158,12 @@
   generated_tests::Execute(device,
                            layer_norm_lstm::createTestModel,
                            layer_norm_lstm::is_ignored,
-                           layer_norm_lstm::examples);
+                           layer_norm_lstm::get_examples());
 }
 
 TEST_F(ValidationTest, layer_norm_lstm) {
   const Model model = layer_norm_lstm::createTestModel();
-  const std::vector<Request> requests = createRequests(layer_norm_lstm::examples);
+  const std::vector<Request> requests = createRequests(layer_norm_lstm::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7616,12 +8181,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim4_axis0,
                            local_response_normalization_v1_2::is_ignored_axis_dim4_axis0,
-                           local_response_normalization_v1_2::examples_axis_dim4_axis0);
+                           local_response_normalization_v1_2::get_examples_axis_dim4_axis0());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim4_axis0) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim4_axis0();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim4_axis0);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim4_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7631,12 +8196,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim4_axis0_neg,
                            local_response_normalization_v1_2::is_ignored_axis_dim4_axis0_neg,
-                           local_response_normalization_v1_2::examples_axis_dim4_axis0_neg);
+                           local_response_normalization_v1_2::get_examples_axis_dim4_axis0_neg());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim4_axis0_neg) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim4_axis0_neg();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim4_axis0_neg);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim4_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7646,12 +8211,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim4_axis1,
                            local_response_normalization_v1_2::is_ignored_axis_dim4_axis1,
-                           local_response_normalization_v1_2::examples_axis_dim4_axis1);
+                           local_response_normalization_v1_2::get_examples_axis_dim4_axis1());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim4_axis1) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim4_axis1();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim4_axis1);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim4_axis1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7661,12 +8226,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim4_axis1_neg,
                            local_response_normalization_v1_2::is_ignored_axis_dim4_axis1_neg,
-                           local_response_normalization_v1_2::examples_axis_dim4_axis1_neg);
+                           local_response_normalization_v1_2::get_examples_axis_dim4_axis1_neg());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim4_axis1_neg) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim4_axis1_neg();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim4_axis1_neg);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim4_axis1_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7676,12 +8241,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim4_axis2,
                            local_response_normalization_v1_2::is_ignored_axis_dim4_axis2,
-                           local_response_normalization_v1_2::examples_axis_dim4_axis2);
+                           local_response_normalization_v1_2::get_examples_axis_dim4_axis2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim4_axis2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim4_axis2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim4_axis2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim4_axis2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7691,12 +8256,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim4_axis2_neg,
                            local_response_normalization_v1_2::is_ignored_axis_dim4_axis2_neg,
-                           local_response_normalization_v1_2::examples_axis_dim4_axis2_neg);
+                           local_response_normalization_v1_2::get_examples_axis_dim4_axis2_neg());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim4_axis2_neg) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim4_axis2_neg();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim4_axis2_neg);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim4_axis2_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7706,12 +8271,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim4_axis3,
                            local_response_normalization_v1_2::is_ignored_axis_dim4_axis3,
-                           local_response_normalization_v1_2::examples_axis_dim4_axis3);
+                           local_response_normalization_v1_2::get_examples_axis_dim4_axis3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim4_axis3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim4_axis3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim4_axis3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim4_axis3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7721,12 +8286,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim4_axis3_neg,
                            local_response_normalization_v1_2::is_ignored_axis_dim4_axis3_neg,
-                           local_response_normalization_v1_2::examples_axis_dim4_axis3_neg);
+                           local_response_normalization_v1_2::get_examples_axis_dim4_axis3_neg());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim4_axis3_neg) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim4_axis3_neg();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim4_axis3_neg);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim4_axis3_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7736,12 +8301,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim3_axis0,
                            local_response_normalization_v1_2::is_ignored_axis_dim3_axis0,
-                           local_response_normalization_v1_2::examples_axis_dim3_axis0);
+                           local_response_normalization_v1_2::get_examples_axis_dim3_axis0());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim3_axis0) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim3_axis0();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim3_axis0);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim3_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7751,12 +8316,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim3_axis0_neg,
                            local_response_normalization_v1_2::is_ignored_axis_dim3_axis0_neg,
-                           local_response_normalization_v1_2::examples_axis_dim3_axis0_neg);
+                           local_response_normalization_v1_2::get_examples_axis_dim3_axis0_neg());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim3_axis0_neg) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim3_axis0_neg();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim3_axis0_neg);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim3_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7766,12 +8331,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim3_axis1,
                            local_response_normalization_v1_2::is_ignored_axis_dim3_axis1,
-                           local_response_normalization_v1_2::examples_axis_dim3_axis1);
+                           local_response_normalization_v1_2::get_examples_axis_dim3_axis1());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim3_axis1) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim3_axis1();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim3_axis1);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim3_axis1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7781,12 +8346,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim3_axis1_neg,
                            local_response_normalization_v1_2::is_ignored_axis_dim3_axis1_neg,
-                           local_response_normalization_v1_2::examples_axis_dim3_axis1_neg);
+                           local_response_normalization_v1_2::get_examples_axis_dim3_axis1_neg());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim3_axis1_neg) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim3_axis1_neg();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim3_axis1_neg);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim3_axis1_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7796,12 +8361,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim3_axis2,
                            local_response_normalization_v1_2::is_ignored_axis_dim3_axis2,
-                           local_response_normalization_v1_2::examples_axis_dim3_axis2);
+                           local_response_normalization_v1_2::get_examples_axis_dim3_axis2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim3_axis2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim3_axis2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim3_axis2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim3_axis2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7811,12 +8376,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim3_axis2_neg,
                            local_response_normalization_v1_2::is_ignored_axis_dim3_axis2_neg,
-                           local_response_normalization_v1_2::examples_axis_dim3_axis2_neg);
+                           local_response_normalization_v1_2::get_examples_axis_dim3_axis2_neg());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim3_axis2_neg) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim3_axis2_neg();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim3_axis2_neg);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim3_axis2_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7826,12 +8391,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim2_axis0,
                            local_response_normalization_v1_2::is_ignored_axis_dim2_axis0,
-                           local_response_normalization_v1_2::examples_axis_dim2_axis0);
+                           local_response_normalization_v1_2::get_examples_axis_dim2_axis0());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim2_axis0) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim2_axis0();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim2_axis0);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim2_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7841,12 +8406,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim2_axis0_neg,
                            local_response_normalization_v1_2::is_ignored_axis_dim2_axis0_neg,
-                           local_response_normalization_v1_2::examples_axis_dim2_axis0_neg);
+                           local_response_normalization_v1_2::get_examples_axis_dim2_axis0_neg());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim2_axis0_neg) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim2_axis0_neg();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim2_axis0_neg);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim2_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7856,12 +8421,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim2_axis1,
                            local_response_normalization_v1_2::is_ignored_axis_dim2_axis1,
-                           local_response_normalization_v1_2::examples_axis_dim2_axis1);
+                           local_response_normalization_v1_2::get_examples_axis_dim2_axis1());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim2_axis1) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim2_axis1();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim2_axis1);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim2_axis1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7871,12 +8436,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim2_axis1_neg,
                            local_response_normalization_v1_2::is_ignored_axis_dim2_axis1_neg,
-                           local_response_normalization_v1_2::examples_axis_dim2_axis1_neg);
+                           local_response_normalization_v1_2::get_examples_axis_dim2_axis1_neg());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim2_axis1_neg) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim2_axis1_neg();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim2_axis1_neg);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim2_axis1_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7886,12 +8451,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim1_axis0,
                            local_response_normalization_v1_2::is_ignored_axis_dim1_axis0,
-                           local_response_normalization_v1_2::examples_axis_dim1_axis0);
+                           local_response_normalization_v1_2::get_examples_axis_dim1_axis0());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim1_axis0) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim1_axis0();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim1_axis0);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim1_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7901,12 +8466,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim1_axis0_neg,
                            local_response_normalization_v1_2::is_ignored_axis_dim1_axis0_neg,
-                           local_response_normalization_v1_2::examples_axis_dim1_axis0_neg);
+                           local_response_normalization_v1_2::get_examples_axis_dim1_axis0_neg());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim1_axis0_neg) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim1_axis0_neg();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim1_axis0_neg);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim1_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7916,12 +8481,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis0,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis0,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis0);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis0());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim4_axis0) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis0();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis0);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7931,12 +8496,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis0_neg,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis0_neg,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis0_neg);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis0_neg());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim4_axis0_neg) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis0_neg();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis0_neg);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7946,12 +8511,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis1,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis1,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis1);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis1());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim4_axis1) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis1();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis1);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7961,12 +8526,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis1_neg,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis1_neg,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis1_neg);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis1_neg());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim4_axis1_neg) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis1_neg();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis1_neg);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis1_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7976,12 +8541,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis2,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis2,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis2);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim4_axis2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -7991,12 +8556,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis2_neg,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis2_neg,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis2_neg);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis2_neg());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim4_axis2_neg) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis2_neg();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis2_neg);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis2_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8006,12 +8571,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis3,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis3,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis3);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim4_axis3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8021,12 +8586,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis3_neg,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis3_neg,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis3_neg);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis3_neg());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim4_axis3_neg) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis3_neg();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis3_neg);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis3_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8036,12 +8601,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis0,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis0,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis0);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis0());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim3_axis0) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis0();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis0);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8051,12 +8616,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis0_neg,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis0_neg,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis0_neg);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis0_neg());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim3_axis0_neg) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis0_neg();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis0_neg);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8066,12 +8631,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis1,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis1,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis1);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis1());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim3_axis1) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis1();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis1);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8081,12 +8646,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis1_neg,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis1_neg,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis1_neg);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis1_neg());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim3_axis1_neg) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis1_neg();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis1_neg);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis1_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8096,12 +8661,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis2,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis2,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis2);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim3_axis2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8111,12 +8676,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis2_neg,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis2_neg,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis2_neg);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis2_neg());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim3_axis2_neg) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis2_neg();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis2_neg);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis2_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8126,12 +8691,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim2_axis0,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim2_axis0,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim2_axis0);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim2_axis0());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim2_axis0) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim2_axis0();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim2_axis0);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim2_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8141,12 +8706,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim2_axis0_neg,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim2_axis0_neg,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim2_axis0_neg);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim2_axis0_neg());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim2_axis0_neg) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim2_axis0_neg();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim2_axis0_neg);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim2_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8156,12 +8721,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim2_axis1,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim2_axis1,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim2_axis1);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim2_axis1());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim2_axis1) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim2_axis1();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim2_axis1);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim2_axis1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8171,12 +8736,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim2_axis1_neg,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim2_axis1_neg,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim2_axis1_neg);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim2_axis1_neg());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim2_axis1_neg) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim2_axis1_neg();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim2_axis1_neg);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim2_axis1_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8186,12 +8751,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim1_axis0,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim1_axis0,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim1_axis0);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim1_axis0());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim1_axis0) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim1_axis0();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim1_axis0);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim1_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8201,12 +8766,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim1_axis0_neg,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim1_axis0_neg,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim1_axis0_neg);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim1_axis0_neg());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim1_axis0_neg) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim1_axis0_neg();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim1_axis0_neg);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim1_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8216,12 +8781,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim4_axis0_2,
                            local_response_normalization_v1_2::is_ignored_axis_dim4_axis0_2,
-                           local_response_normalization_v1_2::examples_axis_dim4_axis0_2);
+                           local_response_normalization_v1_2::get_examples_axis_dim4_axis0_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim4_axis0_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim4_axis0_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim4_axis0_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim4_axis0_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8231,12 +8796,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim4_axis0_neg_2,
                            local_response_normalization_v1_2::is_ignored_axis_dim4_axis0_neg_2,
-                           local_response_normalization_v1_2::examples_axis_dim4_axis0_neg_2);
+                           local_response_normalization_v1_2::get_examples_axis_dim4_axis0_neg_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim4_axis0_neg_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim4_axis0_neg_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim4_axis0_neg_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim4_axis0_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8246,12 +8811,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim4_axis1_2,
                            local_response_normalization_v1_2::is_ignored_axis_dim4_axis1_2,
-                           local_response_normalization_v1_2::examples_axis_dim4_axis1_2);
+                           local_response_normalization_v1_2::get_examples_axis_dim4_axis1_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim4_axis1_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim4_axis1_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim4_axis1_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim4_axis1_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8261,12 +8826,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim4_axis1_neg_2,
                            local_response_normalization_v1_2::is_ignored_axis_dim4_axis1_neg_2,
-                           local_response_normalization_v1_2::examples_axis_dim4_axis1_neg_2);
+                           local_response_normalization_v1_2::get_examples_axis_dim4_axis1_neg_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim4_axis1_neg_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim4_axis1_neg_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim4_axis1_neg_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim4_axis1_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8276,12 +8841,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim4_axis2_2,
                            local_response_normalization_v1_2::is_ignored_axis_dim4_axis2_2,
-                           local_response_normalization_v1_2::examples_axis_dim4_axis2_2);
+                           local_response_normalization_v1_2::get_examples_axis_dim4_axis2_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim4_axis2_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim4_axis2_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim4_axis2_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim4_axis2_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8291,12 +8856,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim4_axis2_neg_2,
                            local_response_normalization_v1_2::is_ignored_axis_dim4_axis2_neg_2,
-                           local_response_normalization_v1_2::examples_axis_dim4_axis2_neg_2);
+                           local_response_normalization_v1_2::get_examples_axis_dim4_axis2_neg_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim4_axis2_neg_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim4_axis2_neg_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim4_axis2_neg_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim4_axis2_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8306,12 +8871,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim4_axis3_2,
                            local_response_normalization_v1_2::is_ignored_axis_dim4_axis3_2,
-                           local_response_normalization_v1_2::examples_axis_dim4_axis3_2);
+                           local_response_normalization_v1_2::get_examples_axis_dim4_axis3_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim4_axis3_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim4_axis3_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim4_axis3_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim4_axis3_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8321,12 +8886,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim4_axis3_neg_2,
                            local_response_normalization_v1_2::is_ignored_axis_dim4_axis3_neg_2,
-                           local_response_normalization_v1_2::examples_axis_dim4_axis3_neg_2);
+                           local_response_normalization_v1_2::get_examples_axis_dim4_axis3_neg_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim4_axis3_neg_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim4_axis3_neg_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim4_axis3_neg_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim4_axis3_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8336,12 +8901,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim3_axis0_2,
                            local_response_normalization_v1_2::is_ignored_axis_dim3_axis0_2,
-                           local_response_normalization_v1_2::examples_axis_dim3_axis0_2);
+                           local_response_normalization_v1_2::get_examples_axis_dim3_axis0_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim3_axis0_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim3_axis0_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim3_axis0_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim3_axis0_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8351,12 +8916,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim3_axis0_neg_2,
                            local_response_normalization_v1_2::is_ignored_axis_dim3_axis0_neg_2,
-                           local_response_normalization_v1_2::examples_axis_dim3_axis0_neg_2);
+                           local_response_normalization_v1_2::get_examples_axis_dim3_axis0_neg_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim3_axis0_neg_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim3_axis0_neg_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim3_axis0_neg_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim3_axis0_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8366,12 +8931,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim3_axis1_2,
                            local_response_normalization_v1_2::is_ignored_axis_dim3_axis1_2,
-                           local_response_normalization_v1_2::examples_axis_dim3_axis1_2);
+                           local_response_normalization_v1_2::get_examples_axis_dim3_axis1_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim3_axis1_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim3_axis1_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim3_axis1_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim3_axis1_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8381,12 +8946,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim3_axis1_neg_2,
                            local_response_normalization_v1_2::is_ignored_axis_dim3_axis1_neg_2,
-                           local_response_normalization_v1_2::examples_axis_dim3_axis1_neg_2);
+                           local_response_normalization_v1_2::get_examples_axis_dim3_axis1_neg_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim3_axis1_neg_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim3_axis1_neg_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim3_axis1_neg_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim3_axis1_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8396,12 +8961,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim3_axis2_2,
                            local_response_normalization_v1_2::is_ignored_axis_dim3_axis2_2,
-                           local_response_normalization_v1_2::examples_axis_dim3_axis2_2);
+                           local_response_normalization_v1_2::get_examples_axis_dim3_axis2_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim3_axis2_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim3_axis2_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim3_axis2_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim3_axis2_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8411,12 +8976,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim3_axis2_neg_2,
                            local_response_normalization_v1_2::is_ignored_axis_dim3_axis2_neg_2,
-                           local_response_normalization_v1_2::examples_axis_dim3_axis2_neg_2);
+                           local_response_normalization_v1_2::get_examples_axis_dim3_axis2_neg_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim3_axis2_neg_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim3_axis2_neg_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim3_axis2_neg_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim3_axis2_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8426,12 +8991,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim2_axis0_2,
                            local_response_normalization_v1_2::is_ignored_axis_dim2_axis0_2,
-                           local_response_normalization_v1_2::examples_axis_dim2_axis0_2);
+                           local_response_normalization_v1_2::get_examples_axis_dim2_axis0_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim2_axis0_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim2_axis0_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim2_axis0_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim2_axis0_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8441,12 +9006,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim2_axis0_neg_2,
                            local_response_normalization_v1_2::is_ignored_axis_dim2_axis0_neg_2,
-                           local_response_normalization_v1_2::examples_axis_dim2_axis0_neg_2);
+                           local_response_normalization_v1_2::get_examples_axis_dim2_axis0_neg_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim2_axis0_neg_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim2_axis0_neg_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim2_axis0_neg_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim2_axis0_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8456,12 +9021,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim2_axis1_2,
                            local_response_normalization_v1_2::is_ignored_axis_dim2_axis1_2,
-                           local_response_normalization_v1_2::examples_axis_dim2_axis1_2);
+                           local_response_normalization_v1_2::get_examples_axis_dim2_axis1_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim2_axis1_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim2_axis1_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim2_axis1_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim2_axis1_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8471,12 +9036,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim2_axis1_neg_2,
                            local_response_normalization_v1_2::is_ignored_axis_dim2_axis1_neg_2,
-                           local_response_normalization_v1_2::examples_axis_dim2_axis1_neg_2);
+                           local_response_normalization_v1_2::get_examples_axis_dim2_axis1_neg_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim2_axis1_neg_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim2_axis1_neg_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim2_axis1_neg_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim2_axis1_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8486,12 +9051,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim1_axis0_2,
                            local_response_normalization_v1_2::is_ignored_axis_dim1_axis0_2,
-                           local_response_normalization_v1_2::examples_axis_dim1_axis0_2);
+                           local_response_normalization_v1_2::get_examples_axis_dim1_axis0_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim1_axis0_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim1_axis0_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim1_axis0_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim1_axis0_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8501,12 +9066,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim1_axis0_neg_2,
                            local_response_normalization_v1_2::is_ignored_axis_dim1_axis0_neg_2,
-                           local_response_normalization_v1_2::examples_axis_dim1_axis0_neg_2);
+                           local_response_normalization_v1_2::get_examples_axis_dim1_axis0_neg_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim1_axis0_neg_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim1_axis0_neg_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim1_axis0_neg_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim1_axis0_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8516,12 +9081,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis0_2,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis0_2,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis0_2);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis0_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim4_axis0_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis0_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis0_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis0_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8531,12 +9096,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis0_neg_2,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis0_neg_2,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis0_neg_2);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis0_neg_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim4_axis0_neg_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis0_neg_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis0_neg_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis0_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8546,12 +9111,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis1_2,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis1_2,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis1_2);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis1_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim4_axis1_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis1_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis1_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis1_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8561,12 +9126,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis1_neg_2,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis1_neg_2,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis1_neg_2);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis1_neg_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim4_axis1_neg_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis1_neg_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis1_neg_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis1_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8576,12 +9141,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis2_2,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis2_2,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis2_2);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis2_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim4_axis2_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis2_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis2_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis2_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8591,12 +9156,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis2_neg_2,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis2_neg_2,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis2_neg_2);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis2_neg_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim4_axis2_neg_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis2_neg_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis2_neg_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis2_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8606,12 +9171,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis3_2,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis3_2,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis3_2);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis3_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim4_axis3_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis3_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis3_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis3_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8621,12 +9186,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis3_neg_2,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis3_neg_2,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis3_neg_2);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis3_neg_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim4_axis3_neg_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis3_neg_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis3_neg_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis3_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8636,12 +9201,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis0_2,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis0_2,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis0_2);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis0_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim3_axis0_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis0_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis0_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis0_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8651,12 +9216,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis0_neg_2,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis0_neg_2,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis0_neg_2);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis0_neg_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim3_axis0_neg_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis0_neg_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis0_neg_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis0_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8666,12 +9231,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis1_2,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis1_2,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis1_2);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis1_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim3_axis1_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis1_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis1_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis1_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8681,12 +9246,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis1_neg_2,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis1_neg_2,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis1_neg_2);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis1_neg_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim3_axis1_neg_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis1_neg_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis1_neg_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis1_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8696,12 +9261,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis2_2,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis2_2,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis2_2);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis2_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim3_axis2_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis2_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis2_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis2_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8711,12 +9276,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis2_neg_2,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis2_neg_2,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis2_neg_2);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis2_neg_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim3_axis2_neg_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis2_neg_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis2_neg_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis2_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8726,12 +9291,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim2_axis0_2,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim2_axis0_2,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim2_axis0_2);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim2_axis0_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim2_axis0_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim2_axis0_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim2_axis0_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim2_axis0_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8741,12 +9306,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim2_axis0_neg_2,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim2_axis0_neg_2,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim2_axis0_neg_2);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim2_axis0_neg_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim2_axis0_neg_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim2_axis0_neg_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim2_axis0_neg_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim2_axis0_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8756,12 +9321,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim2_axis1_2,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim2_axis1_2,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim2_axis1_2);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim2_axis1_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim2_axis1_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim2_axis1_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim2_axis1_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim2_axis1_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8771,12 +9336,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim2_axis1_neg_2,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim2_axis1_neg_2,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim2_axis1_neg_2);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim2_axis1_neg_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim2_axis1_neg_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim2_axis1_neg_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim2_axis1_neg_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim2_axis1_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8786,12 +9351,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim1_axis0_2,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim1_axis0_2,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim1_axis0_2);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim1_axis0_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim1_axis0_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim1_axis0_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim1_axis0_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim1_axis0_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8801,12 +9366,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim1_axis0_neg_2,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim1_axis0_neg_2,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim1_axis0_neg_2);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim1_axis0_neg_2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim1_axis0_neg_2) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim1_axis0_neg_2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim1_axis0_neg_2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim1_axis0_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8816,12 +9381,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim4_axis0_3,
                            local_response_normalization_v1_2::is_ignored_axis_dim4_axis0_3,
-                           local_response_normalization_v1_2::examples_axis_dim4_axis0_3);
+                           local_response_normalization_v1_2::get_examples_axis_dim4_axis0_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim4_axis0_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim4_axis0_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim4_axis0_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim4_axis0_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8831,12 +9396,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim4_axis0_neg_3,
                            local_response_normalization_v1_2::is_ignored_axis_dim4_axis0_neg_3,
-                           local_response_normalization_v1_2::examples_axis_dim4_axis0_neg_3);
+                           local_response_normalization_v1_2::get_examples_axis_dim4_axis0_neg_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim4_axis0_neg_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim4_axis0_neg_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim4_axis0_neg_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim4_axis0_neg_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8846,12 +9411,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim4_axis1_3,
                            local_response_normalization_v1_2::is_ignored_axis_dim4_axis1_3,
-                           local_response_normalization_v1_2::examples_axis_dim4_axis1_3);
+                           local_response_normalization_v1_2::get_examples_axis_dim4_axis1_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim4_axis1_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim4_axis1_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim4_axis1_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim4_axis1_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8861,12 +9426,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim4_axis1_neg_3,
                            local_response_normalization_v1_2::is_ignored_axis_dim4_axis1_neg_3,
-                           local_response_normalization_v1_2::examples_axis_dim4_axis1_neg_3);
+                           local_response_normalization_v1_2::get_examples_axis_dim4_axis1_neg_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim4_axis1_neg_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim4_axis1_neg_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim4_axis1_neg_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim4_axis1_neg_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8876,12 +9441,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim4_axis2_3,
                            local_response_normalization_v1_2::is_ignored_axis_dim4_axis2_3,
-                           local_response_normalization_v1_2::examples_axis_dim4_axis2_3);
+                           local_response_normalization_v1_2::get_examples_axis_dim4_axis2_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim4_axis2_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim4_axis2_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim4_axis2_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim4_axis2_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8891,12 +9456,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim4_axis2_neg_3,
                            local_response_normalization_v1_2::is_ignored_axis_dim4_axis2_neg_3,
-                           local_response_normalization_v1_2::examples_axis_dim4_axis2_neg_3);
+                           local_response_normalization_v1_2::get_examples_axis_dim4_axis2_neg_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim4_axis2_neg_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim4_axis2_neg_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim4_axis2_neg_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim4_axis2_neg_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8906,12 +9471,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim4_axis3_3,
                            local_response_normalization_v1_2::is_ignored_axis_dim4_axis3_3,
-                           local_response_normalization_v1_2::examples_axis_dim4_axis3_3);
+                           local_response_normalization_v1_2::get_examples_axis_dim4_axis3_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim4_axis3_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim4_axis3_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim4_axis3_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim4_axis3_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8921,12 +9486,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim4_axis3_neg_3,
                            local_response_normalization_v1_2::is_ignored_axis_dim4_axis3_neg_3,
-                           local_response_normalization_v1_2::examples_axis_dim4_axis3_neg_3);
+                           local_response_normalization_v1_2::get_examples_axis_dim4_axis3_neg_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim4_axis3_neg_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim4_axis3_neg_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim4_axis3_neg_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim4_axis3_neg_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8936,12 +9501,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim3_axis0_3,
                            local_response_normalization_v1_2::is_ignored_axis_dim3_axis0_3,
-                           local_response_normalization_v1_2::examples_axis_dim3_axis0_3);
+                           local_response_normalization_v1_2::get_examples_axis_dim3_axis0_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim3_axis0_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim3_axis0_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim3_axis0_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim3_axis0_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8951,12 +9516,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim3_axis0_neg_3,
                            local_response_normalization_v1_2::is_ignored_axis_dim3_axis0_neg_3,
-                           local_response_normalization_v1_2::examples_axis_dim3_axis0_neg_3);
+                           local_response_normalization_v1_2::get_examples_axis_dim3_axis0_neg_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim3_axis0_neg_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim3_axis0_neg_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim3_axis0_neg_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim3_axis0_neg_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8966,12 +9531,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim3_axis1_3,
                            local_response_normalization_v1_2::is_ignored_axis_dim3_axis1_3,
-                           local_response_normalization_v1_2::examples_axis_dim3_axis1_3);
+                           local_response_normalization_v1_2::get_examples_axis_dim3_axis1_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim3_axis1_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim3_axis1_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim3_axis1_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim3_axis1_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8981,12 +9546,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim3_axis1_neg_3,
                            local_response_normalization_v1_2::is_ignored_axis_dim3_axis1_neg_3,
-                           local_response_normalization_v1_2::examples_axis_dim3_axis1_neg_3);
+                           local_response_normalization_v1_2::get_examples_axis_dim3_axis1_neg_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim3_axis1_neg_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim3_axis1_neg_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim3_axis1_neg_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim3_axis1_neg_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -8996,12 +9561,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim3_axis2_3,
                            local_response_normalization_v1_2::is_ignored_axis_dim3_axis2_3,
-                           local_response_normalization_v1_2::examples_axis_dim3_axis2_3);
+                           local_response_normalization_v1_2::get_examples_axis_dim3_axis2_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim3_axis2_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim3_axis2_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim3_axis2_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim3_axis2_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9011,12 +9576,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim3_axis2_neg_3,
                            local_response_normalization_v1_2::is_ignored_axis_dim3_axis2_neg_3,
-                           local_response_normalization_v1_2::examples_axis_dim3_axis2_neg_3);
+                           local_response_normalization_v1_2::get_examples_axis_dim3_axis2_neg_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim3_axis2_neg_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim3_axis2_neg_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim3_axis2_neg_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim3_axis2_neg_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9026,12 +9591,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim2_axis0_3,
                            local_response_normalization_v1_2::is_ignored_axis_dim2_axis0_3,
-                           local_response_normalization_v1_2::examples_axis_dim2_axis0_3);
+                           local_response_normalization_v1_2::get_examples_axis_dim2_axis0_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim2_axis0_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim2_axis0_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim2_axis0_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim2_axis0_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9041,12 +9606,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim2_axis0_neg_3,
                            local_response_normalization_v1_2::is_ignored_axis_dim2_axis0_neg_3,
-                           local_response_normalization_v1_2::examples_axis_dim2_axis0_neg_3);
+                           local_response_normalization_v1_2::get_examples_axis_dim2_axis0_neg_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim2_axis0_neg_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim2_axis0_neg_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim2_axis0_neg_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim2_axis0_neg_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9056,12 +9621,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim2_axis1_3,
                            local_response_normalization_v1_2::is_ignored_axis_dim2_axis1_3,
-                           local_response_normalization_v1_2::examples_axis_dim2_axis1_3);
+                           local_response_normalization_v1_2::get_examples_axis_dim2_axis1_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim2_axis1_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim2_axis1_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim2_axis1_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim2_axis1_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9071,12 +9636,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim2_axis1_neg_3,
                            local_response_normalization_v1_2::is_ignored_axis_dim2_axis1_neg_3,
-                           local_response_normalization_v1_2::examples_axis_dim2_axis1_neg_3);
+                           local_response_normalization_v1_2::get_examples_axis_dim2_axis1_neg_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim2_axis1_neg_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim2_axis1_neg_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim2_axis1_neg_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim2_axis1_neg_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9086,12 +9651,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim1_axis0_3,
                            local_response_normalization_v1_2::is_ignored_axis_dim1_axis0_3,
-                           local_response_normalization_v1_2::examples_axis_dim1_axis0_3);
+                           local_response_normalization_v1_2::get_examples_axis_dim1_axis0_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim1_axis0_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim1_axis0_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim1_axis0_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim1_axis0_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9101,12 +9666,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_dim1_axis0_neg_3,
                            local_response_normalization_v1_2::is_ignored_axis_dim1_axis0_neg_3,
-                           local_response_normalization_v1_2::examples_axis_dim1_axis0_neg_3);
+                           local_response_normalization_v1_2::get_examples_axis_dim1_axis0_neg_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_dim1_axis0_neg_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_dim1_axis0_neg_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_dim1_axis0_neg_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_dim1_axis0_neg_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9116,12 +9681,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis0_3,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis0_3,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis0_3);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis0_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim4_axis0_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis0_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis0_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis0_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9131,12 +9696,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis0_neg_3,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis0_neg_3,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis0_neg_3);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis0_neg_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim4_axis0_neg_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis0_neg_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis0_neg_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis0_neg_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9146,12 +9711,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis1_3,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis1_3,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis1_3);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis1_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim4_axis1_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis1_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis1_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis1_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9161,12 +9726,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis1_neg_3,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis1_neg_3,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis1_neg_3);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis1_neg_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim4_axis1_neg_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis1_neg_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis1_neg_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis1_neg_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9176,12 +9741,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis2_3,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis2_3,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis2_3);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis2_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim4_axis2_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis2_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis2_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis2_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9191,12 +9756,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis2_neg_3,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis2_neg_3,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis2_neg_3);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis2_neg_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim4_axis2_neg_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis2_neg_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis2_neg_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis2_neg_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9206,12 +9771,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis3_3,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis3_3,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis3_3);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis3_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim4_axis3_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis3_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis3_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis3_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9221,12 +9786,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis3_neg_3,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis3_neg_3,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis3_neg_3);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis3_neg_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim4_axis3_neg_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim4_axis3_neg_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis3_neg_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis3_neg_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9236,12 +9801,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis0_3,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis0_3,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis0_3);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis0_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim3_axis0_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis0_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis0_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis0_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9251,12 +9816,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis0_neg_3,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis0_neg_3,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis0_neg_3);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis0_neg_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim3_axis0_neg_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis0_neg_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis0_neg_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis0_neg_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9266,12 +9831,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis1_3,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis1_3,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis1_3);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis1_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim3_axis1_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis1_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis1_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis1_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9281,12 +9846,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis1_neg_3,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis1_neg_3,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis1_neg_3);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis1_neg_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim3_axis1_neg_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis1_neg_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis1_neg_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis1_neg_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9296,12 +9861,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis2_3,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis2_3,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis2_3);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis2_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim3_axis2_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis2_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis2_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis2_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9311,12 +9876,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis2_neg_3,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis2_neg_3,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis2_neg_3);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis2_neg_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim3_axis2_neg_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim3_axis2_neg_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis2_neg_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis2_neg_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9326,12 +9891,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim2_axis0_3,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim2_axis0_3,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim2_axis0_3);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim2_axis0_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim2_axis0_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim2_axis0_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim2_axis0_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim2_axis0_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9341,12 +9906,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim2_axis0_neg_3,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim2_axis0_neg_3,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim2_axis0_neg_3);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim2_axis0_neg_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim2_axis0_neg_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim2_axis0_neg_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim2_axis0_neg_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim2_axis0_neg_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9356,12 +9921,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim2_axis1_3,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim2_axis1_3,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim2_axis1_3);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim2_axis1_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim2_axis1_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim2_axis1_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim2_axis1_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim2_axis1_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9371,12 +9936,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim2_axis1_neg_3,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim2_axis1_neg_3,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim2_axis1_neg_3);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim2_axis1_neg_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim2_axis1_neg_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim2_axis1_neg_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim2_axis1_neg_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim2_axis1_neg_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9386,12 +9951,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim1_axis0_3,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim1_axis0_3,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim1_axis0_3);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim1_axis0_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim1_axis0_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim1_axis0_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim1_axis0_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim1_axis0_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9401,12 +9966,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_axis_relaxed_dim1_axis0_neg_3,
                            local_response_normalization_v1_2::is_ignored_axis_relaxed_dim1_axis0_neg_3,
-                           local_response_normalization_v1_2::examples_axis_relaxed_dim1_axis0_neg_3);
+                           local_response_normalization_v1_2::get_examples_axis_relaxed_dim1_axis0_neg_3());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_axis_relaxed_dim1_axis0_neg_3) {
   const Model model = local_response_normalization_v1_2::createTestModel_axis_relaxed_dim1_axis0_neg_3();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_axis_relaxed_dim1_axis0_neg_3);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_axis_relaxed_dim1_axis0_neg_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9416,12 +9981,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_dim1_axis0,
                            local_response_normalization_v1_2::is_ignored_dim1_axis0,
-                           local_response_normalization_v1_2::examples_dim1_axis0);
+                           local_response_normalization_v1_2::get_examples_dim1_axis0());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_dim1_axis0) {
   const Model model = local_response_normalization_v1_2::createTestModel_dim1_axis0();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_dim1_axis0);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_dim1_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9431,12 +9996,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_dim2_axis1,
                            local_response_normalization_v1_2::is_ignored_dim2_axis1,
-                           local_response_normalization_v1_2::examples_dim2_axis1);
+                           local_response_normalization_v1_2::get_examples_dim2_axis1());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_dim2_axis1) {
   const Model model = local_response_normalization_v1_2::createTestModel_dim2_axis1();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_dim2_axis1);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_dim2_axis1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9446,12 +10011,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_dim3_axis2,
                            local_response_normalization_v1_2::is_ignored_dim3_axis2,
-                           local_response_normalization_v1_2::examples_dim3_axis2);
+                           local_response_normalization_v1_2::get_examples_dim3_axis2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_dim3_axis2) {
   const Model model = local_response_normalization_v1_2::createTestModel_dim3_axis2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_dim3_axis2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_dim3_axis2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9461,12 +10026,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_relaxed_dim1_axis0,
                            local_response_normalization_v1_2::is_ignored_relaxed_dim1_axis0,
-                           local_response_normalization_v1_2::examples_relaxed_dim1_axis0);
+                           local_response_normalization_v1_2::get_examples_relaxed_dim1_axis0());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_relaxed_dim1_axis0) {
   const Model model = local_response_normalization_v1_2::createTestModel_relaxed_dim1_axis0();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_relaxed_dim1_axis0);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_relaxed_dim1_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9476,12 +10041,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_relaxed_dim2_axis1,
                            local_response_normalization_v1_2::is_ignored_relaxed_dim2_axis1,
-                           local_response_normalization_v1_2::examples_relaxed_dim2_axis1);
+                           local_response_normalization_v1_2::get_examples_relaxed_dim2_axis1());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_relaxed_dim2_axis1) {
   const Model model = local_response_normalization_v1_2::createTestModel_relaxed_dim2_axis1();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_relaxed_dim2_axis1);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_relaxed_dim2_axis1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9491,12 +10056,12 @@
   generated_tests::Execute(device,
                            local_response_normalization_v1_2::createTestModel_relaxed_dim3_axis2,
                            local_response_normalization_v1_2::is_ignored_relaxed_dim3_axis2,
-                           local_response_normalization_v1_2::examples_relaxed_dim3_axis2);
+                           local_response_normalization_v1_2::get_examples_relaxed_dim3_axis2());
 }
 
 TEST_F(ValidationTest, local_response_normalization_v1_2_relaxed_dim3_axis2) {
   const Model model = local_response_normalization_v1_2::createTestModel_relaxed_dim3_axis2();
-  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::examples_relaxed_dim3_axis2);
+  const std::vector<Request> requests = createRequests(local_response_normalization_v1_2::get_examples_relaxed_dim3_axis2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9514,12 +10079,12 @@
   generated_tests::Execute(device,
                            logistic_float16_1::createTestModel,
                            logistic_float16_1::is_ignored,
-                           logistic_float16_1::examples);
+                           logistic_float16_1::get_examples());
 }
 
 TEST_F(ValidationTest, logistic_float16_1) {
   const Model model = logistic_float16_1::createTestModel();
-  const std::vector<Request> requests = createRequests(logistic_float16_1::examples);
+  const std::vector<Request> requests = createRequests(logistic_float16_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9537,12 +10102,12 @@
   generated_tests::Execute(device,
                            logistic_float16_2::createTestModel,
                            logistic_float16_2::is_ignored,
-                           logistic_float16_2::examples);
+                           logistic_float16_2::get_examples());
 }
 
 TEST_F(ValidationTest, logistic_float16_2) {
   const Model model = logistic_float16_2::createTestModel();
-  const std::vector<Request> requests = createRequests(logistic_float16_2::examples);
+  const std::vector<Request> requests = createRequests(logistic_float16_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9560,12 +10125,12 @@
   generated_tests::Execute(device,
                            lsh_projection_3_relaxed::createTestModel,
                            lsh_projection_3_relaxed::is_ignored,
-                           lsh_projection_3_relaxed::examples);
+                           lsh_projection_3_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, lsh_projection_3_relaxed) {
   const Model model = lsh_projection_3_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(lsh_projection_3_relaxed::examples);
+  const std::vector<Request> requests = createRequests(lsh_projection_3_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9583,12 +10148,12 @@
   generated_tests::Execute(device,
                            lsh_projection_4_relaxed::createTestModel,
                            lsh_projection_4_relaxed::is_ignored,
-                           lsh_projection_4_relaxed::examples);
+                           lsh_projection_4_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, lsh_projection_4_relaxed) {
   const Model model = lsh_projection_4_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(lsh_projection_4_relaxed::examples);
+  const std::vector<Request> requests = createRequests(lsh_projection_4_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9606,12 +10171,12 @@
   generated_tests::Execute(device,
                            lsh_projection_deprecated::createTestModel,
                            lsh_projection_deprecated::is_ignored,
-                           lsh_projection_deprecated::examples);
+                           lsh_projection_deprecated::get_examples());
 }
 
 TEST_F(ValidationTest, lsh_projection_deprecated) {
   const Model model = lsh_projection_deprecated::createTestModel();
-  const std::vector<Request> requests = createRequests(lsh_projection_deprecated::examples);
+  const std::vector<Request> requests = createRequests(lsh_projection_deprecated::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9629,12 +10194,12 @@
   generated_tests::Execute(device,
                            max_pool_v1_2::createTestModel_nhwc,
                            max_pool_v1_2::is_ignored_nhwc,
-                           max_pool_v1_2::examples_nhwc);
+                           max_pool_v1_2::get_examples_nhwc());
 }
 
 TEST_F(ValidationTest, max_pool_v1_2_nhwc) {
   const Model model = max_pool_v1_2::createTestModel_nhwc();
-  const std::vector<Request> requests = createRequests(max_pool_v1_2::examples_nhwc);
+  const std::vector<Request> requests = createRequests(max_pool_v1_2::get_examples_nhwc());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9644,12 +10209,12 @@
   generated_tests::Execute(device,
                            max_pool_v1_2::createTestModel_nhwc_relaxed,
                            max_pool_v1_2::is_ignored_nhwc_relaxed,
-                           max_pool_v1_2::examples_nhwc_relaxed);
+                           max_pool_v1_2::get_examples_nhwc_relaxed());
 }
 
 TEST_F(ValidationTest, max_pool_v1_2_nhwc_relaxed) {
   const Model model = max_pool_v1_2::createTestModel_nhwc_relaxed();
-  const std::vector<Request> requests = createRequests(max_pool_v1_2::examples_nhwc_relaxed);
+  const std::vector<Request> requests = createRequests(max_pool_v1_2::get_examples_nhwc_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9659,12 +10224,12 @@
   generated_tests::Execute(device,
                            max_pool_v1_2::createTestModel_nhwc_quant8,
                            max_pool_v1_2::is_ignored_nhwc_quant8,
-                           max_pool_v1_2::examples_nhwc_quant8);
+                           max_pool_v1_2::get_examples_nhwc_quant8());
 }
 
 TEST_F(ValidationTest, max_pool_v1_2_nhwc_quant8) {
   const Model model = max_pool_v1_2::createTestModel_nhwc_quant8();
-  const std::vector<Request> requests = createRequests(max_pool_v1_2::examples_nhwc_quant8);
+  const std::vector<Request> requests = createRequests(max_pool_v1_2::get_examples_nhwc_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9674,12 +10239,12 @@
   generated_tests::Execute(device,
                            max_pool_v1_2::createTestModel_nchw,
                            max_pool_v1_2::is_ignored_nchw,
-                           max_pool_v1_2::examples_nchw);
+                           max_pool_v1_2::get_examples_nchw());
 }
 
 TEST_F(ValidationTest, max_pool_v1_2_nchw) {
   const Model model = max_pool_v1_2::createTestModel_nchw();
-  const std::vector<Request> requests = createRequests(max_pool_v1_2::examples_nchw);
+  const std::vector<Request> requests = createRequests(max_pool_v1_2::get_examples_nchw());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9689,12 +10254,12 @@
   generated_tests::Execute(device,
                            max_pool_v1_2::createTestModel_nchw_relaxed,
                            max_pool_v1_2::is_ignored_nchw_relaxed,
-                           max_pool_v1_2::examples_nchw_relaxed);
+                           max_pool_v1_2::get_examples_nchw_relaxed());
 }
 
 TEST_F(ValidationTest, max_pool_v1_2_nchw_relaxed) {
   const Model model = max_pool_v1_2::createTestModel_nchw_relaxed();
-  const std::vector<Request> requests = createRequests(max_pool_v1_2::examples_nchw_relaxed);
+  const std::vector<Request> requests = createRequests(max_pool_v1_2::get_examples_nchw_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9704,12 +10269,12 @@
   generated_tests::Execute(device,
                            max_pool_v1_2::createTestModel_nchw_quant8,
                            max_pool_v1_2::is_ignored_nchw_quant8,
-                           max_pool_v1_2::examples_nchw_quant8);
+                           max_pool_v1_2::get_examples_nchw_quant8());
 }
 
 TEST_F(ValidationTest, max_pool_v1_2_nchw_quant8) {
   const Model model = max_pool_v1_2::createTestModel_nchw_quant8();
-  const std::vector<Request> requests = createRequests(max_pool_v1_2::examples_nchw_quant8);
+  const std::vector<Request> requests = createRequests(max_pool_v1_2::get_examples_nchw_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9719,12 +10284,12 @@
   generated_tests::Execute(device,
                            max_pool_v1_2::createTestModel_nhwc_2,
                            max_pool_v1_2::is_ignored_nhwc_2,
-                           max_pool_v1_2::examples_nhwc_2);
+                           max_pool_v1_2::get_examples_nhwc_2());
 }
 
 TEST_F(ValidationTest, max_pool_v1_2_nhwc_2) {
   const Model model = max_pool_v1_2::createTestModel_nhwc_2();
-  const std::vector<Request> requests = createRequests(max_pool_v1_2::examples_nhwc_2);
+  const std::vector<Request> requests = createRequests(max_pool_v1_2::get_examples_nhwc_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9734,12 +10299,12 @@
   generated_tests::Execute(device,
                            max_pool_v1_2::createTestModel_nhwc_relaxed_2,
                            max_pool_v1_2::is_ignored_nhwc_relaxed_2,
-                           max_pool_v1_2::examples_nhwc_relaxed_2);
+                           max_pool_v1_2::get_examples_nhwc_relaxed_2());
 }
 
 TEST_F(ValidationTest, max_pool_v1_2_nhwc_relaxed_2) {
   const Model model = max_pool_v1_2::createTestModel_nhwc_relaxed_2();
-  const std::vector<Request> requests = createRequests(max_pool_v1_2::examples_nhwc_relaxed_2);
+  const std::vector<Request> requests = createRequests(max_pool_v1_2::get_examples_nhwc_relaxed_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9749,12 +10314,12 @@
   generated_tests::Execute(device,
                            max_pool_v1_2::createTestModel_nhwc_quant8_2,
                            max_pool_v1_2::is_ignored_nhwc_quant8_2,
-                           max_pool_v1_2::examples_nhwc_quant8_2);
+                           max_pool_v1_2::get_examples_nhwc_quant8_2());
 }
 
 TEST_F(ValidationTest, max_pool_v1_2_nhwc_quant8_2) {
   const Model model = max_pool_v1_2::createTestModel_nhwc_quant8_2();
-  const std::vector<Request> requests = createRequests(max_pool_v1_2::examples_nhwc_quant8_2);
+  const std::vector<Request> requests = createRequests(max_pool_v1_2::get_examples_nhwc_quant8_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9764,12 +10329,12 @@
   generated_tests::Execute(device,
                            max_pool_v1_2::createTestModel_nchw_2,
                            max_pool_v1_2::is_ignored_nchw_2,
-                           max_pool_v1_2::examples_nchw_2);
+                           max_pool_v1_2::get_examples_nchw_2());
 }
 
 TEST_F(ValidationTest, max_pool_v1_2_nchw_2) {
   const Model model = max_pool_v1_2::createTestModel_nchw_2();
-  const std::vector<Request> requests = createRequests(max_pool_v1_2::examples_nchw_2);
+  const std::vector<Request> requests = createRequests(max_pool_v1_2::get_examples_nchw_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9779,12 +10344,12 @@
   generated_tests::Execute(device,
                            max_pool_v1_2::createTestModel_nchw_relaxed_2,
                            max_pool_v1_2::is_ignored_nchw_relaxed_2,
-                           max_pool_v1_2::examples_nchw_relaxed_2);
+                           max_pool_v1_2::get_examples_nchw_relaxed_2());
 }
 
 TEST_F(ValidationTest, max_pool_v1_2_nchw_relaxed_2) {
   const Model model = max_pool_v1_2::createTestModel_nchw_relaxed_2();
-  const std::vector<Request> requests = createRequests(max_pool_v1_2::examples_nchw_relaxed_2);
+  const std::vector<Request> requests = createRequests(max_pool_v1_2::get_examples_nchw_relaxed_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9794,12 +10359,12 @@
   generated_tests::Execute(device,
                            max_pool_v1_2::createTestModel_nchw_quant8_2,
                            max_pool_v1_2::is_ignored_nchw_quant8_2,
-                           max_pool_v1_2::examples_nchw_quant8_2);
+                           max_pool_v1_2::get_examples_nchw_quant8_2());
 }
 
 TEST_F(ValidationTest, max_pool_v1_2_nchw_quant8_2) {
   const Model model = max_pool_v1_2::createTestModel_nchw_quant8_2();
-  const std::vector<Request> requests = createRequests(max_pool_v1_2::examples_nchw_quant8_2);
+  const std::vector<Request> requests = createRequests(max_pool_v1_2::get_examples_nchw_quant8_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9809,12 +10374,12 @@
   generated_tests::Execute(device,
                            max_pool_v1_2::createTestModel_nhwc_3,
                            max_pool_v1_2::is_ignored_nhwc_3,
-                           max_pool_v1_2::examples_nhwc_3);
+                           max_pool_v1_2::get_examples_nhwc_3());
 }
 
 TEST_F(ValidationTest, max_pool_v1_2_nhwc_3) {
   const Model model = max_pool_v1_2::createTestModel_nhwc_3();
-  const std::vector<Request> requests = createRequests(max_pool_v1_2::examples_nhwc_3);
+  const std::vector<Request> requests = createRequests(max_pool_v1_2::get_examples_nhwc_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9824,12 +10389,12 @@
   generated_tests::Execute(device,
                            max_pool_v1_2::createTestModel_nhwc_relaxed_3,
                            max_pool_v1_2::is_ignored_nhwc_relaxed_3,
-                           max_pool_v1_2::examples_nhwc_relaxed_3);
+                           max_pool_v1_2::get_examples_nhwc_relaxed_3());
 }
 
 TEST_F(ValidationTest, max_pool_v1_2_nhwc_relaxed_3) {
   const Model model = max_pool_v1_2::createTestModel_nhwc_relaxed_3();
-  const std::vector<Request> requests = createRequests(max_pool_v1_2::examples_nhwc_relaxed_3);
+  const std::vector<Request> requests = createRequests(max_pool_v1_2::get_examples_nhwc_relaxed_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9839,12 +10404,12 @@
   generated_tests::Execute(device,
                            max_pool_v1_2::createTestModel_nhwc_quant8_3,
                            max_pool_v1_2::is_ignored_nhwc_quant8_3,
-                           max_pool_v1_2::examples_nhwc_quant8_3);
+                           max_pool_v1_2::get_examples_nhwc_quant8_3());
 }
 
 TEST_F(ValidationTest, max_pool_v1_2_nhwc_quant8_3) {
   const Model model = max_pool_v1_2::createTestModel_nhwc_quant8_3();
-  const std::vector<Request> requests = createRequests(max_pool_v1_2::examples_nhwc_quant8_3);
+  const std::vector<Request> requests = createRequests(max_pool_v1_2::get_examples_nhwc_quant8_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9854,12 +10419,12 @@
   generated_tests::Execute(device,
                            max_pool_v1_2::createTestModel_nchw_3,
                            max_pool_v1_2::is_ignored_nchw_3,
-                           max_pool_v1_2::examples_nchw_3);
+                           max_pool_v1_2::get_examples_nchw_3());
 }
 
 TEST_F(ValidationTest, max_pool_v1_2_nchw_3) {
   const Model model = max_pool_v1_2::createTestModel_nchw_3();
-  const std::vector<Request> requests = createRequests(max_pool_v1_2::examples_nchw_3);
+  const std::vector<Request> requests = createRequests(max_pool_v1_2::get_examples_nchw_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9869,12 +10434,12 @@
   generated_tests::Execute(device,
                            max_pool_v1_2::createTestModel_nchw_relaxed_3,
                            max_pool_v1_2::is_ignored_nchw_relaxed_3,
-                           max_pool_v1_2::examples_nchw_relaxed_3);
+                           max_pool_v1_2::get_examples_nchw_relaxed_3());
 }
 
 TEST_F(ValidationTest, max_pool_v1_2_nchw_relaxed_3) {
   const Model model = max_pool_v1_2::createTestModel_nchw_relaxed_3();
-  const std::vector<Request> requests = createRequests(max_pool_v1_2::examples_nchw_relaxed_3);
+  const std::vector<Request> requests = createRequests(max_pool_v1_2::get_examples_nchw_relaxed_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9884,12 +10449,12 @@
   generated_tests::Execute(device,
                            max_pool_v1_2::createTestModel_nchw_quant8_3,
                            max_pool_v1_2::is_ignored_nchw_quant8_3,
-                           max_pool_v1_2::examples_nchw_quant8_3);
+                           max_pool_v1_2::get_examples_nchw_quant8_3());
 }
 
 TEST_F(ValidationTest, max_pool_v1_2_nchw_quant8_3) {
   const Model model = max_pool_v1_2::createTestModel_nchw_quant8_3();
-  const std::vector<Request> requests = createRequests(max_pool_v1_2::examples_nchw_quant8_3);
+  const std::vector<Request> requests = createRequests(max_pool_v1_2::get_examples_nchw_quant8_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9899,12 +10464,12 @@
   generated_tests::Execute(device,
                            max_pool_v1_2::createTestModel_nhwc_4,
                            max_pool_v1_2::is_ignored_nhwc_4,
-                           max_pool_v1_2::examples_nhwc_4);
+                           max_pool_v1_2::get_examples_nhwc_4());
 }
 
 TEST_F(ValidationTest, max_pool_v1_2_nhwc_4) {
   const Model model = max_pool_v1_2::createTestModel_nhwc_4();
-  const std::vector<Request> requests = createRequests(max_pool_v1_2::examples_nhwc_4);
+  const std::vector<Request> requests = createRequests(max_pool_v1_2::get_examples_nhwc_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9914,12 +10479,12 @@
   generated_tests::Execute(device,
                            max_pool_v1_2::createTestModel_nhwc_relaxed_4,
                            max_pool_v1_2::is_ignored_nhwc_relaxed_4,
-                           max_pool_v1_2::examples_nhwc_relaxed_4);
+                           max_pool_v1_2::get_examples_nhwc_relaxed_4());
 }
 
 TEST_F(ValidationTest, max_pool_v1_2_nhwc_relaxed_4) {
   const Model model = max_pool_v1_2::createTestModel_nhwc_relaxed_4();
-  const std::vector<Request> requests = createRequests(max_pool_v1_2::examples_nhwc_relaxed_4);
+  const std::vector<Request> requests = createRequests(max_pool_v1_2::get_examples_nhwc_relaxed_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9929,12 +10494,12 @@
   generated_tests::Execute(device,
                            max_pool_v1_2::createTestModel_nhwc_quant8_4,
                            max_pool_v1_2::is_ignored_nhwc_quant8_4,
-                           max_pool_v1_2::examples_nhwc_quant8_4);
+                           max_pool_v1_2::get_examples_nhwc_quant8_4());
 }
 
 TEST_F(ValidationTest, max_pool_v1_2_nhwc_quant8_4) {
   const Model model = max_pool_v1_2::createTestModel_nhwc_quant8_4();
-  const std::vector<Request> requests = createRequests(max_pool_v1_2::examples_nhwc_quant8_4);
+  const std::vector<Request> requests = createRequests(max_pool_v1_2::get_examples_nhwc_quant8_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9944,12 +10509,12 @@
   generated_tests::Execute(device,
                            max_pool_v1_2::createTestModel_nchw_4,
                            max_pool_v1_2::is_ignored_nchw_4,
-                           max_pool_v1_2::examples_nchw_4);
+                           max_pool_v1_2::get_examples_nchw_4());
 }
 
 TEST_F(ValidationTest, max_pool_v1_2_nchw_4) {
   const Model model = max_pool_v1_2::createTestModel_nchw_4();
-  const std::vector<Request> requests = createRequests(max_pool_v1_2::examples_nchw_4);
+  const std::vector<Request> requests = createRequests(max_pool_v1_2::get_examples_nchw_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9959,12 +10524,12 @@
   generated_tests::Execute(device,
                            max_pool_v1_2::createTestModel_nchw_relaxed_4,
                            max_pool_v1_2::is_ignored_nchw_relaxed_4,
-                           max_pool_v1_2::examples_nchw_relaxed_4);
+                           max_pool_v1_2::get_examples_nchw_relaxed_4());
 }
 
 TEST_F(ValidationTest, max_pool_v1_2_nchw_relaxed_4) {
   const Model model = max_pool_v1_2::createTestModel_nchw_relaxed_4();
-  const std::vector<Request> requests = createRequests(max_pool_v1_2::examples_nchw_relaxed_4);
+  const std::vector<Request> requests = createRequests(max_pool_v1_2::get_examples_nchw_relaxed_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9974,12 +10539,12 @@
   generated_tests::Execute(device,
                            max_pool_v1_2::createTestModel_nchw_quant8_4,
                            max_pool_v1_2::is_ignored_nchw_quant8_4,
-                           max_pool_v1_2::examples_nchw_quant8_4);
+                           max_pool_v1_2::get_examples_nchw_quant8_4());
 }
 
 TEST_F(ValidationTest, max_pool_v1_2_nchw_quant8_4) {
   const Model model = max_pool_v1_2::createTestModel_nchw_quant8_4();
-  const std::vector<Request> requests = createRequests(max_pool_v1_2::examples_nchw_quant8_4);
+  const std::vector<Request> requests = createRequests(max_pool_v1_2::get_examples_nchw_quant8_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -9997,12 +10562,12 @@
   generated_tests::Execute(device,
                            maximum::createTestModel,
                            maximum::is_ignored,
-                           maximum::examples_simple);
+                           maximum::get_examples_simple());
 }
 
 TEST_F(ValidationTest, maximum_simple) {
   const Model model = maximum::createTestModel();
-  const std::vector<Request> requests = createRequests(maximum::examples_simple);
+  const std::vector<Request> requests = createRequests(maximum::get_examples_simple());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10012,12 +10577,12 @@
   generated_tests::Execute(device,
                            maximum::createTestModel_relaxed,
                            maximum::is_ignored_relaxed,
-                           maximum::examples_simple_relaxed);
+                           maximum::get_examples_simple_relaxed());
 }
 
 TEST_F(ValidationTest, maximum_simple_relaxed) {
   const Model model = maximum::createTestModel_relaxed();
-  const std::vector<Request> requests = createRequests(maximum::examples_simple_relaxed);
+  const std::vector<Request> requests = createRequests(maximum::get_examples_simple_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10027,12 +10592,12 @@
   generated_tests::Execute(device,
                            maximum::createTestModel_quant8,
                            maximum::is_ignored_quant8,
-                           maximum::examples_simple_quant8);
+                           maximum::get_examples_simple_quant8());
 }
 
 TEST_F(ValidationTest, maximum_simple_quant8) {
   const Model model = maximum::createTestModel_quant8();
-  const std::vector<Request> requests = createRequests(maximum::examples_simple_quant8);
+  const std::vector<Request> requests = createRequests(maximum::get_examples_simple_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10042,12 +10607,12 @@
   generated_tests::Execute(device,
                            maximum::createTestModel_int32,
                            maximum::is_ignored_int32,
-                           maximum::examples_simple_int32);
+                           maximum::get_examples_simple_int32());
 }
 
 TEST_F(ValidationTest, maximum_simple_int32) {
   const Model model = maximum::createTestModel_int32();
-  const std::vector<Request> requests = createRequests(maximum::examples_simple_int32);
+  const std::vector<Request> requests = createRequests(maximum::get_examples_simple_int32());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10057,12 +10622,12 @@
   generated_tests::Execute(device,
                            maximum::createTestModel_float16,
                            maximum::is_ignored_float16,
-                           maximum::examples_simple_float16);
+                           maximum::get_examples_simple_float16());
 }
 
 TEST_F(ValidationTest, maximum_simple_float16) {
   const Model model = maximum::createTestModel_float16();
-  const std::vector<Request> requests = createRequests(maximum::examples_simple_float16);
+  const std::vector<Request> requests = createRequests(maximum::get_examples_simple_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10072,12 +10637,12 @@
   generated_tests::Execute(device,
                            maximum::createTestModel_2,
                            maximum::is_ignored_2,
-                           maximum::examples_broadcast);
+                           maximum::get_examples_broadcast());
 }
 
 TEST_F(ValidationTest, maximum_broadcast) {
   const Model model = maximum::createTestModel_2();
-  const std::vector<Request> requests = createRequests(maximum::examples_broadcast);
+  const std::vector<Request> requests = createRequests(maximum::get_examples_broadcast());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10087,12 +10652,12 @@
   generated_tests::Execute(device,
                            maximum::createTestModel_relaxed_2,
                            maximum::is_ignored_relaxed_2,
-                           maximum::examples_broadcast_relaxed);
+                           maximum::get_examples_broadcast_relaxed());
 }
 
 TEST_F(ValidationTest, maximum_broadcast_relaxed) {
   const Model model = maximum::createTestModel_relaxed_2();
-  const std::vector<Request> requests = createRequests(maximum::examples_broadcast_relaxed);
+  const std::vector<Request> requests = createRequests(maximum::get_examples_broadcast_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10102,12 +10667,12 @@
   generated_tests::Execute(device,
                            maximum::createTestModel_quant8_2,
                            maximum::is_ignored_quant8_2,
-                           maximum::examples_broadcast_quant8);
+                           maximum::get_examples_broadcast_quant8());
 }
 
 TEST_F(ValidationTest, maximum_broadcast_quant8) {
   const Model model = maximum::createTestModel_quant8_2();
-  const std::vector<Request> requests = createRequests(maximum::examples_broadcast_quant8);
+  const std::vector<Request> requests = createRequests(maximum::get_examples_broadcast_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10117,12 +10682,12 @@
   generated_tests::Execute(device,
                            maximum::createTestModel_int32_2,
                            maximum::is_ignored_int32_2,
-                           maximum::examples_broadcast_int32);
+                           maximum::get_examples_broadcast_int32());
 }
 
 TEST_F(ValidationTest, maximum_broadcast_int32) {
   const Model model = maximum::createTestModel_int32_2();
-  const std::vector<Request> requests = createRequests(maximum::examples_broadcast_int32);
+  const std::vector<Request> requests = createRequests(maximum::get_examples_broadcast_int32());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10132,12 +10697,12 @@
   generated_tests::Execute(device,
                            maximum::createTestModel_float16_2,
                            maximum::is_ignored_float16_2,
-                           maximum::examples_broadcast_float16);
+                           maximum::get_examples_broadcast_float16());
 }
 
 TEST_F(ValidationTest, maximum_broadcast_float16) {
   const Model model = maximum::createTestModel_float16_2();
-  const std::vector<Request> requests = createRequests(maximum::examples_broadcast_float16);
+  const std::vector<Request> requests = createRequests(maximum::get_examples_broadcast_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10155,12 +10720,12 @@
   generated_tests::Execute(device,
                            minimum::createTestModel,
                            minimum::is_ignored,
-                           minimum::examples_simple);
+                           minimum::get_examples_simple());
 }
 
 TEST_F(ValidationTest, minimum_simple) {
   const Model model = minimum::createTestModel();
-  const std::vector<Request> requests = createRequests(minimum::examples_simple);
+  const std::vector<Request> requests = createRequests(minimum::get_examples_simple());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10170,12 +10735,12 @@
   generated_tests::Execute(device,
                            minimum::createTestModel_relaxed,
                            minimum::is_ignored_relaxed,
-                           minimum::examples_simple_relaxed);
+                           minimum::get_examples_simple_relaxed());
 }
 
 TEST_F(ValidationTest, minimum_simple_relaxed) {
   const Model model = minimum::createTestModel_relaxed();
-  const std::vector<Request> requests = createRequests(minimum::examples_simple_relaxed);
+  const std::vector<Request> requests = createRequests(minimum::get_examples_simple_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10185,12 +10750,12 @@
   generated_tests::Execute(device,
                            minimum::createTestModel_quant8,
                            minimum::is_ignored_quant8,
-                           minimum::examples_simple_quant8);
+                           minimum::get_examples_simple_quant8());
 }
 
 TEST_F(ValidationTest, minimum_simple_quant8) {
   const Model model = minimum::createTestModel_quant8();
-  const std::vector<Request> requests = createRequests(minimum::examples_simple_quant8);
+  const std::vector<Request> requests = createRequests(minimum::get_examples_simple_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10200,12 +10765,12 @@
   generated_tests::Execute(device,
                            minimum::createTestModel_int32,
                            minimum::is_ignored_int32,
-                           minimum::examples_simple_int32);
+                           minimum::get_examples_simple_int32());
 }
 
 TEST_F(ValidationTest, minimum_simple_int32) {
   const Model model = minimum::createTestModel_int32();
-  const std::vector<Request> requests = createRequests(minimum::examples_simple_int32);
+  const std::vector<Request> requests = createRequests(minimum::get_examples_simple_int32());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10215,12 +10780,12 @@
   generated_tests::Execute(device,
                            minimum::createTestModel_float16,
                            minimum::is_ignored_float16,
-                           minimum::examples_simple_float16);
+                           minimum::get_examples_simple_float16());
 }
 
 TEST_F(ValidationTest, minimum_simple_float16) {
   const Model model = minimum::createTestModel_float16();
-  const std::vector<Request> requests = createRequests(minimum::examples_simple_float16);
+  const std::vector<Request> requests = createRequests(minimum::get_examples_simple_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10230,12 +10795,12 @@
   generated_tests::Execute(device,
                            minimum::createTestModel_2,
                            minimum::is_ignored_2,
-                           minimum::examples_broadcast);
+                           minimum::get_examples_broadcast());
 }
 
 TEST_F(ValidationTest, minimum_broadcast) {
   const Model model = minimum::createTestModel_2();
-  const std::vector<Request> requests = createRequests(minimum::examples_broadcast);
+  const std::vector<Request> requests = createRequests(minimum::get_examples_broadcast());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10245,12 +10810,12 @@
   generated_tests::Execute(device,
                            minimum::createTestModel_relaxed_2,
                            minimum::is_ignored_relaxed_2,
-                           minimum::examples_broadcast_relaxed);
+                           minimum::get_examples_broadcast_relaxed());
 }
 
 TEST_F(ValidationTest, minimum_broadcast_relaxed) {
   const Model model = minimum::createTestModel_relaxed_2();
-  const std::vector<Request> requests = createRequests(minimum::examples_broadcast_relaxed);
+  const std::vector<Request> requests = createRequests(minimum::get_examples_broadcast_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10260,12 +10825,12 @@
   generated_tests::Execute(device,
                            minimum::createTestModel_quant8_2,
                            minimum::is_ignored_quant8_2,
-                           minimum::examples_broadcast_quant8);
+                           minimum::get_examples_broadcast_quant8());
 }
 
 TEST_F(ValidationTest, minimum_broadcast_quant8) {
   const Model model = minimum::createTestModel_quant8_2();
-  const std::vector<Request> requests = createRequests(minimum::examples_broadcast_quant8);
+  const std::vector<Request> requests = createRequests(minimum::get_examples_broadcast_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10275,12 +10840,12 @@
   generated_tests::Execute(device,
                            minimum::createTestModel_int32_2,
                            minimum::is_ignored_int32_2,
-                           minimum::examples_broadcast_int32);
+                           minimum::get_examples_broadcast_int32());
 }
 
 TEST_F(ValidationTest, minimum_broadcast_int32) {
   const Model model = minimum::createTestModel_int32_2();
-  const std::vector<Request> requests = createRequests(minimum::examples_broadcast_int32);
+  const std::vector<Request> requests = createRequests(minimum::get_examples_broadcast_int32());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10290,12 +10855,12 @@
   generated_tests::Execute(device,
                            minimum::createTestModel_float16_2,
                            minimum::is_ignored_float16_2,
-                           minimum::examples_broadcast_float16);
+                           minimum::get_examples_broadcast_float16());
 }
 
 TEST_F(ValidationTest, minimum_broadcast_float16) {
   const Model model = minimum::createTestModel_float16_2();
-  const std::vector<Request> requests = createRequests(minimum::examples_broadcast_float16);
+  const std::vector<Request> requests = createRequests(minimum::get_examples_broadcast_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10313,12 +10878,12 @@
   generated_tests::Execute(device,
                            mul_broadcast_float16::createTestModel,
                            mul_broadcast_float16::is_ignored,
-                           mul_broadcast_float16::examples);
+                           mul_broadcast_float16::get_examples());
 }
 
 TEST_F(ValidationTest, mul_broadcast_float16) {
   const Model model = mul_broadcast_float16::createTestModel();
-  const std::vector<Request> requests = createRequests(mul_broadcast_float16::examples);
+  const std::vector<Request> requests = createRequests(mul_broadcast_float16::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10336,12 +10901,35 @@
   generated_tests::Execute(device,
                            mul_float16::createTestModel,
                            mul_float16::is_ignored,
-                           mul_float16::examples);
+                           mul_float16::get_examples());
 }
 
 TEST_F(ValidationTest, mul_float16) {
   const Model model = mul_float16::createTestModel();
-  const std::vector<Request> requests = createRequests(mul_float16::examples);
+  const std::vector<Request> requests = createRequests(mul_float16::get_examples());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+// Generated from: pad_float16.mod.py.
+namespace pad_float16 {
+// Generated pad_float16 test
+#include "examples/pad_float16.example.cpp"
+// Generated model constructor
+#include "vts_models/pad_float16.model.cpp"
+} // namespace pad_float16
+
+TEST_F(NeuralnetworksHidlTest, pad_float16) {
+  generated_tests::Execute(device,
+                           pad_float16::createTestModel,
+                           pad_float16::is_ignored,
+                           pad_float16::get_examples());
+}
+
+TEST_F(ValidationTest, pad_float16) {
+  const Model model = pad_float16::createTestModel();
+  const std::vector<Request> requests = createRequests(pad_float16::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10359,12 +10947,27 @@
   generated_tests::Execute(device,
                            pad_v2_1_float::createTestModel,
                            pad_v2_1_float::is_ignored,
-                           pad_v2_1_float::examples);
+                           pad_v2_1_float::get_examples());
 }
 
 TEST_F(ValidationTest, pad_v2_1_float) {
   const Model model = pad_v2_1_float::createTestModel();
-  const std::vector<Request> requests = createRequests(pad_v2_1_float::examples);
+  const std::vector<Request> requests = createRequests(pad_v2_1_float::get_examples());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, pad_v2_1_float_float16) {
+  generated_tests::Execute(device,
+                           pad_v2_1_float::createTestModel_float16,
+                           pad_v2_1_float::is_ignored_float16,
+                           pad_v2_1_float::get_examples_float16());
+}
+
+TEST_F(ValidationTest, pad_v2_1_float_float16) {
+  const Model model = pad_v2_1_float::createTestModel_float16();
+  const std::vector<Request> requests = createRequests(pad_v2_1_float::get_examples_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10382,12 +10985,12 @@
   generated_tests::Execute(device,
                            pad_v2_1_float_relaxed::createTestModel,
                            pad_v2_1_float_relaxed::is_ignored,
-                           pad_v2_1_float_relaxed::examples);
+                           pad_v2_1_float_relaxed::get_examples());
 }
 
 TEST_F(ValidationTest, pad_v2_1_float_relaxed) {
   const Model model = pad_v2_1_float_relaxed::createTestModel();
-  const std::vector<Request> requests = createRequests(pad_v2_1_float_relaxed::examples);
+  const std::vector<Request> requests = createRequests(pad_v2_1_float_relaxed::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10405,12 +11008,12 @@
   generated_tests::Execute(device,
                            pad_v2_1_quant8::createTestModel,
                            pad_v2_1_quant8::is_ignored,
-                           pad_v2_1_quant8::examples);
+                           pad_v2_1_quant8::get_examples());
 }
 
 TEST_F(ValidationTest, pad_v2_1_quant8) {
   const Model model = pad_v2_1_quant8::createTestModel();
-  const std::vector<Request> requests = createRequests(pad_v2_1_quant8::examples);
+  const std::vector<Request> requests = createRequests(pad_v2_1_quant8::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10428,12 +11031,12 @@
   generated_tests::Execute(device,
                            pow::createTestModel,
                            pow::is_ignored,
-                           pow::examples);
+                           pow::get_examples());
 }
 
 TEST_F(ValidationTest, pow) {
   const Model model = pow::createTestModel();
-  const std::vector<Request> requests = createRequests(pow::examples);
+  const std::vector<Request> requests = createRequests(pow::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10443,12 +11046,12 @@
   generated_tests::Execute(device,
                            pow::createTestModel_relaxed,
                            pow::is_ignored_relaxed,
-                           pow::examples_relaxed);
+                           pow::get_examples_relaxed());
 }
 
 TEST_F(ValidationTest, pow_relaxed) {
   const Model model = pow::createTestModel_relaxed();
-  const std::vector<Request> requests = createRequests(pow::examples_relaxed);
+  const std::vector<Request> requests = createRequests(pow::get_examples_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10458,12 +11061,12 @@
   generated_tests::Execute(device,
                            pow::createTestModel_2,
                            pow::is_ignored_2,
-                           pow::examples_2);
+                           pow::get_examples_2());
 }
 
 TEST_F(ValidationTest, pow_2) {
   const Model model = pow::createTestModel_2();
-  const std::vector<Request> requests = createRequests(pow::examples_2);
+  const std::vector<Request> requests = createRequests(pow::get_examples_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10473,12 +11076,12 @@
   generated_tests::Execute(device,
                            pow::createTestModel_relaxed_2,
                            pow::is_ignored_relaxed_2,
-                           pow::examples_relaxed_2);
+                           pow::get_examples_relaxed_2());
 }
 
 TEST_F(ValidationTest, pow_relaxed_2) {
   const Model model = pow::createTestModel_relaxed_2();
-  const std::vector<Request> requests = createRequests(pow::examples_relaxed_2);
+  const std::vector<Request> requests = createRequests(pow::get_examples_relaxed_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10488,12 +11091,12 @@
   generated_tests::Execute(device,
                            pow::createTestModel_3,
                            pow::is_ignored_3,
-                           pow::examples_3);
+                           pow::get_examples_3());
 }
 
 TEST_F(ValidationTest, pow_3) {
   const Model model = pow::createTestModel_3();
-  const std::vector<Request> requests = createRequests(pow::examples_3);
+  const std::vector<Request> requests = createRequests(pow::get_examples_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10503,12 +11106,12 @@
   generated_tests::Execute(device,
                            pow::createTestModel_relaxed_3,
                            pow::is_ignored_relaxed_3,
-                           pow::examples_relaxed_3);
+                           pow::get_examples_relaxed_3());
 }
 
 TEST_F(ValidationTest, pow_relaxed_3) {
   const Model model = pow::createTestModel_relaxed_3();
-  const std::vector<Request> requests = createRequests(pow::examples_relaxed_3);
+  const std::vector<Request> requests = createRequests(pow::get_examples_relaxed_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10526,12 +11129,12 @@
   generated_tests::Execute(device,
                            prelu::createTestModel,
                            prelu::is_ignored,
-                           prelu::examples);
+                           prelu::get_examples());
 }
 
 TEST_F(ValidationTest, prelu) {
   const Model model = prelu::createTestModel();
-  const std::vector<Request> requests = createRequests(prelu::examples);
+  const std::vector<Request> requests = createRequests(prelu::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10541,12 +11144,12 @@
   generated_tests::Execute(device,
                            prelu::createTestModel_relaxed,
                            prelu::is_ignored_relaxed,
-                           prelu::examples_relaxed);
+                           prelu::get_examples_relaxed());
 }
 
 TEST_F(ValidationTest, prelu_relaxed) {
   const Model model = prelu::createTestModel_relaxed();
-  const std::vector<Request> requests = createRequests(prelu::examples_relaxed);
+  const std::vector<Request> requests = createRequests(prelu::get_examples_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10556,12 +11159,12 @@
   generated_tests::Execute(device,
                            prelu::createTestModel_quant8,
                            prelu::is_ignored_quant8,
-                           prelu::examples_quant8);
+                           prelu::get_examples_quant8());
 }
 
 TEST_F(ValidationTest, prelu_quant8) {
   const Model model = prelu::createTestModel_quant8();
-  const std::vector<Request> requests = createRequests(prelu::examples_quant8);
+  const std::vector<Request> requests = createRequests(prelu::get_examples_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10571,12 +11174,12 @@
   generated_tests::Execute(device,
                            prelu::createTestModel_weight_as_input,
                            prelu::is_ignored_weight_as_input,
-                           prelu::examples_weight_as_input);
+                           prelu::get_examples_weight_as_input());
 }
 
 TEST_F(ValidationTest, prelu_weight_as_input) {
   const Model model = prelu::createTestModel_weight_as_input();
-  const std::vector<Request> requests = createRequests(prelu::examples_weight_as_input);
+  const std::vector<Request> requests = createRequests(prelu::get_examples_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10586,12 +11189,12 @@
   generated_tests::Execute(device,
                            prelu::createTestModel_weight_as_input_relaxed,
                            prelu::is_ignored_weight_as_input_relaxed,
-                           prelu::examples_weight_as_input_relaxed);
+                           prelu::get_examples_weight_as_input_relaxed());
 }
 
 TEST_F(ValidationTest, prelu_weight_as_input_relaxed) {
   const Model model = prelu::createTestModel_weight_as_input_relaxed();
-  const std::vector<Request> requests = createRequests(prelu::examples_weight_as_input_relaxed);
+  const std::vector<Request> requests = createRequests(prelu::get_examples_weight_as_input_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10601,12 +11204,12 @@
   generated_tests::Execute(device,
                            prelu::createTestModel_weight_as_input_quant8,
                            prelu::is_ignored_weight_as_input_quant8,
-                           prelu::examples_weight_as_input_quant8);
+                           prelu::get_examples_weight_as_input_quant8());
 }
 
 TEST_F(ValidationTest, prelu_weight_as_input_quant8) {
   const Model model = prelu::createTestModel_weight_as_input_quant8();
-  const std::vector<Request> requests = createRequests(prelu::examples_weight_as_input_quant8);
+  const std::vector<Request> requests = createRequests(prelu::get_examples_weight_as_input_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10624,12 +11227,12 @@
   generated_tests::Execute(device,
                            quantize::createTestModel_quant8,
                            quantize::is_ignored_quant8,
-                           quantize::examples_quant8);
+                           quantize::get_examples_quant8());
 }
 
 TEST_F(ValidationTest, quantize_quant8) {
   const Model model = quantize::createTestModel_quant8();
-  const std::vector<Request> requests = createRequests(quantize::examples_quant8);
+  const std::vector<Request> requests = createRequests(quantize::get_examples_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10639,12 +11242,12 @@
   generated_tests::Execute(device,
                            quantize::createTestModel_quant8_2,
                            quantize::is_ignored_quant8_2,
-                           quantize::examples_quant8_2);
+                           quantize::get_examples_quant8_2());
 }
 
 TEST_F(ValidationTest, quantize_quant8_2) {
   const Model model = quantize::createTestModel_quant8_2();
-  const std::vector<Request> requests = createRequests(quantize::examples_quant8_2);
+  const std::vector<Request> requests = createRequests(quantize::get_examples_quant8_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10654,12 +11257,12 @@
   generated_tests::Execute(device,
                            quantize::createTestModel_quant8_3,
                            quantize::is_ignored_quant8_3,
-                           quantize::examples_quant8_3);
+                           quantize::get_examples_quant8_3());
 }
 
 TEST_F(ValidationTest, quantize_quant8_3) {
   const Model model = quantize::createTestModel_quant8_3();
-  const std::vector<Request> requests = createRequests(quantize::examples_quant8_3);
+  const std::vector<Request> requests = createRequests(quantize::get_examples_quant8_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10669,12 +11272,12 @@
   generated_tests::Execute(device,
                            quantize::createTestModel_quant8_4,
                            quantize::is_ignored_quant8_4,
-                           quantize::examples_quant8_4);
+                           quantize::get_examples_quant8_4());
 }
 
 TEST_F(ValidationTest, quantize_quant8_4) {
   const Model model = quantize::createTestModel_quant8_4();
-  const std::vector<Request> requests = createRequests(quantize::examples_quant8_4);
+  const std::vector<Request> requests = createRequests(quantize::get_examples_quant8_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10692,12 +11295,12 @@
   generated_tests::Execute(device,
                            quantized_lstm::createTestModel,
                            quantized_lstm::is_ignored,
-                           quantized_lstm::examples);
+                           quantized_lstm::get_examples());
 }
 
 TEST_F(ValidationTest, quantized_lstm) {
   const Model model = quantized_lstm::createTestModel();
-  const std::vector<Request> requests = createRequests(quantized_lstm::examples);
+  const std::vector<Request> requests = createRequests(quantized_lstm::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10715,12 +11318,12 @@
   generated_tests::Execute(device,
                            random_multinomial::createTestModel,
                            random_multinomial::is_ignored,
-                           random_multinomial::examples);
+                           random_multinomial::get_examples());
 }
 
 TEST_F(ValidationTest, random_multinomial) {
   const Model model = random_multinomial::createTestModel();
-  const std::vector<Request> requests = createRequests(random_multinomial::examples);
+  const std::vector<Request> requests = createRequests(random_multinomial::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10738,12 +11341,12 @@
   generated_tests::Execute(device,
                            relu1_float16_1::createTestModel,
                            relu1_float16_1::is_ignored,
-                           relu1_float16_1::examples);
+                           relu1_float16_1::get_examples());
 }
 
 TEST_F(ValidationTest, relu1_float16_1) {
   const Model model = relu1_float16_1::createTestModel();
-  const std::vector<Request> requests = createRequests(relu1_float16_1::examples);
+  const std::vector<Request> requests = createRequests(relu1_float16_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10761,12 +11364,12 @@
   generated_tests::Execute(device,
                            relu1_float16_2::createTestModel,
                            relu1_float16_2::is_ignored,
-                           relu1_float16_2::examples);
+                           relu1_float16_2::get_examples());
 }
 
 TEST_F(ValidationTest, relu1_float16_2) {
   const Model model = relu1_float16_2::createTestModel();
-  const std::vector<Request> requests = createRequests(relu1_float16_2::examples);
+  const std::vector<Request> requests = createRequests(relu1_float16_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10784,12 +11387,12 @@
   generated_tests::Execute(device,
                            relu6_float16_1::createTestModel,
                            relu6_float16_1::is_ignored,
-                           relu6_float16_1::examples);
+                           relu6_float16_1::get_examples());
 }
 
 TEST_F(ValidationTest, relu6_float16_1) {
   const Model model = relu6_float16_1::createTestModel();
-  const std::vector<Request> requests = createRequests(relu6_float16_1::examples);
+  const std::vector<Request> requests = createRequests(relu6_float16_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10807,12 +11410,12 @@
   generated_tests::Execute(device,
                            relu6_float16_2::createTestModel,
                            relu6_float16_2::is_ignored,
-                           relu6_float16_2::examples);
+                           relu6_float16_2::get_examples());
 }
 
 TEST_F(ValidationTest, relu6_float16_2) {
   const Model model = relu6_float16_2::createTestModel();
-  const std::vector<Request> requests = createRequests(relu6_float16_2::examples);
+  const std::vector<Request> requests = createRequests(relu6_float16_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10830,12 +11433,12 @@
   generated_tests::Execute(device,
                            relu_float16_1::createTestModel,
                            relu_float16_1::is_ignored,
-                           relu_float16_1::examples);
+                           relu_float16_1::get_examples());
 }
 
 TEST_F(ValidationTest, relu_float16_1) {
   const Model model = relu_float16_1::createTestModel();
-  const std::vector<Request> requests = createRequests(relu_float16_1::examples);
+  const std::vector<Request> requests = createRequests(relu_float16_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10853,12 +11456,35 @@
   generated_tests::Execute(device,
                            relu_float16_2::createTestModel,
                            relu_float16_2::is_ignored,
-                           relu_float16_2::examples);
+                           relu_float16_2::get_examples());
 }
 
 TEST_F(ValidationTest, relu_float16_2) {
   const Model model = relu_float16_2::createTestModel();
-  const std::vector<Request> requests = createRequests(relu_float16_2::examples);
+  const std::vector<Request> requests = createRequests(relu_float16_2::get_examples());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+// Generated from: reshape_float16.mod.py.
+namespace reshape_float16 {
+// Generated reshape_float16 test
+#include "examples/reshape_float16.example.cpp"
+// Generated model constructor
+#include "vts_models/reshape_float16.model.cpp"
+} // namespace reshape_float16
+
+TEST_F(NeuralnetworksHidlTest, reshape_float16) {
+  generated_tests::Execute(device,
+                           reshape_float16::createTestModel,
+                           reshape_float16::is_ignored,
+                           reshape_float16::get_examples());
+}
+
+TEST_F(ValidationTest, reshape_float16) {
+  const Model model = reshape_float16::createTestModel();
+  const std::vector<Request> requests = createRequests(reshape_float16::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10876,12 +11502,12 @@
   generated_tests::Execute(device,
                            resize_bilinear_v1_2::createTestModel_nhwc,
                            resize_bilinear_v1_2::is_ignored_nhwc,
-                           resize_bilinear_v1_2::examples_nhwc);
+                           resize_bilinear_v1_2::get_examples_nhwc());
 }
 
 TEST_F(ValidationTest, resize_bilinear_v1_2_nhwc) {
   const Model model = resize_bilinear_v1_2::createTestModel_nhwc();
-  const std::vector<Request> requests = createRequests(resize_bilinear_v1_2::examples_nhwc);
+  const std::vector<Request> requests = createRequests(resize_bilinear_v1_2::get_examples_nhwc());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10891,12 +11517,27 @@
   generated_tests::Execute(device,
                            resize_bilinear_v1_2::createTestModel_nhwc_relaxed,
                            resize_bilinear_v1_2::is_ignored_nhwc_relaxed,
-                           resize_bilinear_v1_2::examples_nhwc_relaxed);
+                           resize_bilinear_v1_2::get_examples_nhwc_relaxed());
 }
 
 TEST_F(ValidationTest, resize_bilinear_v1_2_nhwc_relaxed) {
   const Model model = resize_bilinear_v1_2::createTestModel_nhwc_relaxed();
-  const std::vector<Request> requests = createRequests(resize_bilinear_v1_2::examples_nhwc_relaxed);
+  const std::vector<Request> requests = createRequests(resize_bilinear_v1_2::get_examples_nhwc_relaxed());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, resize_bilinear_v1_2_nhwc_float16) {
+  generated_tests::Execute(device,
+                           resize_bilinear_v1_2::createTestModel_nhwc_float16,
+                           resize_bilinear_v1_2::is_ignored_nhwc_float16,
+                           resize_bilinear_v1_2::get_examples_nhwc_float16());
+}
+
+TEST_F(ValidationTest, resize_bilinear_v1_2_nhwc_float16) {
+  const Model model = resize_bilinear_v1_2::createTestModel_nhwc_float16();
+  const std::vector<Request> requests = createRequests(resize_bilinear_v1_2::get_examples_nhwc_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10906,12 +11547,12 @@
   generated_tests::Execute(device,
                            resize_bilinear_v1_2::createTestModel_nchw,
                            resize_bilinear_v1_2::is_ignored_nchw,
-                           resize_bilinear_v1_2::examples_nchw);
+                           resize_bilinear_v1_2::get_examples_nchw());
 }
 
 TEST_F(ValidationTest, resize_bilinear_v1_2_nchw) {
   const Model model = resize_bilinear_v1_2::createTestModel_nchw();
-  const std::vector<Request> requests = createRequests(resize_bilinear_v1_2::examples_nchw);
+  const std::vector<Request> requests = createRequests(resize_bilinear_v1_2::get_examples_nchw());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10921,12 +11562,27 @@
   generated_tests::Execute(device,
                            resize_bilinear_v1_2::createTestModel_nchw_relaxed,
                            resize_bilinear_v1_2::is_ignored_nchw_relaxed,
-                           resize_bilinear_v1_2::examples_nchw_relaxed);
+                           resize_bilinear_v1_2::get_examples_nchw_relaxed());
 }
 
 TEST_F(ValidationTest, resize_bilinear_v1_2_nchw_relaxed) {
   const Model model = resize_bilinear_v1_2::createTestModel_nchw_relaxed();
-  const std::vector<Request> requests = createRequests(resize_bilinear_v1_2::examples_nchw_relaxed);
+  const std::vector<Request> requests = createRequests(resize_bilinear_v1_2::get_examples_nchw_relaxed());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, resize_bilinear_v1_2_nchw_float16) {
+  generated_tests::Execute(device,
+                           resize_bilinear_v1_2::createTestModel_nchw_float16,
+                           resize_bilinear_v1_2::is_ignored_nchw_float16,
+                           resize_bilinear_v1_2::get_examples_nchw_float16());
+}
+
+TEST_F(ValidationTest, resize_bilinear_v1_2_nchw_float16) {
+  const Model model = resize_bilinear_v1_2::createTestModel_nchw_float16();
+  const std::vector<Request> requests = createRequests(resize_bilinear_v1_2::get_examples_nchw_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10936,12 +11592,12 @@
   generated_tests::Execute(device,
                            resize_bilinear_v1_2::createTestModel_nhwc_2,
                            resize_bilinear_v1_2::is_ignored_nhwc_2,
-                           resize_bilinear_v1_2::examples_nhwc_2);
+                           resize_bilinear_v1_2::get_examples_nhwc_2());
 }
 
 TEST_F(ValidationTest, resize_bilinear_v1_2_nhwc_2) {
   const Model model = resize_bilinear_v1_2::createTestModel_nhwc_2();
-  const std::vector<Request> requests = createRequests(resize_bilinear_v1_2::examples_nhwc_2);
+  const std::vector<Request> requests = createRequests(resize_bilinear_v1_2::get_examples_nhwc_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10951,12 +11607,27 @@
   generated_tests::Execute(device,
                            resize_bilinear_v1_2::createTestModel_nhwc_relaxed_2,
                            resize_bilinear_v1_2::is_ignored_nhwc_relaxed_2,
-                           resize_bilinear_v1_2::examples_nhwc_relaxed_2);
+                           resize_bilinear_v1_2::get_examples_nhwc_relaxed_2());
 }
 
 TEST_F(ValidationTest, resize_bilinear_v1_2_nhwc_relaxed_2) {
   const Model model = resize_bilinear_v1_2::createTestModel_nhwc_relaxed_2();
-  const std::vector<Request> requests = createRequests(resize_bilinear_v1_2::examples_nhwc_relaxed_2);
+  const std::vector<Request> requests = createRequests(resize_bilinear_v1_2::get_examples_nhwc_relaxed_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, resize_bilinear_v1_2_nhwc_float16_2) {
+  generated_tests::Execute(device,
+                           resize_bilinear_v1_2::createTestModel_nhwc_float16_2,
+                           resize_bilinear_v1_2::is_ignored_nhwc_float16_2,
+                           resize_bilinear_v1_2::get_examples_nhwc_float16_2());
+}
+
+TEST_F(ValidationTest, resize_bilinear_v1_2_nhwc_float16_2) {
+  const Model model = resize_bilinear_v1_2::createTestModel_nhwc_float16_2();
+  const std::vector<Request> requests = createRequests(resize_bilinear_v1_2::get_examples_nhwc_float16_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10966,12 +11637,12 @@
   generated_tests::Execute(device,
                            resize_bilinear_v1_2::createTestModel_nchw_2,
                            resize_bilinear_v1_2::is_ignored_nchw_2,
-                           resize_bilinear_v1_2::examples_nchw_2);
+                           resize_bilinear_v1_2::get_examples_nchw_2());
 }
 
 TEST_F(ValidationTest, resize_bilinear_v1_2_nchw_2) {
   const Model model = resize_bilinear_v1_2::createTestModel_nchw_2();
-  const std::vector<Request> requests = createRequests(resize_bilinear_v1_2::examples_nchw_2);
+  const std::vector<Request> requests = createRequests(resize_bilinear_v1_2::get_examples_nchw_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -10981,12 +11652,27 @@
   generated_tests::Execute(device,
                            resize_bilinear_v1_2::createTestModel_nchw_relaxed_2,
                            resize_bilinear_v1_2::is_ignored_nchw_relaxed_2,
-                           resize_bilinear_v1_2::examples_nchw_relaxed_2);
+                           resize_bilinear_v1_2::get_examples_nchw_relaxed_2());
 }
 
 TEST_F(ValidationTest, resize_bilinear_v1_2_nchw_relaxed_2) {
   const Model model = resize_bilinear_v1_2::createTestModel_nchw_relaxed_2();
-  const std::vector<Request> requests = createRequests(resize_bilinear_v1_2::examples_nchw_relaxed_2);
+  const std::vector<Request> requests = createRequests(resize_bilinear_v1_2::get_examples_nchw_relaxed_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, resize_bilinear_v1_2_nchw_float16_2) {
+  generated_tests::Execute(device,
+                           resize_bilinear_v1_2::createTestModel_nchw_float16_2,
+                           resize_bilinear_v1_2::is_ignored_nchw_float16_2,
+                           resize_bilinear_v1_2::get_examples_nchw_float16_2());
+}
+
+TEST_F(ValidationTest, resize_bilinear_v1_2_nchw_float16_2) {
+  const Model model = resize_bilinear_v1_2::createTestModel_nchw_float16_2();
+  const std::vector<Request> requests = createRequests(resize_bilinear_v1_2::get_examples_nchw_float16_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11004,12 +11690,12 @@
   generated_tests::Execute(device,
                            roi_align::createTestModel_nhwc,
                            roi_align::is_ignored_nhwc,
-                           roi_align::examples_nhwc);
+                           roi_align::get_examples_nhwc());
 }
 
 TEST_F(ValidationTest, roi_align_nhwc) {
   const Model model = roi_align::createTestModel_nhwc();
-  const std::vector<Request> requests = createRequests(roi_align::examples_nhwc);
+  const std::vector<Request> requests = createRequests(roi_align::get_examples_nhwc());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11019,12 +11705,12 @@
   generated_tests::Execute(device,
                            roi_align::createTestModel_nhwc_relaxed,
                            roi_align::is_ignored_nhwc_relaxed,
-                           roi_align::examples_nhwc_relaxed);
+                           roi_align::get_examples_nhwc_relaxed());
 }
 
 TEST_F(ValidationTest, roi_align_nhwc_relaxed) {
   const Model model = roi_align::createTestModel_nhwc_relaxed();
-  const std::vector<Request> requests = createRequests(roi_align::examples_nhwc_relaxed);
+  const std::vector<Request> requests = createRequests(roi_align::get_examples_nhwc_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11034,12 +11720,12 @@
   generated_tests::Execute(device,
                            roi_align::createTestModel_nhwc_quant8,
                            roi_align::is_ignored_nhwc_quant8,
-                           roi_align::examples_nhwc_quant8);
+                           roi_align::get_examples_nhwc_quant8());
 }
 
 TEST_F(ValidationTest, roi_align_nhwc_quant8) {
   const Model model = roi_align::createTestModel_nhwc_quant8();
-  const std::vector<Request> requests = createRequests(roi_align::examples_nhwc_quant8);
+  const std::vector<Request> requests = createRequests(roi_align::get_examples_nhwc_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11049,12 +11735,12 @@
   generated_tests::Execute(device,
                            roi_align::createTestModel_nchw,
                            roi_align::is_ignored_nchw,
-                           roi_align::examples_nchw);
+                           roi_align::get_examples_nchw());
 }
 
 TEST_F(ValidationTest, roi_align_nchw) {
   const Model model = roi_align::createTestModel_nchw();
-  const std::vector<Request> requests = createRequests(roi_align::examples_nchw);
+  const std::vector<Request> requests = createRequests(roi_align::get_examples_nchw());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11064,12 +11750,12 @@
   generated_tests::Execute(device,
                            roi_align::createTestModel_nchw_relaxed,
                            roi_align::is_ignored_nchw_relaxed,
-                           roi_align::examples_nchw_relaxed);
+                           roi_align::get_examples_nchw_relaxed());
 }
 
 TEST_F(ValidationTest, roi_align_nchw_relaxed) {
   const Model model = roi_align::createTestModel_nchw_relaxed();
-  const std::vector<Request> requests = createRequests(roi_align::examples_nchw_relaxed);
+  const std::vector<Request> requests = createRequests(roi_align::get_examples_nchw_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11079,12 +11765,12 @@
   generated_tests::Execute(device,
                            roi_align::createTestModel_nchw_quant8,
                            roi_align::is_ignored_nchw_quant8,
-                           roi_align::examples_nchw_quant8);
+                           roi_align::get_examples_nchw_quant8());
 }
 
 TEST_F(ValidationTest, roi_align_nchw_quant8) {
   const Model model = roi_align::createTestModel_nchw_quant8();
-  const std::vector<Request> requests = createRequests(roi_align::examples_nchw_quant8);
+  const std::vector<Request> requests = createRequests(roi_align::get_examples_nchw_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11094,12 +11780,12 @@
   generated_tests::Execute(device,
                            roi_align::createTestModel_nhwc_2,
                            roi_align::is_ignored_nhwc_2,
-                           roi_align::examples_nhwc_2);
+                           roi_align::get_examples_nhwc_2());
 }
 
 TEST_F(ValidationTest, roi_align_nhwc_2) {
   const Model model = roi_align::createTestModel_nhwc_2();
-  const std::vector<Request> requests = createRequests(roi_align::examples_nhwc_2);
+  const std::vector<Request> requests = createRequests(roi_align::get_examples_nhwc_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11109,12 +11795,12 @@
   generated_tests::Execute(device,
                            roi_align::createTestModel_nhwc_relaxed_2,
                            roi_align::is_ignored_nhwc_relaxed_2,
-                           roi_align::examples_nhwc_relaxed_2);
+                           roi_align::get_examples_nhwc_relaxed_2());
 }
 
 TEST_F(ValidationTest, roi_align_nhwc_relaxed_2) {
   const Model model = roi_align::createTestModel_nhwc_relaxed_2();
-  const std::vector<Request> requests = createRequests(roi_align::examples_nhwc_relaxed_2);
+  const std::vector<Request> requests = createRequests(roi_align::get_examples_nhwc_relaxed_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11124,12 +11810,12 @@
   generated_tests::Execute(device,
                            roi_align::createTestModel_nhwc_quant8_2,
                            roi_align::is_ignored_nhwc_quant8_2,
-                           roi_align::examples_nhwc_quant8_2);
+                           roi_align::get_examples_nhwc_quant8_2());
 }
 
 TEST_F(ValidationTest, roi_align_nhwc_quant8_2) {
   const Model model = roi_align::createTestModel_nhwc_quant8_2();
-  const std::vector<Request> requests = createRequests(roi_align::examples_nhwc_quant8_2);
+  const std::vector<Request> requests = createRequests(roi_align::get_examples_nhwc_quant8_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11139,12 +11825,12 @@
   generated_tests::Execute(device,
                            roi_align::createTestModel_nchw_2,
                            roi_align::is_ignored_nchw_2,
-                           roi_align::examples_nchw_2);
+                           roi_align::get_examples_nchw_2());
 }
 
 TEST_F(ValidationTest, roi_align_nchw_2) {
   const Model model = roi_align::createTestModel_nchw_2();
-  const std::vector<Request> requests = createRequests(roi_align::examples_nchw_2);
+  const std::vector<Request> requests = createRequests(roi_align::get_examples_nchw_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11154,12 +11840,12 @@
   generated_tests::Execute(device,
                            roi_align::createTestModel_nchw_relaxed_2,
                            roi_align::is_ignored_nchw_relaxed_2,
-                           roi_align::examples_nchw_relaxed_2);
+                           roi_align::get_examples_nchw_relaxed_2());
 }
 
 TEST_F(ValidationTest, roi_align_nchw_relaxed_2) {
   const Model model = roi_align::createTestModel_nchw_relaxed_2();
-  const std::vector<Request> requests = createRequests(roi_align::examples_nchw_relaxed_2);
+  const std::vector<Request> requests = createRequests(roi_align::get_examples_nchw_relaxed_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11169,12 +11855,12 @@
   generated_tests::Execute(device,
                            roi_align::createTestModel_nchw_quant8_2,
                            roi_align::is_ignored_nchw_quant8_2,
-                           roi_align::examples_nchw_quant8_2);
+                           roi_align::get_examples_nchw_quant8_2());
 }
 
 TEST_F(ValidationTest, roi_align_nchw_quant8_2) {
   const Model model = roi_align::createTestModel_nchw_quant8_2();
-  const std::vector<Request> requests = createRequests(roi_align::examples_nchw_quant8_2);
+  const std::vector<Request> requests = createRequests(roi_align::get_examples_nchw_quant8_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11184,12 +11870,12 @@
   generated_tests::Execute(device,
                            roi_align::createTestModel_nhwc_3,
                            roi_align::is_ignored_nhwc_3,
-                           roi_align::examples_nhwc_3);
+                           roi_align::get_examples_nhwc_3());
 }
 
 TEST_F(ValidationTest, roi_align_nhwc_3) {
   const Model model = roi_align::createTestModel_nhwc_3();
-  const std::vector<Request> requests = createRequests(roi_align::examples_nhwc_3);
+  const std::vector<Request> requests = createRequests(roi_align::get_examples_nhwc_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11199,12 +11885,12 @@
   generated_tests::Execute(device,
                            roi_align::createTestModel_nhwc_relaxed_3,
                            roi_align::is_ignored_nhwc_relaxed_3,
-                           roi_align::examples_nhwc_relaxed_3);
+                           roi_align::get_examples_nhwc_relaxed_3());
 }
 
 TEST_F(ValidationTest, roi_align_nhwc_relaxed_3) {
   const Model model = roi_align::createTestModel_nhwc_relaxed_3();
-  const std::vector<Request> requests = createRequests(roi_align::examples_nhwc_relaxed_3);
+  const std::vector<Request> requests = createRequests(roi_align::get_examples_nhwc_relaxed_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11214,12 +11900,12 @@
   generated_tests::Execute(device,
                            roi_align::createTestModel_nhwc_quant8_3,
                            roi_align::is_ignored_nhwc_quant8_3,
-                           roi_align::examples_nhwc_quant8_3);
+                           roi_align::get_examples_nhwc_quant8_3());
 }
 
 TEST_F(ValidationTest, roi_align_nhwc_quant8_3) {
   const Model model = roi_align::createTestModel_nhwc_quant8_3();
-  const std::vector<Request> requests = createRequests(roi_align::examples_nhwc_quant8_3);
+  const std::vector<Request> requests = createRequests(roi_align::get_examples_nhwc_quant8_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11229,12 +11915,12 @@
   generated_tests::Execute(device,
                            roi_align::createTestModel_nchw_3,
                            roi_align::is_ignored_nchw_3,
-                           roi_align::examples_nchw_3);
+                           roi_align::get_examples_nchw_3());
 }
 
 TEST_F(ValidationTest, roi_align_nchw_3) {
   const Model model = roi_align::createTestModel_nchw_3();
-  const std::vector<Request> requests = createRequests(roi_align::examples_nchw_3);
+  const std::vector<Request> requests = createRequests(roi_align::get_examples_nchw_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11244,12 +11930,12 @@
   generated_tests::Execute(device,
                            roi_align::createTestModel_nchw_relaxed_3,
                            roi_align::is_ignored_nchw_relaxed_3,
-                           roi_align::examples_nchw_relaxed_3);
+                           roi_align::get_examples_nchw_relaxed_3());
 }
 
 TEST_F(ValidationTest, roi_align_nchw_relaxed_3) {
   const Model model = roi_align::createTestModel_nchw_relaxed_3();
-  const std::vector<Request> requests = createRequests(roi_align::examples_nchw_relaxed_3);
+  const std::vector<Request> requests = createRequests(roi_align::get_examples_nchw_relaxed_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11259,12 +11945,200 @@
   generated_tests::Execute(device,
                            roi_align::createTestModel_nchw_quant8_3,
                            roi_align::is_ignored_nchw_quant8_3,
-                           roi_align::examples_nchw_quant8_3);
+                           roi_align::get_examples_nchw_quant8_3());
 }
 
 TEST_F(ValidationTest, roi_align_nchw_quant8_3) {
   const Model model = roi_align::createTestModel_nchw_quant8_3();
-  const std::vector<Request> requests = createRequests(roi_align::examples_nchw_quant8_3);
+  const std::vector<Request> requests = createRequests(roi_align::get_examples_nchw_quant8_3());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+// Generated from: roi_pooling.mod.py.
+namespace roi_pooling {
+// Generated roi_pooling test
+#include "examples/roi_pooling.example.cpp"
+// Generated model constructor
+#include "vts_models/roi_pooling.model.cpp"
+} // namespace roi_pooling
+
+TEST_F(NeuralnetworksHidlTest, roi_pooling_nhwc) {
+  generated_tests::Execute(device,
+                           roi_pooling::createTestModel_nhwc,
+                           roi_pooling::is_ignored_nhwc,
+                           roi_pooling::get_examples_nhwc());
+}
+
+TEST_F(ValidationTest, roi_pooling_nhwc) {
+  const Model model = roi_pooling::createTestModel_nhwc();
+  const std::vector<Request> requests = createRequests(roi_pooling::get_examples_nhwc());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, roi_pooling_nhwc_relaxed) {
+  generated_tests::Execute(device,
+                           roi_pooling::createTestModel_nhwc_relaxed,
+                           roi_pooling::is_ignored_nhwc_relaxed,
+                           roi_pooling::get_examples_nhwc_relaxed());
+}
+
+TEST_F(ValidationTest, roi_pooling_nhwc_relaxed) {
+  const Model model = roi_pooling::createTestModel_nhwc_relaxed();
+  const std::vector<Request> requests = createRequests(roi_pooling::get_examples_nhwc_relaxed());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, roi_pooling_nhwc_quant8) {
+  generated_tests::Execute(device,
+                           roi_pooling::createTestModel_nhwc_quant8,
+                           roi_pooling::is_ignored_nhwc_quant8,
+                           roi_pooling::get_examples_nhwc_quant8());
+}
+
+TEST_F(ValidationTest, roi_pooling_nhwc_quant8) {
+  const Model model = roi_pooling::createTestModel_nhwc_quant8();
+  const std::vector<Request> requests = createRequests(roi_pooling::get_examples_nhwc_quant8());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, roi_pooling_nchw) {
+  generated_tests::Execute(device,
+                           roi_pooling::createTestModel_nchw,
+                           roi_pooling::is_ignored_nchw,
+                           roi_pooling::get_examples_nchw());
+}
+
+TEST_F(ValidationTest, roi_pooling_nchw) {
+  const Model model = roi_pooling::createTestModel_nchw();
+  const std::vector<Request> requests = createRequests(roi_pooling::get_examples_nchw());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, roi_pooling_nchw_relaxed) {
+  generated_tests::Execute(device,
+                           roi_pooling::createTestModel_nchw_relaxed,
+                           roi_pooling::is_ignored_nchw_relaxed,
+                           roi_pooling::get_examples_nchw_relaxed());
+}
+
+TEST_F(ValidationTest, roi_pooling_nchw_relaxed) {
+  const Model model = roi_pooling::createTestModel_nchw_relaxed();
+  const std::vector<Request> requests = createRequests(roi_pooling::get_examples_nchw_relaxed());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, roi_pooling_nchw_quant8) {
+  generated_tests::Execute(device,
+                           roi_pooling::createTestModel_nchw_quant8,
+                           roi_pooling::is_ignored_nchw_quant8,
+                           roi_pooling::get_examples_nchw_quant8());
+}
+
+TEST_F(ValidationTest, roi_pooling_nchw_quant8) {
+  const Model model = roi_pooling::createTestModel_nchw_quant8();
+  const std::vector<Request> requests = createRequests(roi_pooling::get_examples_nchw_quant8());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, roi_pooling_nhwc_2) {
+  generated_tests::Execute(device,
+                           roi_pooling::createTestModel_nhwc_2,
+                           roi_pooling::is_ignored_nhwc_2,
+                           roi_pooling::get_examples_nhwc_2());
+}
+
+TEST_F(ValidationTest, roi_pooling_nhwc_2) {
+  const Model model = roi_pooling::createTestModel_nhwc_2();
+  const std::vector<Request> requests = createRequests(roi_pooling::get_examples_nhwc_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, roi_pooling_nhwc_relaxed_2) {
+  generated_tests::Execute(device,
+                           roi_pooling::createTestModel_nhwc_relaxed_2,
+                           roi_pooling::is_ignored_nhwc_relaxed_2,
+                           roi_pooling::get_examples_nhwc_relaxed_2());
+}
+
+TEST_F(ValidationTest, roi_pooling_nhwc_relaxed_2) {
+  const Model model = roi_pooling::createTestModel_nhwc_relaxed_2();
+  const std::vector<Request> requests = createRequests(roi_pooling::get_examples_nhwc_relaxed_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, roi_pooling_nhwc_quant8_2) {
+  generated_tests::Execute(device,
+                           roi_pooling::createTestModel_nhwc_quant8_2,
+                           roi_pooling::is_ignored_nhwc_quant8_2,
+                           roi_pooling::get_examples_nhwc_quant8_2());
+}
+
+TEST_F(ValidationTest, roi_pooling_nhwc_quant8_2) {
+  const Model model = roi_pooling::createTestModel_nhwc_quant8_2();
+  const std::vector<Request> requests = createRequests(roi_pooling::get_examples_nhwc_quant8_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, roi_pooling_nchw_2) {
+  generated_tests::Execute(device,
+                           roi_pooling::createTestModel_nchw_2,
+                           roi_pooling::is_ignored_nchw_2,
+                           roi_pooling::get_examples_nchw_2());
+}
+
+TEST_F(ValidationTest, roi_pooling_nchw_2) {
+  const Model model = roi_pooling::createTestModel_nchw_2();
+  const std::vector<Request> requests = createRequests(roi_pooling::get_examples_nchw_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, roi_pooling_nchw_relaxed_2) {
+  generated_tests::Execute(device,
+                           roi_pooling::createTestModel_nchw_relaxed_2,
+                           roi_pooling::is_ignored_nchw_relaxed_2,
+                           roi_pooling::get_examples_nchw_relaxed_2());
+}
+
+TEST_F(ValidationTest, roi_pooling_nchw_relaxed_2) {
+  const Model model = roi_pooling::createTestModel_nchw_relaxed_2();
+  const std::vector<Request> requests = createRequests(roi_pooling::get_examples_nchw_relaxed_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, roi_pooling_nchw_quant8_2) {
+  generated_tests::Execute(device,
+                           roi_pooling::createTestModel_nchw_quant8_2,
+                           roi_pooling::is_ignored_nchw_quant8_2,
+                           roi_pooling::get_examples_nchw_quant8_2());
+}
+
+TEST_F(ValidationTest, roi_pooling_nchw_quant8_2) {
+  const Model model = roi_pooling::createTestModel_nchw_quant8_2();
+  const std::vector<Request> requests = createRequests(roi_pooling::get_examples_nchw_quant8_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11282,12 +12156,12 @@
   generated_tests::Execute(device,
                            rotated_bbox_transform::createTestModel,
                            rotated_bbox_transform::is_ignored,
-                           rotated_bbox_transform::examples);
+                           rotated_bbox_transform::get_examples());
 }
 
 TEST_F(ValidationTest, rotated_bbox_transform) {
   const Model model = rotated_bbox_transform::createTestModel();
-  const std::vector<Request> requests = createRequests(rotated_bbox_transform::examples);
+  const std::vector<Request> requests = createRequests(rotated_bbox_transform::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11297,12 +12171,12 @@
   generated_tests::Execute(device,
                            rotated_bbox_transform::createTestModel_relaxed,
                            rotated_bbox_transform::is_ignored_relaxed,
-                           rotated_bbox_transform::examples_relaxed);
+                           rotated_bbox_transform::get_examples_relaxed());
 }
 
 TEST_F(ValidationTest, rotated_bbox_transform_relaxed) {
   const Model model = rotated_bbox_transform::createTestModel_relaxed();
-  const std::vector<Request> requests = createRequests(rotated_bbox_transform::examples_relaxed);
+  const std::vector<Request> requests = createRequests(rotated_bbox_transform::get_examples_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11312,12 +12186,12 @@
   generated_tests::Execute(device,
                            rotated_bbox_transform::createTestModel_2,
                            rotated_bbox_transform::is_ignored_2,
-                           rotated_bbox_transform::examples_2);
+                           rotated_bbox_transform::get_examples_2());
 }
 
 TEST_F(ValidationTest, rotated_bbox_transform_2) {
   const Model model = rotated_bbox_transform::createTestModel_2();
-  const std::vector<Request> requests = createRequests(rotated_bbox_transform::examples_2);
+  const std::vector<Request> requests = createRequests(rotated_bbox_transform::get_examples_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11327,12 +12201,12 @@
   generated_tests::Execute(device,
                            rotated_bbox_transform::createTestModel_relaxed_2,
                            rotated_bbox_transform::is_ignored_relaxed_2,
-                           rotated_bbox_transform::examples_relaxed_2);
+                           rotated_bbox_transform::get_examples_relaxed_2());
 }
 
 TEST_F(ValidationTest, rotated_bbox_transform_relaxed_2) {
   const Model model = rotated_bbox_transform::createTestModel_relaxed_2();
-  const std::vector<Request> requests = createRequests(rotated_bbox_transform::examples_relaxed_2);
+  const std::vector<Request> requests = createRequests(rotated_bbox_transform::get_examples_relaxed_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11342,12 +12216,12 @@
   generated_tests::Execute(device,
                            rotated_bbox_transform::createTestModel_3,
                            rotated_bbox_transform::is_ignored_3,
-                           rotated_bbox_transform::examples_3);
+                           rotated_bbox_transform::get_examples_3());
 }
 
 TEST_F(ValidationTest, rotated_bbox_transform_3) {
   const Model model = rotated_bbox_transform::createTestModel_3();
-  const std::vector<Request> requests = createRequests(rotated_bbox_transform::examples_3);
+  const std::vector<Request> requests = createRequests(rotated_bbox_transform::get_examples_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11357,12 +12231,12 @@
   generated_tests::Execute(device,
                            rotated_bbox_transform::createTestModel_relaxed_3,
                            rotated_bbox_transform::is_ignored_relaxed_3,
-                           rotated_bbox_transform::examples_relaxed_3);
+                           rotated_bbox_transform::get_examples_relaxed_3());
 }
 
 TEST_F(ValidationTest, rotated_bbox_transform_relaxed_3) {
   const Model model = rotated_bbox_transform::createTestModel_relaxed_3();
-  const std::vector<Request> requests = createRequests(rotated_bbox_transform::examples_relaxed_3);
+  const std::vector<Request> requests = createRequests(rotated_bbox_transform::get_examples_relaxed_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11372,12 +12246,12 @@
   generated_tests::Execute(device,
                            rotated_bbox_transform::createTestModel_single_batch,
                            rotated_bbox_transform::is_ignored_single_batch,
-                           rotated_bbox_transform::examples_single_batch);
+                           rotated_bbox_transform::get_examples_single_batch());
 }
 
 TEST_F(ValidationTest, rotated_bbox_transform_single_batch) {
   const Model model = rotated_bbox_transform::createTestModel_single_batch();
-  const std::vector<Request> requests = createRequests(rotated_bbox_transform::examples_single_batch);
+  const std::vector<Request> requests = createRequests(rotated_bbox_transform::get_examples_single_batch());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11387,12 +12261,380 @@
   generated_tests::Execute(device,
                            rotated_bbox_transform::createTestModel_single_batch_relaxed,
                            rotated_bbox_transform::is_ignored_single_batch_relaxed,
-                           rotated_bbox_transform::examples_single_batch_relaxed);
+                           rotated_bbox_transform::get_examples_single_batch_relaxed());
 }
 
 TEST_F(ValidationTest, rotated_bbox_transform_single_batch_relaxed) {
   const Model model = rotated_bbox_transform::createTestModel_single_batch_relaxed();
-  const std::vector<Request> requests = createRequests(rotated_bbox_transform::examples_single_batch_relaxed);
+  const std::vector<Request> requests = createRequests(rotated_bbox_transform::get_examples_single_batch_relaxed());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+// Generated from: slice.mod.py.
+namespace slice {
+// Generated slice test
+#include "examples/slice.example.cpp"
+// Generated model constructor
+#include "vts_models/slice.model.cpp"
+} // namespace slice
+
+TEST_F(NeuralnetworksHidlTest, slice) {
+  generated_tests::Execute(device,
+                           slice::createTestModel,
+                           slice::is_ignored,
+                           slice::get_examples());
+}
+
+TEST_F(ValidationTest, slice) {
+  const Model model = slice::createTestModel();
+  const std::vector<Request> requests = createRequests(slice::get_examples());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_relaxed) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_relaxed,
+                           slice::is_ignored_relaxed,
+                           slice::get_examples_relaxed());
+}
+
+TEST_F(ValidationTest, slice_relaxed) {
+  const Model model = slice::createTestModel_relaxed();
+  const std::vector<Request> requests = createRequests(slice::get_examples_relaxed());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_float16) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_float16,
+                           slice::is_ignored_float16,
+                           slice::get_examples_float16());
+}
+
+TEST_F(ValidationTest, slice_float16) {
+  const Model model = slice::createTestModel_float16();
+  const std::vector<Request> requests = createRequests(slice::get_examples_float16());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_2) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_2,
+                           slice::is_ignored_2,
+                           slice::get_examples_2());
+}
+
+TEST_F(ValidationTest, slice_2) {
+  const Model model = slice::createTestModel_2();
+  const std::vector<Request> requests = createRequests(slice::get_examples_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_relaxed_2) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_relaxed_2,
+                           slice::is_ignored_relaxed_2,
+                           slice::get_examples_relaxed_2());
+}
+
+TEST_F(ValidationTest, slice_relaxed_2) {
+  const Model model = slice::createTestModel_relaxed_2();
+  const std::vector<Request> requests = createRequests(slice::get_examples_relaxed_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_float16_2) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_float16_2,
+                           slice::is_ignored_float16_2,
+                           slice::get_examples_float16_2());
+}
+
+TEST_F(ValidationTest, slice_float16_2) {
+  const Model model = slice::createTestModel_float16_2();
+  const std::vector<Request> requests = createRequests(slice::get_examples_float16_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_3) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_3,
+                           slice::is_ignored_3,
+                           slice::get_examples_3());
+}
+
+TEST_F(ValidationTest, slice_3) {
+  const Model model = slice::createTestModel_3();
+  const std::vector<Request> requests = createRequests(slice::get_examples_3());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_relaxed_3) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_relaxed_3,
+                           slice::is_ignored_relaxed_3,
+                           slice::get_examples_relaxed_3());
+}
+
+TEST_F(ValidationTest, slice_relaxed_3) {
+  const Model model = slice::createTestModel_relaxed_3();
+  const std::vector<Request> requests = createRequests(slice::get_examples_relaxed_3());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_float16_3) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_float16_3,
+                           slice::is_ignored_float16_3,
+                           slice::get_examples_float16_3());
+}
+
+TEST_F(ValidationTest, slice_float16_3) {
+  const Model model = slice::createTestModel_float16_3();
+  const std::vector<Request> requests = createRequests(slice::get_examples_float16_3());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_4) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_4,
+                           slice::is_ignored_4,
+                           slice::get_examples_4());
+}
+
+TEST_F(ValidationTest, slice_4) {
+  const Model model = slice::createTestModel_4();
+  const std::vector<Request> requests = createRequests(slice::get_examples_4());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_relaxed_4) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_relaxed_4,
+                           slice::is_ignored_relaxed_4,
+                           slice::get_examples_relaxed_4());
+}
+
+TEST_F(ValidationTest, slice_relaxed_4) {
+  const Model model = slice::createTestModel_relaxed_4();
+  const std::vector<Request> requests = createRequests(slice::get_examples_relaxed_4());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_float16_4) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_float16_4,
+                           slice::is_ignored_float16_4,
+                           slice::get_examples_float16_4());
+}
+
+TEST_F(ValidationTest, slice_float16_4) {
+  const Model model = slice::createTestModel_float16_4();
+  const std::vector<Request> requests = createRequests(slice::get_examples_float16_4());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_5) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_5,
+                           slice::is_ignored_5,
+                           slice::get_examples_5());
+}
+
+TEST_F(ValidationTest, slice_5) {
+  const Model model = slice::createTestModel_5();
+  const std::vector<Request> requests = createRequests(slice::get_examples_5());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_relaxed_5) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_relaxed_5,
+                           slice::is_ignored_relaxed_5,
+                           slice::get_examples_relaxed_5());
+}
+
+TEST_F(ValidationTest, slice_relaxed_5) {
+  const Model model = slice::createTestModel_relaxed_5();
+  const std::vector<Request> requests = createRequests(slice::get_examples_relaxed_5());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_float16_5) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_float16_5,
+                           slice::is_ignored_float16_5,
+                           slice::get_examples_float16_5());
+}
+
+TEST_F(ValidationTest, slice_float16_5) {
+  const Model model = slice::createTestModel_float16_5();
+  const std::vector<Request> requests = createRequests(slice::get_examples_float16_5());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_6) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_6,
+                           slice::is_ignored_6,
+                           slice::get_examples_6());
+}
+
+TEST_F(ValidationTest, slice_6) {
+  const Model model = slice::createTestModel_6();
+  const std::vector<Request> requests = createRequests(slice::get_examples_6());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_relaxed_6) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_relaxed_6,
+                           slice::is_ignored_relaxed_6,
+                           slice::get_examples_relaxed_6());
+}
+
+TEST_F(ValidationTest, slice_relaxed_6) {
+  const Model model = slice::createTestModel_relaxed_6();
+  const std::vector<Request> requests = createRequests(slice::get_examples_relaxed_6());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_float16_6) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_float16_6,
+                           slice::is_ignored_float16_6,
+                           slice::get_examples_float16_6());
+}
+
+TEST_F(ValidationTest, slice_float16_6) {
+  const Model model = slice::createTestModel_float16_6();
+  const std::vector<Request> requests = createRequests(slice::get_examples_float16_6());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_7) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_7,
+                           slice::is_ignored_7,
+                           slice::get_examples_7());
+}
+
+TEST_F(ValidationTest, slice_7) {
+  const Model model = slice::createTestModel_7();
+  const std::vector<Request> requests = createRequests(slice::get_examples_7());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_relaxed_7) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_relaxed_7,
+                           slice::is_ignored_relaxed_7,
+                           slice::get_examples_relaxed_7());
+}
+
+TEST_F(ValidationTest, slice_relaxed_7) {
+  const Model model = slice::createTestModel_relaxed_7();
+  const std::vector<Request> requests = createRequests(slice::get_examples_relaxed_7());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_float16_7) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_float16_7,
+                           slice::is_ignored_float16_7,
+                           slice::get_examples_float16_7());
+}
+
+TEST_F(ValidationTest, slice_float16_7) {
+  const Model model = slice::createTestModel_float16_7();
+  const std::vector<Request> requests = createRequests(slice::get_examples_float16_7());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_8) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_8,
+                           slice::is_ignored_8,
+                           slice::get_examples_8());
+}
+
+TEST_F(ValidationTest, slice_8) {
+  const Model model = slice::createTestModel_8();
+  const std::vector<Request> requests = createRequests(slice::get_examples_8());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_relaxed_8) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_relaxed_8,
+                           slice::is_ignored_relaxed_8,
+                           slice::get_examples_relaxed_8());
+}
+
+TEST_F(ValidationTest, slice_relaxed_8) {
+  const Model model = slice::createTestModel_relaxed_8();
+  const std::vector<Request> requests = createRequests(slice::get_examples_relaxed_8());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_float16_8) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_float16_8,
+                           slice::is_ignored_float16_8,
+                           slice::get_examples_float16_8());
+}
+
+TEST_F(ValidationTest, slice_float16_8) {
+  const Model model = slice::createTestModel_float16_8();
+  const std::vector<Request> requests = createRequests(slice::get_examples_float16_8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11410,12 +12652,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel,
                            softmax_v1_2::is_ignored,
-                           softmax_v1_2::examples);
+                           softmax_v1_2::get_examples());
 }
 
 TEST_F(ValidationTest, softmax_v1_2) {
   const Model model = softmax_v1_2::createTestModel();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11425,12 +12667,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_dim1_axis0,
                            softmax_v1_2::is_ignored_dim1_axis0,
-                           softmax_v1_2::examples_dim1_axis0);
+                           softmax_v1_2::get_examples_dim1_axis0());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_dim1_axis0) {
   const Model model = softmax_v1_2::createTestModel_dim1_axis0();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_dim1_axis0);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_dim1_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11440,12 +12682,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_dim3_axis2,
                            softmax_v1_2::is_ignored_dim3_axis2,
-                           softmax_v1_2::examples_dim3_axis2);
+                           softmax_v1_2::get_examples_dim3_axis2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_dim3_axis2) {
   const Model model = softmax_v1_2::createTestModel_dim3_axis2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_dim3_axis2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_dim3_axis2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11455,12 +12697,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_relaxed,
                            softmax_v1_2::is_ignored_relaxed,
-                           softmax_v1_2::examples_relaxed);
+                           softmax_v1_2::get_examples_relaxed());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_relaxed) {
   const Model model = softmax_v1_2::createTestModel_relaxed();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_relaxed);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11470,12 +12712,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_relaxed_dim1_axis0,
                            softmax_v1_2::is_ignored_relaxed_dim1_axis0,
-                           softmax_v1_2::examples_relaxed_dim1_axis0);
+                           softmax_v1_2::get_examples_relaxed_dim1_axis0());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_relaxed_dim1_axis0) {
   const Model model = softmax_v1_2::createTestModel_relaxed_dim1_axis0();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_relaxed_dim1_axis0);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_relaxed_dim1_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11485,12 +12727,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_relaxed_dim3_axis2,
                            softmax_v1_2::is_ignored_relaxed_dim3_axis2,
-                           softmax_v1_2::examples_relaxed_dim3_axis2);
+                           softmax_v1_2::get_examples_relaxed_dim3_axis2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_relaxed_dim3_axis2) {
   const Model model = softmax_v1_2::createTestModel_relaxed_dim3_axis2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_relaxed_dim3_axis2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_relaxed_dim3_axis2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11500,12 +12742,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_float16,
                            softmax_v1_2::is_ignored_float16,
-                           softmax_v1_2::examples_float16);
+                           softmax_v1_2::get_examples_float16());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_float16) {
   const Model model = softmax_v1_2::createTestModel_float16();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_float16);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11515,12 +12757,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_float16_dim1_axis0,
                            softmax_v1_2::is_ignored_float16_dim1_axis0,
-                           softmax_v1_2::examples_float16_dim1_axis0);
+                           softmax_v1_2::get_examples_float16_dim1_axis0());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_float16_dim1_axis0) {
   const Model model = softmax_v1_2::createTestModel_float16_dim1_axis0();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_float16_dim1_axis0);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_float16_dim1_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11530,12 +12772,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_float16_dim3_axis2,
                            softmax_v1_2::is_ignored_float16_dim3_axis2,
-                           softmax_v1_2::examples_float16_dim3_axis2);
+                           softmax_v1_2::get_examples_float16_dim3_axis2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_float16_dim3_axis2) {
   const Model model = softmax_v1_2::createTestModel_float16_dim3_axis2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_float16_dim3_axis2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_float16_dim3_axis2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11545,12 +12787,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_quant8,
                            softmax_v1_2::is_ignored_quant8,
-                           softmax_v1_2::examples_quant8);
+                           softmax_v1_2::get_examples_quant8());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_quant8) {
   const Model model = softmax_v1_2::createTestModel_quant8();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_quant8);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11560,12 +12802,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_quant8_dim1_axis0,
                            softmax_v1_2::is_ignored_quant8_dim1_axis0,
-                           softmax_v1_2::examples_quant8_dim1_axis0);
+                           softmax_v1_2::get_examples_quant8_dim1_axis0());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_quant8_dim1_axis0) {
   const Model model = softmax_v1_2::createTestModel_quant8_dim1_axis0();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_quant8_dim1_axis0);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_quant8_dim1_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11575,12 +12817,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_quant8_dim3_axis2,
                            softmax_v1_2::is_ignored_quant8_dim3_axis2,
-                           softmax_v1_2::examples_quant8_dim3_axis2);
+                           softmax_v1_2::get_examples_quant8_dim3_axis2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_quant8_dim3_axis2) {
   const Model model = softmax_v1_2::createTestModel_quant8_dim3_axis2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_quant8_dim3_axis2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_quant8_dim3_axis2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11590,12 +12832,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_2,
                            softmax_v1_2::is_ignored_2,
-                           softmax_v1_2::examples_2);
+                           softmax_v1_2::get_examples_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_2) {
   const Model model = softmax_v1_2::createTestModel_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11605,12 +12847,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_dim1_axis0_2,
                            softmax_v1_2::is_ignored_dim1_axis0_2,
-                           softmax_v1_2::examples_dim1_axis0_2);
+                           softmax_v1_2::get_examples_dim1_axis0_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_dim1_axis0_2) {
   const Model model = softmax_v1_2::createTestModel_dim1_axis0_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_dim1_axis0_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_dim1_axis0_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11620,12 +12862,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_dim3_axis2_2,
                            softmax_v1_2::is_ignored_dim3_axis2_2,
-                           softmax_v1_2::examples_dim3_axis2_2);
+                           softmax_v1_2::get_examples_dim3_axis2_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_dim3_axis2_2) {
   const Model model = softmax_v1_2::createTestModel_dim3_axis2_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_dim3_axis2_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_dim3_axis2_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11635,12 +12877,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_relaxed_2,
                            softmax_v1_2::is_ignored_relaxed_2,
-                           softmax_v1_2::examples_relaxed_2);
+                           softmax_v1_2::get_examples_relaxed_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_relaxed_2) {
   const Model model = softmax_v1_2::createTestModel_relaxed_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_relaxed_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_relaxed_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11650,12 +12892,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_relaxed_dim1_axis0_2,
                            softmax_v1_2::is_ignored_relaxed_dim1_axis0_2,
-                           softmax_v1_2::examples_relaxed_dim1_axis0_2);
+                           softmax_v1_2::get_examples_relaxed_dim1_axis0_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_relaxed_dim1_axis0_2) {
   const Model model = softmax_v1_2::createTestModel_relaxed_dim1_axis0_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_relaxed_dim1_axis0_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_relaxed_dim1_axis0_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11665,12 +12907,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_relaxed_dim3_axis2_2,
                            softmax_v1_2::is_ignored_relaxed_dim3_axis2_2,
-                           softmax_v1_2::examples_relaxed_dim3_axis2_2);
+                           softmax_v1_2::get_examples_relaxed_dim3_axis2_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_relaxed_dim3_axis2_2) {
   const Model model = softmax_v1_2::createTestModel_relaxed_dim3_axis2_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_relaxed_dim3_axis2_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_relaxed_dim3_axis2_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11680,12 +12922,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_float16_2,
                            softmax_v1_2::is_ignored_float16_2,
-                           softmax_v1_2::examples_float16_2);
+                           softmax_v1_2::get_examples_float16_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_float16_2) {
   const Model model = softmax_v1_2::createTestModel_float16_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_float16_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_float16_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11695,12 +12937,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_float16_dim1_axis0_2,
                            softmax_v1_2::is_ignored_float16_dim1_axis0_2,
-                           softmax_v1_2::examples_float16_dim1_axis0_2);
+                           softmax_v1_2::get_examples_float16_dim1_axis0_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_float16_dim1_axis0_2) {
   const Model model = softmax_v1_2::createTestModel_float16_dim1_axis0_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_float16_dim1_axis0_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_float16_dim1_axis0_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11710,12 +12952,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_float16_dim3_axis2_2,
                            softmax_v1_2::is_ignored_float16_dim3_axis2_2,
-                           softmax_v1_2::examples_float16_dim3_axis2_2);
+                           softmax_v1_2::get_examples_float16_dim3_axis2_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_float16_dim3_axis2_2) {
   const Model model = softmax_v1_2::createTestModel_float16_dim3_axis2_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_float16_dim3_axis2_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_float16_dim3_axis2_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11725,12 +12967,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_quant8_2,
                            softmax_v1_2::is_ignored_quant8_2,
-                           softmax_v1_2::examples_quant8_2);
+                           softmax_v1_2::get_examples_quant8_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_quant8_2) {
   const Model model = softmax_v1_2::createTestModel_quant8_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_quant8_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_quant8_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11740,12 +12982,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_quant8_dim1_axis0_2,
                            softmax_v1_2::is_ignored_quant8_dim1_axis0_2,
-                           softmax_v1_2::examples_quant8_dim1_axis0_2);
+                           softmax_v1_2::get_examples_quant8_dim1_axis0_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_quant8_dim1_axis0_2) {
   const Model model = softmax_v1_2::createTestModel_quant8_dim1_axis0_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_quant8_dim1_axis0_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_quant8_dim1_axis0_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11755,12 +12997,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_quant8_dim3_axis2_2,
                            softmax_v1_2::is_ignored_quant8_dim3_axis2_2,
-                           softmax_v1_2::examples_quant8_dim3_axis2_2);
+                           softmax_v1_2::get_examples_quant8_dim3_axis2_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_quant8_dim3_axis2_2) {
   const Model model = softmax_v1_2::createTestModel_quant8_dim3_axis2_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_quant8_dim3_axis2_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_quant8_dim3_axis2_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11770,12 +13012,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim4_axis0,
                            softmax_v1_2::is_ignored_axis_dim4_axis0,
-                           softmax_v1_2::examples_axis_dim4_axis0);
+                           softmax_v1_2::get_examples_axis_dim4_axis0());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim4_axis0) {
   const Model model = softmax_v1_2::createTestModel_axis_dim4_axis0();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim4_axis0);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim4_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11785,12 +13027,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim4_axis0_neg,
                            softmax_v1_2::is_ignored_axis_dim4_axis0_neg,
-                           softmax_v1_2::examples_axis_dim4_axis0_neg);
+                           softmax_v1_2::get_examples_axis_dim4_axis0_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim4_axis0_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_dim4_axis0_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim4_axis0_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim4_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11800,12 +13042,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim4_axis1,
                            softmax_v1_2::is_ignored_axis_dim4_axis1,
-                           softmax_v1_2::examples_axis_dim4_axis1);
+                           softmax_v1_2::get_examples_axis_dim4_axis1());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim4_axis1) {
   const Model model = softmax_v1_2::createTestModel_axis_dim4_axis1();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim4_axis1);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim4_axis1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11815,12 +13057,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim4_axis1_neg,
                            softmax_v1_2::is_ignored_axis_dim4_axis1_neg,
-                           softmax_v1_2::examples_axis_dim4_axis1_neg);
+                           softmax_v1_2::get_examples_axis_dim4_axis1_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim4_axis1_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_dim4_axis1_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim4_axis1_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim4_axis1_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11830,12 +13072,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim4_axis2,
                            softmax_v1_2::is_ignored_axis_dim4_axis2,
-                           softmax_v1_2::examples_axis_dim4_axis2);
+                           softmax_v1_2::get_examples_axis_dim4_axis2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim4_axis2) {
   const Model model = softmax_v1_2::createTestModel_axis_dim4_axis2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim4_axis2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim4_axis2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11845,12 +13087,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim4_axis2_neg,
                            softmax_v1_2::is_ignored_axis_dim4_axis2_neg,
-                           softmax_v1_2::examples_axis_dim4_axis2_neg);
+                           softmax_v1_2::get_examples_axis_dim4_axis2_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim4_axis2_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_dim4_axis2_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim4_axis2_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim4_axis2_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11860,12 +13102,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim4_axis3,
                            softmax_v1_2::is_ignored_axis_dim4_axis3,
-                           softmax_v1_2::examples_axis_dim4_axis3);
+                           softmax_v1_2::get_examples_axis_dim4_axis3());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim4_axis3) {
   const Model model = softmax_v1_2::createTestModel_axis_dim4_axis3();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim4_axis3);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim4_axis3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11875,12 +13117,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim4_axis3_neg,
                            softmax_v1_2::is_ignored_axis_dim4_axis3_neg,
-                           softmax_v1_2::examples_axis_dim4_axis3_neg);
+                           softmax_v1_2::get_examples_axis_dim4_axis3_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim4_axis3_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_dim4_axis3_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim4_axis3_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim4_axis3_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11890,12 +13132,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim3_axis0,
                            softmax_v1_2::is_ignored_axis_dim3_axis0,
-                           softmax_v1_2::examples_axis_dim3_axis0);
+                           softmax_v1_2::get_examples_axis_dim3_axis0());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim3_axis0) {
   const Model model = softmax_v1_2::createTestModel_axis_dim3_axis0();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim3_axis0);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim3_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11905,12 +13147,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim3_axis0_neg,
                            softmax_v1_2::is_ignored_axis_dim3_axis0_neg,
-                           softmax_v1_2::examples_axis_dim3_axis0_neg);
+                           softmax_v1_2::get_examples_axis_dim3_axis0_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim3_axis0_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_dim3_axis0_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim3_axis0_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim3_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11920,12 +13162,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim3_axis1,
                            softmax_v1_2::is_ignored_axis_dim3_axis1,
-                           softmax_v1_2::examples_axis_dim3_axis1);
+                           softmax_v1_2::get_examples_axis_dim3_axis1());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim3_axis1) {
   const Model model = softmax_v1_2::createTestModel_axis_dim3_axis1();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim3_axis1);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim3_axis1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11935,12 +13177,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim3_axis1_neg,
                            softmax_v1_2::is_ignored_axis_dim3_axis1_neg,
-                           softmax_v1_2::examples_axis_dim3_axis1_neg);
+                           softmax_v1_2::get_examples_axis_dim3_axis1_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim3_axis1_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_dim3_axis1_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim3_axis1_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim3_axis1_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11950,12 +13192,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim3_axis2,
                            softmax_v1_2::is_ignored_axis_dim3_axis2,
-                           softmax_v1_2::examples_axis_dim3_axis2);
+                           softmax_v1_2::get_examples_axis_dim3_axis2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim3_axis2) {
   const Model model = softmax_v1_2::createTestModel_axis_dim3_axis2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim3_axis2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim3_axis2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11965,12 +13207,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim3_axis2_neg,
                            softmax_v1_2::is_ignored_axis_dim3_axis2_neg,
-                           softmax_v1_2::examples_axis_dim3_axis2_neg);
+                           softmax_v1_2::get_examples_axis_dim3_axis2_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim3_axis2_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_dim3_axis2_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim3_axis2_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim3_axis2_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11980,12 +13222,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim2_axis0,
                            softmax_v1_2::is_ignored_axis_dim2_axis0,
-                           softmax_v1_2::examples_axis_dim2_axis0);
+                           softmax_v1_2::get_examples_axis_dim2_axis0());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim2_axis0) {
   const Model model = softmax_v1_2::createTestModel_axis_dim2_axis0();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim2_axis0);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim2_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -11995,12 +13237,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim2_axis0_neg,
                            softmax_v1_2::is_ignored_axis_dim2_axis0_neg,
-                           softmax_v1_2::examples_axis_dim2_axis0_neg);
+                           softmax_v1_2::get_examples_axis_dim2_axis0_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim2_axis0_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_dim2_axis0_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim2_axis0_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim2_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12010,12 +13252,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim2_axis1,
                            softmax_v1_2::is_ignored_axis_dim2_axis1,
-                           softmax_v1_2::examples_axis_dim2_axis1);
+                           softmax_v1_2::get_examples_axis_dim2_axis1());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim2_axis1) {
   const Model model = softmax_v1_2::createTestModel_axis_dim2_axis1();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim2_axis1);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim2_axis1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12025,12 +13267,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim2_axis1_neg,
                            softmax_v1_2::is_ignored_axis_dim2_axis1_neg,
-                           softmax_v1_2::examples_axis_dim2_axis1_neg);
+                           softmax_v1_2::get_examples_axis_dim2_axis1_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim2_axis1_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_dim2_axis1_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim2_axis1_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim2_axis1_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12040,12 +13282,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim1_axis0,
                            softmax_v1_2::is_ignored_axis_dim1_axis0,
-                           softmax_v1_2::examples_axis_dim1_axis0);
+                           softmax_v1_2::get_examples_axis_dim1_axis0());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim1_axis0) {
   const Model model = softmax_v1_2::createTestModel_axis_dim1_axis0();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim1_axis0);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim1_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12055,12 +13297,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim1_axis0_neg,
                            softmax_v1_2::is_ignored_axis_dim1_axis0_neg,
-                           softmax_v1_2::examples_axis_dim1_axis0_neg);
+                           softmax_v1_2::get_examples_axis_dim1_axis0_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim1_axis0_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_dim1_axis0_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim1_axis0_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim1_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12070,12 +13312,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim4_axis0,
                            softmax_v1_2::is_ignored_axis_relaxed_dim4_axis0,
-                           softmax_v1_2::examples_axis_relaxed_dim4_axis0);
+                           softmax_v1_2::get_examples_axis_relaxed_dim4_axis0());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim4_axis0) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim4_axis0();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim4_axis0);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim4_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12085,12 +13327,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim4_axis0_neg,
                            softmax_v1_2::is_ignored_axis_relaxed_dim4_axis0_neg,
-                           softmax_v1_2::examples_axis_relaxed_dim4_axis0_neg);
+                           softmax_v1_2::get_examples_axis_relaxed_dim4_axis0_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim4_axis0_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim4_axis0_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim4_axis0_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim4_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12100,12 +13342,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim4_axis1,
                            softmax_v1_2::is_ignored_axis_relaxed_dim4_axis1,
-                           softmax_v1_2::examples_axis_relaxed_dim4_axis1);
+                           softmax_v1_2::get_examples_axis_relaxed_dim4_axis1());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim4_axis1) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim4_axis1();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim4_axis1);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim4_axis1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12115,12 +13357,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim4_axis1_neg,
                            softmax_v1_2::is_ignored_axis_relaxed_dim4_axis1_neg,
-                           softmax_v1_2::examples_axis_relaxed_dim4_axis1_neg);
+                           softmax_v1_2::get_examples_axis_relaxed_dim4_axis1_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim4_axis1_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim4_axis1_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim4_axis1_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim4_axis1_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12130,12 +13372,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim4_axis2,
                            softmax_v1_2::is_ignored_axis_relaxed_dim4_axis2,
-                           softmax_v1_2::examples_axis_relaxed_dim4_axis2);
+                           softmax_v1_2::get_examples_axis_relaxed_dim4_axis2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim4_axis2) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim4_axis2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim4_axis2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim4_axis2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12145,12 +13387,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim4_axis2_neg,
                            softmax_v1_2::is_ignored_axis_relaxed_dim4_axis2_neg,
-                           softmax_v1_2::examples_axis_relaxed_dim4_axis2_neg);
+                           softmax_v1_2::get_examples_axis_relaxed_dim4_axis2_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim4_axis2_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim4_axis2_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim4_axis2_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim4_axis2_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12160,12 +13402,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim4_axis3,
                            softmax_v1_2::is_ignored_axis_relaxed_dim4_axis3,
-                           softmax_v1_2::examples_axis_relaxed_dim4_axis3);
+                           softmax_v1_2::get_examples_axis_relaxed_dim4_axis3());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim4_axis3) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim4_axis3();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim4_axis3);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim4_axis3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12175,12 +13417,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim4_axis3_neg,
                            softmax_v1_2::is_ignored_axis_relaxed_dim4_axis3_neg,
-                           softmax_v1_2::examples_axis_relaxed_dim4_axis3_neg);
+                           softmax_v1_2::get_examples_axis_relaxed_dim4_axis3_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim4_axis3_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim4_axis3_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim4_axis3_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim4_axis3_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12190,12 +13432,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim3_axis0,
                            softmax_v1_2::is_ignored_axis_relaxed_dim3_axis0,
-                           softmax_v1_2::examples_axis_relaxed_dim3_axis0);
+                           softmax_v1_2::get_examples_axis_relaxed_dim3_axis0());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim3_axis0) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim3_axis0();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim3_axis0);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim3_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12205,12 +13447,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim3_axis0_neg,
                            softmax_v1_2::is_ignored_axis_relaxed_dim3_axis0_neg,
-                           softmax_v1_2::examples_axis_relaxed_dim3_axis0_neg);
+                           softmax_v1_2::get_examples_axis_relaxed_dim3_axis0_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim3_axis0_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim3_axis0_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim3_axis0_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim3_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12220,12 +13462,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim3_axis1,
                            softmax_v1_2::is_ignored_axis_relaxed_dim3_axis1,
-                           softmax_v1_2::examples_axis_relaxed_dim3_axis1);
+                           softmax_v1_2::get_examples_axis_relaxed_dim3_axis1());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim3_axis1) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim3_axis1();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim3_axis1);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim3_axis1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12235,12 +13477,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim3_axis1_neg,
                            softmax_v1_2::is_ignored_axis_relaxed_dim3_axis1_neg,
-                           softmax_v1_2::examples_axis_relaxed_dim3_axis1_neg);
+                           softmax_v1_2::get_examples_axis_relaxed_dim3_axis1_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim3_axis1_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim3_axis1_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim3_axis1_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim3_axis1_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12250,12 +13492,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim3_axis2,
                            softmax_v1_2::is_ignored_axis_relaxed_dim3_axis2,
-                           softmax_v1_2::examples_axis_relaxed_dim3_axis2);
+                           softmax_v1_2::get_examples_axis_relaxed_dim3_axis2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim3_axis2) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim3_axis2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim3_axis2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim3_axis2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12265,12 +13507,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim3_axis2_neg,
                            softmax_v1_2::is_ignored_axis_relaxed_dim3_axis2_neg,
-                           softmax_v1_2::examples_axis_relaxed_dim3_axis2_neg);
+                           softmax_v1_2::get_examples_axis_relaxed_dim3_axis2_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim3_axis2_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim3_axis2_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim3_axis2_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim3_axis2_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12280,12 +13522,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim2_axis0,
                            softmax_v1_2::is_ignored_axis_relaxed_dim2_axis0,
-                           softmax_v1_2::examples_axis_relaxed_dim2_axis0);
+                           softmax_v1_2::get_examples_axis_relaxed_dim2_axis0());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim2_axis0) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim2_axis0();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim2_axis0);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim2_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12295,12 +13537,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim2_axis0_neg,
                            softmax_v1_2::is_ignored_axis_relaxed_dim2_axis0_neg,
-                           softmax_v1_2::examples_axis_relaxed_dim2_axis0_neg);
+                           softmax_v1_2::get_examples_axis_relaxed_dim2_axis0_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim2_axis0_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim2_axis0_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim2_axis0_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim2_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12310,12 +13552,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim2_axis1,
                            softmax_v1_2::is_ignored_axis_relaxed_dim2_axis1,
-                           softmax_v1_2::examples_axis_relaxed_dim2_axis1);
+                           softmax_v1_2::get_examples_axis_relaxed_dim2_axis1());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim2_axis1) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim2_axis1();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim2_axis1);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim2_axis1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12325,12 +13567,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim2_axis1_neg,
                            softmax_v1_2::is_ignored_axis_relaxed_dim2_axis1_neg,
-                           softmax_v1_2::examples_axis_relaxed_dim2_axis1_neg);
+                           softmax_v1_2::get_examples_axis_relaxed_dim2_axis1_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim2_axis1_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim2_axis1_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim2_axis1_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim2_axis1_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12340,12 +13582,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim1_axis0,
                            softmax_v1_2::is_ignored_axis_relaxed_dim1_axis0,
-                           softmax_v1_2::examples_axis_relaxed_dim1_axis0);
+                           softmax_v1_2::get_examples_axis_relaxed_dim1_axis0());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim1_axis0) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim1_axis0();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim1_axis0);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim1_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12355,12 +13597,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim1_axis0_neg,
                            softmax_v1_2::is_ignored_axis_relaxed_dim1_axis0_neg,
-                           softmax_v1_2::examples_axis_relaxed_dim1_axis0_neg);
+                           softmax_v1_2::get_examples_axis_relaxed_dim1_axis0_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim1_axis0_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim1_axis0_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim1_axis0_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim1_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12370,12 +13612,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim4_axis0,
                            softmax_v1_2::is_ignored_axis_float16_dim4_axis0,
-                           softmax_v1_2::examples_axis_float16_dim4_axis0);
+                           softmax_v1_2::get_examples_axis_float16_dim4_axis0());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim4_axis0) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim4_axis0();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim4_axis0);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim4_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12385,12 +13627,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim4_axis0_neg,
                            softmax_v1_2::is_ignored_axis_float16_dim4_axis0_neg,
-                           softmax_v1_2::examples_axis_float16_dim4_axis0_neg);
+                           softmax_v1_2::get_examples_axis_float16_dim4_axis0_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim4_axis0_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim4_axis0_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim4_axis0_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim4_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12400,12 +13642,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim4_axis1,
                            softmax_v1_2::is_ignored_axis_float16_dim4_axis1,
-                           softmax_v1_2::examples_axis_float16_dim4_axis1);
+                           softmax_v1_2::get_examples_axis_float16_dim4_axis1());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim4_axis1) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim4_axis1();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim4_axis1);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim4_axis1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12415,12 +13657,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim4_axis1_neg,
                            softmax_v1_2::is_ignored_axis_float16_dim4_axis1_neg,
-                           softmax_v1_2::examples_axis_float16_dim4_axis1_neg);
+                           softmax_v1_2::get_examples_axis_float16_dim4_axis1_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim4_axis1_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim4_axis1_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim4_axis1_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim4_axis1_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12430,12 +13672,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim4_axis2,
                            softmax_v1_2::is_ignored_axis_float16_dim4_axis2,
-                           softmax_v1_2::examples_axis_float16_dim4_axis2);
+                           softmax_v1_2::get_examples_axis_float16_dim4_axis2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim4_axis2) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim4_axis2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim4_axis2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim4_axis2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12445,12 +13687,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim4_axis2_neg,
                            softmax_v1_2::is_ignored_axis_float16_dim4_axis2_neg,
-                           softmax_v1_2::examples_axis_float16_dim4_axis2_neg);
+                           softmax_v1_2::get_examples_axis_float16_dim4_axis2_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim4_axis2_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim4_axis2_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim4_axis2_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim4_axis2_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12460,12 +13702,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim4_axis3,
                            softmax_v1_2::is_ignored_axis_float16_dim4_axis3,
-                           softmax_v1_2::examples_axis_float16_dim4_axis3);
+                           softmax_v1_2::get_examples_axis_float16_dim4_axis3());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim4_axis3) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim4_axis3();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim4_axis3);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim4_axis3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12475,12 +13717,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim4_axis3_neg,
                            softmax_v1_2::is_ignored_axis_float16_dim4_axis3_neg,
-                           softmax_v1_2::examples_axis_float16_dim4_axis3_neg);
+                           softmax_v1_2::get_examples_axis_float16_dim4_axis3_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim4_axis3_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim4_axis3_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim4_axis3_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim4_axis3_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12490,12 +13732,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim3_axis0,
                            softmax_v1_2::is_ignored_axis_float16_dim3_axis0,
-                           softmax_v1_2::examples_axis_float16_dim3_axis0);
+                           softmax_v1_2::get_examples_axis_float16_dim3_axis0());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim3_axis0) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim3_axis0();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim3_axis0);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim3_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12505,12 +13747,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim3_axis0_neg,
                            softmax_v1_2::is_ignored_axis_float16_dim3_axis0_neg,
-                           softmax_v1_2::examples_axis_float16_dim3_axis0_neg);
+                           softmax_v1_2::get_examples_axis_float16_dim3_axis0_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim3_axis0_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim3_axis0_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim3_axis0_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim3_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12520,12 +13762,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim3_axis1,
                            softmax_v1_2::is_ignored_axis_float16_dim3_axis1,
-                           softmax_v1_2::examples_axis_float16_dim3_axis1);
+                           softmax_v1_2::get_examples_axis_float16_dim3_axis1());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim3_axis1) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim3_axis1();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim3_axis1);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim3_axis1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12535,12 +13777,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim3_axis1_neg,
                            softmax_v1_2::is_ignored_axis_float16_dim3_axis1_neg,
-                           softmax_v1_2::examples_axis_float16_dim3_axis1_neg);
+                           softmax_v1_2::get_examples_axis_float16_dim3_axis1_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim3_axis1_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim3_axis1_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim3_axis1_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim3_axis1_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12550,12 +13792,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim3_axis2,
                            softmax_v1_2::is_ignored_axis_float16_dim3_axis2,
-                           softmax_v1_2::examples_axis_float16_dim3_axis2);
+                           softmax_v1_2::get_examples_axis_float16_dim3_axis2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim3_axis2) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim3_axis2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim3_axis2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim3_axis2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12565,12 +13807,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim3_axis2_neg,
                            softmax_v1_2::is_ignored_axis_float16_dim3_axis2_neg,
-                           softmax_v1_2::examples_axis_float16_dim3_axis2_neg);
+                           softmax_v1_2::get_examples_axis_float16_dim3_axis2_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim3_axis2_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim3_axis2_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim3_axis2_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim3_axis2_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12580,12 +13822,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim2_axis0,
                            softmax_v1_2::is_ignored_axis_float16_dim2_axis0,
-                           softmax_v1_2::examples_axis_float16_dim2_axis0);
+                           softmax_v1_2::get_examples_axis_float16_dim2_axis0());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim2_axis0) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim2_axis0();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim2_axis0);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim2_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12595,12 +13837,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim2_axis0_neg,
                            softmax_v1_2::is_ignored_axis_float16_dim2_axis0_neg,
-                           softmax_v1_2::examples_axis_float16_dim2_axis0_neg);
+                           softmax_v1_2::get_examples_axis_float16_dim2_axis0_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim2_axis0_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim2_axis0_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim2_axis0_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim2_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12610,12 +13852,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim2_axis1,
                            softmax_v1_2::is_ignored_axis_float16_dim2_axis1,
-                           softmax_v1_2::examples_axis_float16_dim2_axis1);
+                           softmax_v1_2::get_examples_axis_float16_dim2_axis1());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim2_axis1) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim2_axis1();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim2_axis1);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim2_axis1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12625,12 +13867,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim2_axis1_neg,
                            softmax_v1_2::is_ignored_axis_float16_dim2_axis1_neg,
-                           softmax_v1_2::examples_axis_float16_dim2_axis1_neg);
+                           softmax_v1_2::get_examples_axis_float16_dim2_axis1_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim2_axis1_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim2_axis1_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim2_axis1_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim2_axis1_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12640,12 +13882,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim1_axis0,
                            softmax_v1_2::is_ignored_axis_float16_dim1_axis0,
-                           softmax_v1_2::examples_axis_float16_dim1_axis0);
+                           softmax_v1_2::get_examples_axis_float16_dim1_axis0());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim1_axis0) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim1_axis0();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim1_axis0);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim1_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12655,12 +13897,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim1_axis0_neg,
                            softmax_v1_2::is_ignored_axis_float16_dim1_axis0_neg,
-                           softmax_v1_2::examples_axis_float16_dim1_axis0_neg);
+                           softmax_v1_2::get_examples_axis_float16_dim1_axis0_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim1_axis0_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim1_axis0_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim1_axis0_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim1_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12670,12 +13912,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim4_axis0,
                            softmax_v1_2::is_ignored_axis_quant8_dim4_axis0,
-                           softmax_v1_2::examples_axis_quant8_dim4_axis0);
+                           softmax_v1_2::get_examples_axis_quant8_dim4_axis0());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim4_axis0) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim4_axis0();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim4_axis0);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim4_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12685,12 +13927,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim4_axis0_neg,
                            softmax_v1_2::is_ignored_axis_quant8_dim4_axis0_neg,
-                           softmax_v1_2::examples_axis_quant8_dim4_axis0_neg);
+                           softmax_v1_2::get_examples_axis_quant8_dim4_axis0_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim4_axis0_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim4_axis0_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim4_axis0_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim4_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12700,12 +13942,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim4_axis1,
                            softmax_v1_2::is_ignored_axis_quant8_dim4_axis1,
-                           softmax_v1_2::examples_axis_quant8_dim4_axis1);
+                           softmax_v1_2::get_examples_axis_quant8_dim4_axis1());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim4_axis1) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim4_axis1();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim4_axis1);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim4_axis1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12715,12 +13957,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim4_axis1_neg,
                            softmax_v1_2::is_ignored_axis_quant8_dim4_axis1_neg,
-                           softmax_v1_2::examples_axis_quant8_dim4_axis1_neg);
+                           softmax_v1_2::get_examples_axis_quant8_dim4_axis1_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim4_axis1_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim4_axis1_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim4_axis1_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim4_axis1_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12730,12 +13972,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim4_axis2,
                            softmax_v1_2::is_ignored_axis_quant8_dim4_axis2,
-                           softmax_v1_2::examples_axis_quant8_dim4_axis2);
+                           softmax_v1_2::get_examples_axis_quant8_dim4_axis2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim4_axis2) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim4_axis2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim4_axis2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim4_axis2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12745,12 +13987,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim4_axis2_neg,
                            softmax_v1_2::is_ignored_axis_quant8_dim4_axis2_neg,
-                           softmax_v1_2::examples_axis_quant8_dim4_axis2_neg);
+                           softmax_v1_2::get_examples_axis_quant8_dim4_axis2_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim4_axis2_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim4_axis2_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim4_axis2_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim4_axis2_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12760,12 +14002,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim4_axis3,
                            softmax_v1_2::is_ignored_axis_quant8_dim4_axis3,
-                           softmax_v1_2::examples_axis_quant8_dim4_axis3);
+                           softmax_v1_2::get_examples_axis_quant8_dim4_axis3());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim4_axis3) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim4_axis3();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim4_axis3);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim4_axis3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12775,12 +14017,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim4_axis3_neg,
                            softmax_v1_2::is_ignored_axis_quant8_dim4_axis3_neg,
-                           softmax_v1_2::examples_axis_quant8_dim4_axis3_neg);
+                           softmax_v1_2::get_examples_axis_quant8_dim4_axis3_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim4_axis3_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim4_axis3_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim4_axis3_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim4_axis3_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12790,12 +14032,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim3_axis0,
                            softmax_v1_2::is_ignored_axis_quant8_dim3_axis0,
-                           softmax_v1_2::examples_axis_quant8_dim3_axis0);
+                           softmax_v1_2::get_examples_axis_quant8_dim3_axis0());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim3_axis0) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim3_axis0();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim3_axis0);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim3_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12805,12 +14047,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim3_axis0_neg,
                            softmax_v1_2::is_ignored_axis_quant8_dim3_axis0_neg,
-                           softmax_v1_2::examples_axis_quant8_dim3_axis0_neg);
+                           softmax_v1_2::get_examples_axis_quant8_dim3_axis0_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim3_axis0_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim3_axis0_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim3_axis0_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim3_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12820,12 +14062,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim3_axis1,
                            softmax_v1_2::is_ignored_axis_quant8_dim3_axis1,
-                           softmax_v1_2::examples_axis_quant8_dim3_axis1);
+                           softmax_v1_2::get_examples_axis_quant8_dim3_axis1());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim3_axis1) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim3_axis1();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim3_axis1);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim3_axis1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12835,12 +14077,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim3_axis1_neg,
                            softmax_v1_2::is_ignored_axis_quant8_dim3_axis1_neg,
-                           softmax_v1_2::examples_axis_quant8_dim3_axis1_neg);
+                           softmax_v1_2::get_examples_axis_quant8_dim3_axis1_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim3_axis1_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim3_axis1_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim3_axis1_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim3_axis1_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12850,12 +14092,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim3_axis2,
                            softmax_v1_2::is_ignored_axis_quant8_dim3_axis2,
-                           softmax_v1_2::examples_axis_quant8_dim3_axis2);
+                           softmax_v1_2::get_examples_axis_quant8_dim3_axis2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim3_axis2) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim3_axis2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim3_axis2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim3_axis2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12865,12 +14107,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim3_axis2_neg,
                            softmax_v1_2::is_ignored_axis_quant8_dim3_axis2_neg,
-                           softmax_v1_2::examples_axis_quant8_dim3_axis2_neg);
+                           softmax_v1_2::get_examples_axis_quant8_dim3_axis2_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim3_axis2_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim3_axis2_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim3_axis2_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim3_axis2_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12880,12 +14122,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim2_axis0,
                            softmax_v1_2::is_ignored_axis_quant8_dim2_axis0,
-                           softmax_v1_2::examples_axis_quant8_dim2_axis0);
+                           softmax_v1_2::get_examples_axis_quant8_dim2_axis0());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim2_axis0) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim2_axis0();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim2_axis0);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim2_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12895,12 +14137,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim2_axis0_neg,
                            softmax_v1_2::is_ignored_axis_quant8_dim2_axis0_neg,
-                           softmax_v1_2::examples_axis_quant8_dim2_axis0_neg);
+                           softmax_v1_2::get_examples_axis_quant8_dim2_axis0_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim2_axis0_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim2_axis0_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim2_axis0_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim2_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12910,12 +14152,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim2_axis1,
                            softmax_v1_2::is_ignored_axis_quant8_dim2_axis1,
-                           softmax_v1_2::examples_axis_quant8_dim2_axis1);
+                           softmax_v1_2::get_examples_axis_quant8_dim2_axis1());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim2_axis1) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim2_axis1();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim2_axis1);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim2_axis1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12925,12 +14167,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim2_axis1_neg,
                            softmax_v1_2::is_ignored_axis_quant8_dim2_axis1_neg,
-                           softmax_v1_2::examples_axis_quant8_dim2_axis1_neg);
+                           softmax_v1_2::get_examples_axis_quant8_dim2_axis1_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim2_axis1_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim2_axis1_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim2_axis1_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim2_axis1_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12940,12 +14182,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim1_axis0,
                            softmax_v1_2::is_ignored_axis_quant8_dim1_axis0,
-                           softmax_v1_2::examples_axis_quant8_dim1_axis0);
+                           softmax_v1_2::get_examples_axis_quant8_dim1_axis0());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim1_axis0) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim1_axis0();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim1_axis0);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim1_axis0());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12955,12 +14197,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim1_axis0_neg,
                            softmax_v1_2::is_ignored_axis_quant8_dim1_axis0_neg,
-                           softmax_v1_2::examples_axis_quant8_dim1_axis0_neg);
+                           softmax_v1_2::get_examples_axis_quant8_dim1_axis0_neg());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim1_axis0_neg) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim1_axis0_neg();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim1_axis0_neg);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim1_axis0_neg());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12970,12 +14212,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim4_axis0_2,
                            softmax_v1_2::is_ignored_axis_dim4_axis0_2,
-                           softmax_v1_2::examples_axis_dim4_axis0_2);
+                           softmax_v1_2::get_examples_axis_dim4_axis0_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim4_axis0_2) {
   const Model model = softmax_v1_2::createTestModel_axis_dim4_axis0_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim4_axis0_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim4_axis0_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -12985,12 +14227,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim4_axis0_neg_2,
                            softmax_v1_2::is_ignored_axis_dim4_axis0_neg_2,
-                           softmax_v1_2::examples_axis_dim4_axis0_neg_2);
+                           softmax_v1_2::get_examples_axis_dim4_axis0_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim4_axis0_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_dim4_axis0_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim4_axis0_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim4_axis0_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13000,12 +14242,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim4_axis1_2,
                            softmax_v1_2::is_ignored_axis_dim4_axis1_2,
-                           softmax_v1_2::examples_axis_dim4_axis1_2);
+                           softmax_v1_2::get_examples_axis_dim4_axis1_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim4_axis1_2) {
   const Model model = softmax_v1_2::createTestModel_axis_dim4_axis1_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim4_axis1_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim4_axis1_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13015,12 +14257,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim4_axis1_neg_2,
                            softmax_v1_2::is_ignored_axis_dim4_axis1_neg_2,
-                           softmax_v1_2::examples_axis_dim4_axis1_neg_2);
+                           softmax_v1_2::get_examples_axis_dim4_axis1_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim4_axis1_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_dim4_axis1_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim4_axis1_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim4_axis1_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13030,12 +14272,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim4_axis2_2,
                            softmax_v1_2::is_ignored_axis_dim4_axis2_2,
-                           softmax_v1_2::examples_axis_dim4_axis2_2);
+                           softmax_v1_2::get_examples_axis_dim4_axis2_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim4_axis2_2) {
   const Model model = softmax_v1_2::createTestModel_axis_dim4_axis2_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim4_axis2_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim4_axis2_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13045,12 +14287,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim4_axis2_neg_2,
                            softmax_v1_2::is_ignored_axis_dim4_axis2_neg_2,
-                           softmax_v1_2::examples_axis_dim4_axis2_neg_2);
+                           softmax_v1_2::get_examples_axis_dim4_axis2_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim4_axis2_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_dim4_axis2_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim4_axis2_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim4_axis2_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13060,12 +14302,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim4_axis3_2,
                            softmax_v1_2::is_ignored_axis_dim4_axis3_2,
-                           softmax_v1_2::examples_axis_dim4_axis3_2);
+                           softmax_v1_2::get_examples_axis_dim4_axis3_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim4_axis3_2) {
   const Model model = softmax_v1_2::createTestModel_axis_dim4_axis3_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim4_axis3_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim4_axis3_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13075,12 +14317,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim4_axis3_neg_2,
                            softmax_v1_2::is_ignored_axis_dim4_axis3_neg_2,
-                           softmax_v1_2::examples_axis_dim4_axis3_neg_2);
+                           softmax_v1_2::get_examples_axis_dim4_axis3_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim4_axis3_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_dim4_axis3_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim4_axis3_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim4_axis3_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13090,12 +14332,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim3_axis0_2,
                            softmax_v1_2::is_ignored_axis_dim3_axis0_2,
-                           softmax_v1_2::examples_axis_dim3_axis0_2);
+                           softmax_v1_2::get_examples_axis_dim3_axis0_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim3_axis0_2) {
   const Model model = softmax_v1_2::createTestModel_axis_dim3_axis0_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim3_axis0_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim3_axis0_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13105,12 +14347,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim3_axis0_neg_2,
                            softmax_v1_2::is_ignored_axis_dim3_axis0_neg_2,
-                           softmax_v1_2::examples_axis_dim3_axis0_neg_2);
+                           softmax_v1_2::get_examples_axis_dim3_axis0_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim3_axis0_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_dim3_axis0_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim3_axis0_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim3_axis0_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13120,12 +14362,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim3_axis1_2,
                            softmax_v1_2::is_ignored_axis_dim3_axis1_2,
-                           softmax_v1_2::examples_axis_dim3_axis1_2);
+                           softmax_v1_2::get_examples_axis_dim3_axis1_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim3_axis1_2) {
   const Model model = softmax_v1_2::createTestModel_axis_dim3_axis1_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim3_axis1_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim3_axis1_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13135,12 +14377,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim3_axis1_neg_2,
                            softmax_v1_2::is_ignored_axis_dim3_axis1_neg_2,
-                           softmax_v1_2::examples_axis_dim3_axis1_neg_2);
+                           softmax_v1_2::get_examples_axis_dim3_axis1_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim3_axis1_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_dim3_axis1_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim3_axis1_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim3_axis1_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13150,12 +14392,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim3_axis2_2,
                            softmax_v1_2::is_ignored_axis_dim3_axis2_2,
-                           softmax_v1_2::examples_axis_dim3_axis2_2);
+                           softmax_v1_2::get_examples_axis_dim3_axis2_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim3_axis2_2) {
   const Model model = softmax_v1_2::createTestModel_axis_dim3_axis2_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim3_axis2_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim3_axis2_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13165,12 +14407,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim3_axis2_neg_2,
                            softmax_v1_2::is_ignored_axis_dim3_axis2_neg_2,
-                           softmax_v1_2::examples_axis_dim3_axis2_neg_2);
+                           softmax_v1_2::get_examples_axis_dim3_axis2_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim3_axis2_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_dim3_axis2_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim3_axis2_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim3_axis2_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13180,12 +14422,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim2_axis0_2,
                            softmax_v1_2::is_ignored_axis_dim2_axis0_2,
-                           softmax_v1_2::examples_axis_dim2_axis0_2);
+                           softmax_v1_2::get_examples_axis_dim2_axis0_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim2_axis0_2) {
   const Model model = softmax_v1_2::createTestModel_axis_dim2_axis0_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim2_axis0_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim2_axis0_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13195,12 +14437,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim2_axis0_neg_2,
                            softmax_v1_2::is_ignored_axis_dim2_axis0_neg_2,
-                           softmax_v1_2::examples_axis_dim2_axis0_neg_2);
+                           softmax_v1_2::get_examples_axis_dim2_axis0_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim2_axis0_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_dim2_axis0_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim2_axis0_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim2_axis0_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13210,12 +14452,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim2_axis1_2,
                            softmax_v1_2::is_ignored_axis_dim2_axis1_2,
-                           softmax_v1_2::examples_axis_dim2_axis1_2);
+                           softmax_v1_2::get_examples_axis_dim2_axis1_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim2_axis1_2) {
   const Model model = softmax_v1_2::createTestModel_axis_dim2_axis1_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim2_axis1_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim2_axis1_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13225,12 +14467,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim2_axis1_neg_2,
                            softmax_v1_2::is_ignored_axis_dim2_axis1_neg_2,
-                           softmax_v1_2::examples_axis_dim2_axis1_neg_2);
+                           softmax_v1_2::get_examples_axis_dim2_axis1_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim2_axis1_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_dim2_axis1_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim2_axis1_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim2_axis1_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13240,12 +14482,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim1_axis0_2,
                            softmax_v1_2::is_ignored_axis_dim1_axis0_2,
-                           softmax_v1_2::examples_axis_dim1_axis0_2);
+                           softmax_v1_2::get_examples_axis_dim1_axis0_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim1_axis0_2) {
   const Model model = softmax_v1_2::createTestModel_axis_dim1_axis0_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim1_axis0_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim1_axis0_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13255,12 +14497,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_dim1_axis0_neg_2,
                            softmax_v1_2::is_ignored_axis_dim1_axis0_neg_2,
-                           softmax_v1_2::examples_axis_dim1_axis0_neg_2);
+                           softmax_v1_2::get_examples_axis_dim1_axis0_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_dim1_axis0_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_dim1_axis0_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_dim1_axis0_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_dim1_axis0_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13270,12 +14512,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim4_axis0_2,
                            softmax_v1_2::is_ignored_axis_relaxed_dim4_axis0_2,
-                           softmax_v1_2::examples_axis_relaxed_dim4_axis0_2);
+                           softmax_v1_2::get_examples_axis_relaxed_dim4_axis0_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim4_axis0_2) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim4_axis0_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim4_axis0_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim4_axis0_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13285,12 +14527,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim4_axis0_neg_2,
                            softmax_v1_2::is_ignored_axis_relaxed_dim4_axis0_neg_2,
-                           softmax_v1_2::examples_axis_relaxed_dim4_axis0_neg_2);
+                           softmax_v1_2::get_examples_axis_relaxed_dim4_axis0_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim4_axis0_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim4_axis0_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim4_axis0_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim4_axis0_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13300,12 +14542,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim4_axis1_2,
                            softmax_v1_2::is_ignored_axis_relaxed_dim4_axis1_2,
-                           softmax_v1_2::examples_axis_relaxed_dim4_axis1_2);
+                           softmax_v1_2::get_examples_axis_relaxed_dim4_axis1_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim4_axis1_2) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim4_axis1_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim4_axis1_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim4_axis1_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13315,12 +14557,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim4_axis1_neg_2,
                            softmax_v1_2::is_ignored_axis_relaxed_dim4_axis1_neg_2,
-                           softmax_v1_2::examples_axis_relaxed_dim4_axis1_neg_2);
+                           softmax_v1_2::get_examples_axis_relaxed_dim4_axis1_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim4_axis1_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim4_axis1_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim4_axis1_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim4_axis1_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13330,12 +14572,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim4_axis2_2,
                            softmax_v1_2::is_ignored_axis_relaxed_dim4_axis2_2,
-                           softmax_v1_2::examples_axis_relaxed_dim4_axis2_2);
+                           softmax_v1_2::get_examples_axis_relaxed_dim4_axis2_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim4_axis2_2) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim4_axis2_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim4_axis2_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim4_axis2_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13345,12 +14587,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim4_axis2_neg_2,
                            softmax_v1_2::is_ignored_axis_relaxed_dim4_axis2_neg_2,
-                           softmax_v1_2::examples_axis_relaxed_dim4_axis2_neg_2);
+                           softmax_v1_2::get_examples_axis_relaxed_dim4_axis2_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim4_axis2_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim4_axis2_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim4_axis2_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim4_axis2_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13360,12 +14602,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim4_axis3_2,
                            softmax_v1_2::is_ignored_axis_relaxed_dim4_axis3_2,
-                           softmax_v1_2::examples_axis_relaxed_dim4_axis3_2);
+                           softmax_v1_2::get_examples_axis_relaxed_dim4_axis3_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim4_axis3_2) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim4_axis3_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim4_axis3_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim4_axis3_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13375,12 +14617,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim4_axis3_neg_2,
                            softmax_v1_2::is_ignored_axis_relaxed_dim4_axis3_neg_2,
-                           softmax_v1_2::examples_axis_relaxed_dim4_axis3_neg_2);
+                           softmax_v1_2::get_examples_axis_relaxed_dim4_axis3_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim4_axis3_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim4_axis3_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim4_axis3_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim4_axis3_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13390,12 +14632,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim3_axis0_2,
                            softmax_v1_2::is_ignored_axis_relaxed_dim3_axis0_2,
-                           softmax_v1_2::examples_axis_relaxed_dim3_axis0_2);
+                           softmax_v1_2::get_examples_axis_relaxed_dim3_axis0_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim3_axis0_2) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim3_axis0_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim3_axis0_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim3_axis0_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13405,12 +14647,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim3_axis0_neg_2,
                            softmax_v1_2::is_ignored_axis_relaxed_dim3_axis0_neg_2,
-                           softmax_v1_2::examples_axis_relaxed_dim3_axis0_neg_2);
+                           softmax_v1_2::get_examples_axis_relaxed_dim3_axis0_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim3_axis0_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim3_axis0_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim3_axis0_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim3_axis0_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13420,12 +14662,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim3_axis1_2,
                            softmax_v1_2::is_ignored_axis_relaxed_dim3_axis1_2,
-                           softmax_v1_2::examples_axis_relaxed_dim3_axis1_2);
+                           softmax_v1_2::get_examples_axis_relaxed_dim3_axis1_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim3_axis1_2) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim3_axis1_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim3_axis1_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim3_axis1_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13435,12 +14677,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim3_axis1_neg_2,
                            softmax_v1_2::is_ignored_axis_relaxed_dim3_axis1_neg_2,
-                           softmax_v1_2::examples_axis_relaxed_dim3_axis1_neg_2);
+                           softmax_v1_2::get_examples_axis_relaxed_dim3_axis1_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim3_axis1_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim3_axis1_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim3_axis1_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim3_axis1_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13450,12 +14692,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim3_axis2_2,
                            softmax_v1_2::is_ignored_axis_relaxed_dim3_axis2_2,
-                           softmax_v1_2::examples_axis_relaxed_dim3_axis2_2);
+                           softmax_v1_2::get_examples_axis_relaxed_dim3_axis2_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim3_axis2_2) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim3_axis2_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim3_axis2_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim3_axis2_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13465,12 +14707,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim3_axis2_neg_2,
                            softmax_v1_2::is_ignored_axis_relaxed_dim3_axis2_neg_2,
-                           softmax_v1_2::examples_axis_relaxed_dim3_axis2_neg_2);
+                           softmax_v1_2::get_examples_axis_relaxed_dim3_axis2_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim3_axis2_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim3_axis2_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim3_axis2_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim3_axis2_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13480,12 +14722,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim2_axis0_2,
                            softmax_v1_2::is_ignored_axis_relaxed_dim2_axis0_2,
-                           softmax_v1_2::examples_axis_relaxed_dim2_axis0_2);
+                           softmax_v1_2::get_examples_axis_relaxed_dim2_axis0_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim2_axis0_2) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim2_axis0_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim2_axis0_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim2_axis0_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13495,12 +14737,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim2_axis0_neg_2,
                            softmax_v1_2::is_ignored_axis_relaxed_dim2_axis0_neg_2,
-                           softmax_v1_2::examples_axis_relaxed_dim2_axis0_neg_2);
+                           softmax_v1_2::get_examples_axis_relaxed_dim2_axis0_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim2_axis0_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim2_axis0_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim2_axis0_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim2_axis0_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13510,12 +14752,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim2_axis1_2,
                            softmax_v1_2::is_ignored_axis_relaxed_dim2_axis1_2,
-                           softmax_v1_2::examples_axis_relaxed_dim2_axis1_2);
+                           softmax_v1_2::get_examples_axis_relaxed_dim2_axis1_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim2_axis1_2) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim2_axis1_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim2_axis1_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim2_axis1_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13525,12 +14767,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim2_axis1_neg_2,
                            softmax_v1_2::is_ignored_axis_relaxed_dim2_axis1_neg_2,
-                           softmax_v1_2::examples_axis_relaxed_dim2_axis1_neg_2);
+                           softmax_v1_2::get_examples_axis_relaxed_dim2_axis1_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim2_axis1_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim2_axis1_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim2_axis1_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim2_axis1_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13540,12 +14782,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim1_axis0_2,
                            softmax_v1_2::is_ignored_axis_relaxed_dim1_axis0_2,
-                           softmax_v1_2::examples_axis_relaxed_dim1_axis0_2);
+                           softmax_v1_2::get_examples_axis_relaxed_dim1_axis0_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim1_axis0_2) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim1_axis0_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim1_axis0_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim1_axis0_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13555,12 +14797,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_relaxed_dim1_axis0_neg_2,
                            softmax_v1_2::is_ignored_axis_relaxed_dim1_axis0_neg_2,
-                           softmax_v1_2::examples_axis_relaxed_dim1_axis0_neg_2);
+                           softmax_v1_2::get_examples_axis_relaxed_dim1_axis0_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_relaxed_dim1_axis0_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_relaxed_dim1_axis0_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_relaxed_dim1_axis0_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_relaxed_dim1_axis0_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13570,12 +14812,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim4_axis0_2,
                            softmax_v1_2::is_ignored_axis_float16_dim4_axis0_2,
-                           softmax_v1_2::examples_axis_float16_dim4_axis0_2);
+                           softmax_v1_2::get_examples_axis_float16_dim4_axis0_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim4_axis0_2) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim4_axis0_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim4_axis0_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim4_axis0_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13585,12 +14827,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim4_axis0_neg_2,
                            softmax_v1_2::is_ignored_axis_float16_dim4_axis0_neg_2,
-                           softmax_v1_2::examples_axis_float16_dim4_axis0_neg_2);
+                           softmax_v1_2::get_examples_axis_float16_dim4_axis0_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim4_axis0_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim4_axis0_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim4_axis0_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim4_axis0_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13600,12 +14842,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim4_axis1_2,
                            softmax_v1_2::is_ignored_axis_float16_dim4_axis1_2,
-                           softmax_v1_2::examples_axis_float16_dim4_axis1_2);
+                           softmax_v1_2::get_examples_axis_float16_dim4_axis1_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim4_axis1_2) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim4_axis1_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim4_axis1_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim4_axis1_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13615,12 +14857,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim4_axis1_neg_2,
                            softmax_v1_2::is_ignored_axis_float16_dim4_axis1_neg_2,
-                           softmax_v1_2::examples_axis_float16_dim4_axis1_neg_2);
+                           softmax_v1_2::get_examples_axis_float16_dim4_axis1_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim4_axis1_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim4_axis1_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim4_axis1_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim4_axis1_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13630,12 +14872,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim4_axis2_2,
                            softmax_v1_2::is_ignored_axis_float16_dim4_axis2_2,
-                           softmax_v1_2::examples_axis_float16_dim4_axis2_2);
+                           softmax_v1_2::get_examples_axis_float16_dim4_axis2_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim4_axis2_2) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim4_axis2_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim4_axis2_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim4_axis2_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13645,12 +14887,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim4_axis2_neg_2,
                            softmax_v1_2::is_ignored_axis_float16_dim4_axis2_neg_2,
-                           softmax_v1_2::examples_axis_float16_dim4_axis2_neg_2);
+                           softmax_v1_2::get_examples_axis_float16_dim4_axis2_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim4_axis2_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim4_axis2_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim4_axis2_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim4_axis2_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13660,12 +14902,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim4_axis3_2,
                            softmax_v1_2::is_ignored_axis_float16_dim4_axis3_2,
-                           softmax_v1_2::examples_axis_float16_dim4_axis3_2);
+                           softmax_v1_2::get_examples_axis_float16_dim4_axis3_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim4_axis3_2) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim4_axis3_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim4_axis3_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim4_axis3_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13675,12 +14917,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim4_axis3_neg_2,
                            softmax_v1_2::is_ignored_axis_float16_dim4_axis3_neg_2,
-                           softmax_v1_2::examples_axis_float16_dim4_axis3_neg_2);
+                           softmax_v1_2::get_examples_axis_float16_dim4_axis3_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim4_axis3_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim4_axis3_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim4_axis3_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim4_axis3_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13690,12 +14932,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim3_axis0_2,
                            softmax_v1_2::is_ignored_axis_float16_dim3_axis0_2,
-                           softmax_v1_2::examples_axis_float16_dim3_axis0_2);
+                           softmax_v1_2::get_examples_axis_float16_dim3_axis0_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim3_axis0_2) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim3_axis0_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim3_axis0_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim3_axis0_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13705,12 +14947,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim3_axis0_neg_2,
                            softmax_v1_2::is_ignored_axis_float16_dim3_axis0_neg_2,
-                           softmax_v1_2::examples_axis_float16_dim3_axis0_neg_2);
+                           softmax_v1_2::get_examples_axis_float16_dim3_axis0_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim3_axis0_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim3_axis0_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim3_axis0_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim3_axis0_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13720,12 +14962,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim3_axis1_2,
                            softmax_v1_2::is_ignored_axis_float16_dim3_axis1_2,
-                           softmax_v1_2::examples_axis_float16_dim3_axis1_2);
+                           softmax_v1_2::get_examples_axis_float16_dim3_axis1_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim3_axis1_2) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim3_axis1_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim3_axis1_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim3_axis1_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13735,12 +14977,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim3_axis1_neg_2,
                            softmax_v1_2::is_ignored_axis_float16_dim3_axis1_neg_2,
-                           softmax_v1_2::examples_axis_float16_dim3_axis1_neg_2);
+                           softmax_v1_2::get_examples_axis_float16_dim3_axis1_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim3_axis1_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim3_axis1_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim3_axis1_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim3_axis1_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13750,12 +14992,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim3_axis2_2,
                            softmax_v1_2::is_ignored_axis_float16_dim3_axis2_2,
-                           softmax_v1_2::examples_axis_float16_dim3_axis2_2);
+                           softmax_v1_2::get_examples_axis_float16_dim3_axis2_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim3_axis2_2) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim3_axis2_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim3_axis2_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim3_axis2_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13765,12 +15007,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim3_axis2_neg_2,
                            softmax_v1_2::is_ignored_axis_float16_dim3_axis2_neg_2,
-                           softmax_v1_2::examples_axis_float16_dim3_axis2_neg_2);
+                           softmax_v1_2::get_examples_axis_float16_dim3_axis2_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim3_axis2_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim3_axis2_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim3_axis2_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim3_axis2_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13780,12 +15022,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim2_axis0_2,
                            softmax_v1_2::is_ignored_axis_float16_dim2_axis0_2,
-                           softmax_v1_2::examples_axis_float16_dim2_axis0_2);
+                           softmax_v1_2::get_examples_axis_float16_dim2_axis0_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim2_axis0_2) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim2_axis0_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim2_axis0_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim2_axis0_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13795,12 +15037,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim2_axis0_neg_2,
                            softmax_v1_2::is_ignored_axis_float16_dim2_axis0_neg_2,
-                           softmax_v1_2::examples_axis_float16_dim2_axis0_neg_2);
+                           softmax_v1_2::get_examples_axis_float16_dim2_axis0_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim2_axis0_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim2_axis0_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim2_axis0_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim2_axis0_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13810,12 +15052,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim2_axis1_2,
                            softmax_v1_2::is_ignored_axis_float16_dim2_axis1_2,
-                           softmax_v1_2::examples_axis_float16_dim2_axis1_2);
+                           softmax_v1_2::get_examples_axis_float16_dim2_axis1_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim2_axis1_2) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim2_axis1_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim2_axis1_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim2_axis1_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13825,12 +15067,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim2_axis1_neg_2,
                            softmax_v1_2::is_ignored_axis_float16_dim2_axis1_neg_2,
-                           softmax_v1_2::examples_axis_float16_dim2_axis1_neg_2);
+                           softmax_v1_2::get_examples_axis_float16_dim2_axis1_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim2_axis1_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim2_axis1_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim2_axis1_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim2_axis1_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13840,12 +15082,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim1_axis0_2,
                            softmax_v1_2::is_ignored_axis_float16_dim1_axis0_2,
-                           softmax_v1_2::examples_axis_float16_dim1_axis0_2);
+                           softmax_v1_2::get_examples_axis_float16_dim1_axis0_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim1_axis0_2) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim1_axis0_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim1_axis0_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim1_axis0_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13855,12 +15097,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_float16_dim1_axis0_neg_2,
                            softmax_v1_2::is_ignored_axis_float16_dim1_axis0_neg_2,
-                           softmax_v1_2::examples_axis_float16_dim1_axis0_neg_2);
+                           softmax_v1_2::get_examples_axis_float16_dim1_axis0_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_float16_dim1_axis0_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_float16_dim1_axis0_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_float16_dim1_axis0_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_float16_dim1_axis0_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13870,12 +15112,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim4_axis0_2,
                            softmax_v1_2::is_ignored_axis_quant8_dim4_axis0_2,
-                           softmax_v1_2::examples_axis_quant8_dim4_axis0_2);
+                           softmax_v1_2::get_examples_axis_quant8_dim4_axis0_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim4_axis0_2) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim4_axis0_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim4_axis0_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim4_axis0_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13885,12 +15127,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim4_axis0_neg_2,
                            softmax_v1_2::is_ignored_axis_quant8_dim4_axis0_neg_2,
-                           softmax_v1_2::examples_axis_quant8_dim4_axis0_neg_2);
+                           softmax_v1_2::get_examples_axis_quant8_dim4_axis0_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim4_axis0_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim4_axis0_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim4_axis0_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim4_axis0_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13900,12 +15142,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim4_axis1_2,
                            softmax_v1_2::is_ignored_axis_quant8_dim4_axis1_2,
-                           softmax_v1_2::examples_axis_quant8_dim4_axis1_2);
+                           softmax_v1_2::get_examples_axis_quant8_dim4_axis1_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim4_axis1_2) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim4_axis1_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim4_axis1_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim4_axis1_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13915,12 +15157,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim4_axis1_neg_2,
                            softmax_v1_2::is_ignored_axis_quant8_dim4_axis1_neg_2,
-                           softmax_v1_2::examples_axis_quant8_dim4_axis1_neg_2);
+                           softmax_v1_2::get_examples_axis_quant8_dim4_axis1_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim4_axis1_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim4_axis1_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim4_axis1_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim4_axis1_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13930,12 +15172,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim4_axis2_2,
                            softmax_v1_2::is_ignored_axis_quant8_dim4_axis2_2,
-                           softmax_v1_2::examples_axis_quant8_dim4_axis2_2);
+                           softmax_v1_2::get_examples_axis_quant8_dim4_axis2_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim4_axis2_2) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim4_axis2_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim4_axis2_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim4_axis2_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13945,12 +15187,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim4_axis2_neg_2,
                            softmax_v1_2::is_ignored_axis_quant8_dim4_axis2_neg_2,
-                           softmax_v1_2::examples_axis_quant8_dim4_axis2_neg_2);
+                           softmax_v1_2::get_examples_axis_quant8_dim4_axis2_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim4_axis2_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim4_axis2_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim4_axis2_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim4_axis2_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13960,12 +15202,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim4_axis3_2,
                            softmax_v1_2::is_ignored_axis_quant8_dim4_axis3_2,
-                           softmax_v1_2::examples_axis_quant8_dim4_axis3_2);
+                           softmax_v1_2::get_examples_axis_quant8_dim4_axis3_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim4_axis3_2) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim4_axis3_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim4_axis3_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim4_axis3_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13975,12 +15217,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim4_axis3_neg_2,
                            softmax_v1_2::is_ignored_axis_quant8_dim4_axis3_neg_2,
-                           softmax_v1_2::examples_axis_quant8_dim4_axis3_neg_2);
+                           softmax_v1_2::get_examples_axis_quant8_dim4_axis3_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim4_axis3_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim4_axis3_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim4_axis3_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim4_axis3_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -13990,12 +15232,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim3_axis0_2,
                            softmax_v1_2::is_ignored_axis_quant8_dim3_axis0_2,
-                           softmax_v1_2::examples_axis_quant8_dim3_axis0_2);
+                           softmax_v1_2::get_examples_axis_quant8_dim3_axis0_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim3_axis0_2) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim3_axis0_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim3_axis0_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim3_axis0_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14005,12 +15247,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim3_axis0_neg_2,
                            softmax_v1_2::is_ignored_axis_quant8_dim3_axis0_neg_2,
-                           softmax_v1_2::examples_axis_quant8_dim3_axis0_neg_2);
+                           softmax_v1_2::get_examples_axis_quant8_dim3_axis0_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim3_axis0_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim3_axis0_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim3_axis0_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim3_axis0_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14020,12 +15262,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim3_axis1_2,
                            softmax_v1_2::is_ignored_axis_quant8_dim3_axis1_2,
-                           softmax_v1_2::examples_axis_quant8_dim3_axis1_2);
+                           softmax_v1_2::get_examples_axis_quant8_dim3_axis1_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim3_axis1_2) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim3_axis1_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim3_axis1_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim3_axis1_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14035,12 +15277,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim3_axis1_neg_2,
                            softmax_v1_2::is_ignored_axis_quant8_dim3_axis1_neg_2,
-                           softmax_v1_2::examples_axis_quant8_dim3_axis1_neg_2);
+                           softmax_v1_2::get_examples_axis_quant8_dim3_axis1_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim3_axis1_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim3_axis1_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim3_axis1_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim3_axis1_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14050,12 +15292,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim3_axis2_2,
                            softmax_v1_2::is_ignored_axis_quant8_dim3_axis2_2,
-                           softmax_v1_2::examples_axis_quant8_dim3_axis2_2);
+                           softmax_v1_2::get_examples_axis_quant8_dim3_axis2_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim3_axis2_2) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim3_axis2_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim3_axis2_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim3_axis2_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14065,12 +15307,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim3_axis2_neg_2,
                            softmax_v1_2::is_ignored_axis_quant8_dim3_axis2_neg_2,
-                           softmax_v1_2::examples_axis_quant8_dim3_axis2_neg_2);
+                           softmax_v1_2::get_examples_axis_quant8_dim3_axis2_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim3_axis2_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim3_axis2_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim3_axis2_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim3_axis2_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14080,12 +15322,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim2_axis0_2,
                            softmax_v1_2::is_ignored_axis_quant8_dim2_axis0_2,
-                           softmax_v1_2::examples_axis_quant8_dim2_axis0_2);
+                           softmax_v1_2::get_examples_axis_quant8_dim2_axis0_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim2_axis0_2) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim2_axis0_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim2_axis0_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim2_axis0_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14095,12 +15337,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim2_axis0_neg_2,
                            softmax_v1_2::is_ignored_axis_quant8_dim2_axis0_neg_2,
-                           softmax_v1_2::examples_axis_quant8_dim2_axis0_neg_2);
+                           softmax_v1_2::get_examples_axis_quant8_dim2_axis0_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim2_axis0_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim2_axis0_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim2_axis0_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim2_axis0_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14110,12 +15352,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim2_axis1_2,
                            softmax_v1_2::is_ignored_axis_quant8_dim2_axis1_2,
-                           softmax_v1_2::examples_axis_quant8_dim2_axis1_2);
+                           softmax_v1_2::get_examples_axis_quant8_dim2_axis1_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim2_axis1_2) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim2_axis1_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim2_axis1_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim2_axis1_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14125,12 +15367,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim2_axis1_neg_2,
                            softmax_v1_2::is_ignored_axis_quant8_dim2_axis1_neg_2,
-                           softmax_v1_2::examples_axis_quant8_dim2_axis1_neg_2);
+                           softmax_v1_2::get_examples_axis_quant8_dim2_axis1_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim2_axis1_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim2_axis1_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim2_axis1_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim2_axis1_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14140,12 +15382,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim1_axis0_2,
                            softmax_v1_2::is_ignored_axis_quant8_dim1_axis0_2,
-                           softmax_v1_2::examples_axis_quant8_dim1_axis0_2);
+                           softmax_v1_2::get_examples_axis_quant8_dim1_axis0_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim1_axis0_2) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim1_axis0_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim1_axis0_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim1_axis0_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14155,12 +15397,12 @@
   generated_tests::Execute(device,
                            softmax_v1_2::createTestModel_axis_quant8_dim1_axis0_neg_2,
                            softmax_v1_2::is_ignored_axis_quant8_dim1_axis0_neg_2,
-                           softmax_v1_2::examples_axis_quant8_dim1_axis0_neg_2);
+                           softmax_v1_2::get_examples_axis_quant8_dim1_axis0_neg_2());
 }
 
 TEST_F(ValidationTest, softmax_v1_2_axis_quant8_dim1_axis0_neg_2) {
   const Model model = softmax_v1_2::createTestModel_axis_quant8_dim1_axis0_neg_2();
-  const std::vector<Request> requests = createRequests(softmax_v1_2::examples_axis_quant8_dim1_axis0_neg_2);
+  const std::vector<Request> requests = createRequests(softmax_v1_2::get_examples_axis_quant8_dim1_axis0_neg_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14178,12 +15420,12 @@
   generated_tests::Execute(device,
                            space_to_batch_v1_2::createTestModel_nhwc,
                            space_to_batch_v1_2::is_ignored_nhwc,
-                           space_to_batch_v1_2::examples_nhwc);
+                           space_to_batch_v1_2::get_examples_nhwc());
 }
 
 TEST_F(ValidationTest, space_to_batch_v1_2_nhwc) {
   const Model model = space_to_batch_v1_2::createTestModel_nhwc();
-  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::examples_nhwc);
+  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::get_examples_nhwc());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14193,12 +15435,27 @@
   generated_tests::Execute(device,
                            space_to_batch_v1_2::createTestModel_nhwc_relaxed,
                            space_to_batch_v1_2::is_ignored_nhwc_relaxed,
-                           space_to_batch_v1_2::examples_nhwc_relaxed);
+                           space_to_batch_v1_2::get_examples_nhwc_relaxed());
 }
 
 TEST_F(ValidationTest, space_to_batch_v1_2_nhwc_relaxed) {
   const Model model = space_to_batch_v1_2::createTestModel_nhwc_relaxed();
-  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::examples_nhwc_relaxed);
+  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::get_examples_nhwc_relaxed());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, space_to_batch_v1_2_nhwc_float16) {
+  generated_tests::Execute(device,
+                           space_to_batch_v1_2::createTestModel_nhwc_float16,
+                           space_to_batch_v1_2::is_ignored_nhwc_float16,
+                           space_to_batch_v1_2::get_examples_nhwc_float16());
+}
+
+TEST_F(ValidationTest, space_to_batch_v1_2_nhwc_float16) {
+  const Model model = space_to_batch_v1_2::createTestModel_nhwc_float16();
+  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::get_examples_nhwc_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14208,12 +15465,12 @@
   generated_tests::Execute(device,
                            space_to_batch_v1_2::createTestModel_nhwc_quant8,
                            space_to_batch_v1_2::is_ignored_nhwc_quant8,
-                           space_to_batch_v1_2::examples_nhwc_quant8);
+                           space_to_batch_v1_2::get_examples_nhwc_quant8());
 }
 
 TEST_F(ValidationTest, space_to_batch_v1_2_nhwc_quant8) {
   const Model model = space_to_batch_v1_2::createTestModel_nhwc_quant8();
-  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::examples_nhwc_quant8);
+  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::get_examples_nhwc_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14223,12 +15480,12 @@
   generated_tests::Execute(device,
                            space_to_batch_v1_2::createTestModel_nchw,
                            space_to_batch_v1_2::is_ignored_nchw,
-                           space_to_batch_v1_2::examples_nchw);
+                           space_to_batch_v1_2::get_examples_nchw());
 }
 
 TEST_F(ValidationTest, space_to_batch_v1_2_nchw) {
   const Model model = space_to_batch_v1_2::createTestModel_nchw();
-  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::examples_nchw);
+  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::get_examples_nchw());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14238,12 +15495,27 @@
   generated_tests::Execute(device,
                            space_to_batch_v1_2::createTestModel_nchw_relaxed,
                            space_to_batch_v1_2::is_ignored_nchw_relaxed,
-                           space_to_batch_v1_2::examples_nchw_relaxed);
+                           space_to_batch_v1_2::get_examples_nchw_relaxed());
 }
 
 TEST_F(ValidationTest, space_to_batch_v1_2_nchw_relaxed) {
   const Model model = space_to_batch_v1_2::createTestModel_nchw_relaxed();
-  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::examples_nchw_relaxed);
+  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::get_examples_nchw_relaxed());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, space_to_batch_v1_2_nchw_float16) {
+  generated_tests::Execute(device,
+                           space_to_batch_v1_2::createTestModel_nchw_float16,
+                           space_to_batch_v1_2::is_ignored_nchw_float16,
+                           space_to_batch_v1_2::get_examples_nchw_float16());
+}
+
+TEST_F(ValidationTest, space_to_batch_v1_2_nchw_float16) {
+  const Model model = space_to_batch_v1_2::createTestModel_nchw_float16();
+  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::get_examples_nchw_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14253,12 +15525,12 @@
   generated_tests::Execute(device,
                            space_to_batch_v1_2::createTestModel_nchw_quant8,
                            space_to_batch_v1_2::is_ignored_nchw_quant8,
-                           space_to_batch_v1_2::examples_nchw_quant8);
+                           space_to_batch_v1_2::get_examples_nchw_quant8());
 }
 
 TEST_F(ValidationTest, space_to_batch_v1_2_nchw_quant8) {
   const Model model = space_to_batch_v1_2::createTestModel_nchw_quant8();
-  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::examples_nchw_quant8);
+  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::get_examples_nchw_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14268,12 +15540,12 @@
   generated_tests::Execute(device,
                            space_to_batch_v1_2::createTestModel_nhwc_2,
                            space_to_batch_v1_2::is_ignored_nhwc_2,
-                           space_to_batch_v1_2::examples_nhwc_2);
+                           space_to_batch_v1_2::get_examples_nhwc_2());
 }
 
 TEST_F(ValidationTest, space_to_batch_v1_2_nhwc_2) {
   const Model model = space_to_batch_v1_2::createTestModel_nhwc_2();
-  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::examples_nhwc_2);
+  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::get_examples_nhwc_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14283,12 +15555,27 @@
   generated_tests::Execute(device,
                            space_to_batch_v1_2::createTestModel_nhwc_relaxed_2,
                            space_to_batch_v1_2::is_ignored_nhwc_relaxed_2,
-                           space_to_batch_v1_2::examples_nhwc_relaxed_2);
+                           space_to_batch_v1_2::get_examples_nhwc_relaxed_2());
 }
 
 TEST_F(ValidationTest, space_to_batch_v1_2_nhwc_relaxed_2) {
   const Model model = space_to_batch_v1_2::createTestModel_nhwc_relaxed_2();
-  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::examples_nhwc_relaxed_2);
+  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::get_examples_nhwc_relaxed_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, space_to_batch_v1_2_nhwc_float16_2) {
+  generated_tests::Execute(device,
+                           space_to_batch_v1_2::createTestModel_nhwc_float16_2,
+                           space_to_batch_v1_2::is_ignored_nhwc_float16_2,
+                           space_to_batch_v1_2::get_examples_nhwc_float16_2());
+}
+
+TEST_F(ValidationTest, space_to_batch_v1_2_nhwc_float16_2) {
+  const Model model = space_to_batch_v1_2::createTestModel_nhwc_float16_2();
+  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::get_examples_nhwc_float16_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14298,12 +15585,12 @@
   generated_tests::Execute(device,
                            space_to_batch_v1_2::createTestModel_nhwc_quant8_2,
                            space_to_batch_v1_2::is_ignored_nhwc_quant8_2,
-                           space_to_batch_v1_2::examples_nhwc_quant8_2);
+                           space_to_batch_v1_2::get_examples_nhwc_quant8_2());
 }
 
 TEST_F(ValidationTest, space_to_batch_v1_2_nhwc_quant8_2) {
   const Model model = space_to_batch_v1_2::createTestModel_nhwc_quant8_2();
-  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::examples_nhwc_quant8_2);
+  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::get_examples_nhwc_quant8_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14313,12 +15600,12 @@
   generated_tests::Execute(device,
                            space_to_batch_v1_2::createTestModel_nchw_2,
                            space_to_batch_v1_2::is_ignored_nchw_2,
-                           space_to_batch_v1_2::examples_nchw_2);
+                           space_to_batch_v1_2::get_examples_nchw_2());
 }
 
 TEST_F(ValidationTest, space_to_batch_v1_2_nchw_2) {
   const Model model = space_to_batch_v1_2::createTestModel_nchw_2();
-  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::examples_nchw_2);
+  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::get_examples_nchw_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14328,12 +15615,27 @@
   generated_tests::Execute(device,
                            space_to_batch_v1_2::createTestModel_nchw_relaxed_2,
                            space_to_batch_v1_2::is_ignored_nchw_relaxed_2,
-                           space_to_batch_v1_2::examples_nchw_relaxed_2);
+                           space_to_batch_v1_2::get_examples_nchw_relaxed_2());
 }
 
 TEST_F(ValidationTest, space_to_batch_v1_2_nchw_relaxed_2) {
   const Model model = space_to_batch_v1_2::createTestModel_nchw_relaxed_2();
-  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::examples_nchw_relaxed_2);
+  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::get_examples_nchw_relaxed_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, space_to_batch_v1_2_nchw_float16_2) {
+  generated_tests::Execute(device,
+                           space_to_batch_v1_2::createTestModel_nchw_float16_2,
+                           space_to_batch_v1_2::is_ignored_nchw_float16_2,
+                           space_to_batch_v1_2::get_examples_nchw_float16_2());
+}
+
+TEST_F(ValidationTest, space_to_batch_v1_2_nchw_float16_2) {
+  const Model model = space_to_batch_v1_2::createTestModel_nchw_float16_2();
+  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::get_examples_nchw_float16_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14343,12 +15645,12 @@
   generated_tests::Execute(device,
                            space_to_batch_v1_2::createTestModel_nchw_quant8_2,
                            space_to_batch_v1_2::is_ignored_nchw_quant8_2,
-                           space_to_batch_v1_2::examples_nchw_quant8_2);
+                           space_to_batch_v1_2::get_examples_nchw_quant8_2());
 }
 
 TEST_F(ValidationTest, space_to_batch_v1_2_nchw_quant8_2) {
   const Model model = space_to_batch_v1_2::createTestModel_nchw_quant8_2();
-  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::examples_nchw_quant8_2);
+  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::get_examples_nchw_quant8_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14358,12 +15660,12 @@
   generated_tests::Execute(device,
                            space_to_batch_v1_2::createTestModel_nhwc_3,
                            space_to_batch_v1_2::is_ignored_nhwc_3,
-                           space_to_batch_v1_2::examples_nhwc_3);
+                           space_to_batch_v1_2::get_examples_nhwc_3());
 }
 
 TEST_F(ValidationTest, space_to_batch_v1_2_nhwc_3) {
   const Model model = space_to_batch_v1_2::createTestModel_nhwc_3();
-  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::examples_nhwc_3);
+  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::get_examples_nhwc_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14373,12 +15675,27 @@
   generated_tests::Execute(device,
                            space_to_batch_v1_2::createTestModel_nhwc_relaxed_3,
                            space_to_batch_v1_2::is_ignored_nhwc_relaxed_3,
-                           space_to_batch_v1_2::examples_nhwc_relaxed_3);
+                           space_to_batch_v1_2::get_examples_nhwc_relaxed_3());
 }
 
 TEST_F(ValidationTest, space_to_batch_v1_2_nhwc_relaxed_3) {
   const Model model = space_to_batch_v1_2::createTestModel_nhwc_relaxed_3();
-  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::examples_nhwc_relaxed_3);
+  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::get_examples_nhwc_relaxed_3());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, space_to_batch_v1_2_nhwc_float16_3) {
+  generated_tests::Execute(device,
+                           space_to_batch_v1_2::createTestModel_nhwc_float16_3,
+                           space_to_batch_v1_2::is_ignored_nhwc_float16_3,
+                           space_to_batch_v1_2::get_examples_nhwc_float16_3());
+}
+
+TEST_F(ValidationTest, space_to_batch_v1_2_nhwc_float16_3) {
+  const Model model = space_to_batch_v1_2::createTestModel_nhwc_float16_3();
+  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::get_examples_nhwc_float16_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14388,12 +15705,12 @@
   generated_tests::Execute(device,
                            space_to_batch_v1_2::createTestModel_nhwc_quant8_3,
                            space_to_batch_v1_2::is_ignored_nhwc_quant8_3,
-                           space_to_batch_v1_2::examples_nhwc_quant8_3);
+                           space_to_batch_v1_2::get_examples_nhwc_quant8_3());
 }
 
 TEST_F(ValidationTest, space_to_batch_v1_2_nhwc_quant8_3) {
   const Model model = space_to_batch_v1_2::createTestModel_nhwc_quant8_3();
-  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::examples_nhwc_quant8_3);
+  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::get_examples_nhwc_quant8_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14403,12 +15720,12 @@
   generated_tests::Execute(device,
                            space_to_batch_v1_2::createTestModel_nchw_3,
                            space_to_batch_v1_2::is_ignored_nchw_3,
-                           space_to_batch_v1_2::examples_nchw_3);
+                           space_to_batch_v1_2::get_examples_nchw_3());
 }
 
 TEST_F(ValidationTest, space_to_batch_v1_2_nchw_3) {
   const Model model = space_to_batch_v1_2::createTestModel_nchw_3();
-  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::examples_nchw_3);
+  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::get_examples_nchw_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14418,12 +15735,27 @@
   generated_tests::Execute(device,
                            space_to_batch_v1_2::createTestModel_nchw_relaxed_3,
                            space_to_batch_v1_2::is_ignored_nchw_relaxed_3,
-                           space_to_batch_v1_2::examples_nchw_relaxed_3);
+                           space_to_batch_v1_2::get_examples_nchw_relaxed_3());
 }
 
 TEST_F(ValidationTest, space_to_batch_v1_2_nchw_relaxed_3) {
   const Model model = space_to_batch_v1_2::createTestModel_nchw_relaxed_3();
-  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::examples_nchw_relaxed_3);
+  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::get_examples_nchw_relaxed_3());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, space_to_batch_v1_2_nchw_float16_3) {
+  generated_tests::Execute(device,
+                           space_to_batch_v1_2::createTestModel_nchw_float16_3,
+                           space_to_batch_v1_2::is_ignored_nchw_float16_3,
+                           space_to_batch_v1_2::get_examples_nchw_float16_3());
+}
+
+TEST_F(ValidationTest, space_to_batch_v1_2_nchw_float16_3) {
+  const Model model = space_to_batch_v1_2::createTestModel_nchw_float16_3();
+  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::get_examples_nchw_float16_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14433,12 +15765,12 @@
   generated_tests::Execute(device,
                            space_to_batch_v1_2::createTestModel_nchw_quant8_3,
                            space_to_batch_v1_2::is_ignored_nchw_quant8_3,
-                           space_to_batch_v1_2::examples_nchw_quant8_3);
+                           space_to_batch_v1_2::get_examples_nchw_quant8_3());
 }
 
 TEST_F(ValidationTest, space_to_batch_v1_2_nchw_quant8_3) {
   const Model model = space_to_batch_v1_2::createTestModel_nchw_quant8_3();
-  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::examples_nchw_quant8_3);
+  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::get_examples_nchw_quant8_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14448,12 +15780,12 @@
   generated_tests::Execute(device,
                            space_to_batch_v1_2::createTestModel_nhwc_4,
                            space_to_batch_v1_2::is_ignored_nhwc_4,
-                           space_to_batch_v1_2::examples_nhwc_4);
+                           space_to_batch_v1_2::get_examples_nhwc_4());
 }
 
 TEST_F(ValidationTest, space_to_batch_v1_2_nhwc_4) {
   const Model model = space_to_batch_v1_2::createTestModel_nhwc_4();
-  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::examples_nhwc_4);
+  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::get_examples_nhwc_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14463,12 +15795,27 @@
   generated_tests::Execute(device,
                            space_to_batch_v1_2::createTestModel_nhwc_relaxed_4,
                            space_to_batch_v1_2::is_ignored_nhwc_relaxed_4,
-                           space_to_batch_v1_2::examples_nhwc_relaxed_4);
+                           space_to_batch_v1_2::get_examples_nhwc_relaxed_4());
 }
 
 TEST_F(ValidationTest, space_to_batch_v1_2_nhwc_relaxed_4) {
   const Model model = space_to_batch_v1_2::createTestModel_nhwc_relaxed_4();
-  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::examples_nhwc_relaxed_4);
+  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::get_examples_nhwc_relaxed_4());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, space_to_batch_v1_2_nhwc_float16_4) {
+  generated_tests::Execute(device,
+                           space_to_batch_v1_2::createTestModel_nhwc_float16_4,
+                           space_to_batch_v1_2::is_ignored_nhwc_float16_4,
+                           space_to_batch_v1_2::get_examples_nhwc_float16_4());
+}
+
+TEST_F(ValidationTest, space_to_batch_v1_2_nhwc_float16_4) {
+  const Model model = space_to_batch_v1_2::createTestModel_nhwc_float16_4();
+  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::get_examples_nhwc_float16_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14478,12 +15825,12 @@
   generated_tests::Execute(device,
                            space_to_batch_v1_2::createTestModel_nhwc_quant8_4,
                            space_to_batch_v1_2::is_ignored_nhwc_quant8_4,
-                           space_to_batch_v1_2::examples_nhwc_quant8_4);
+                           space_to_batch_v1_2::get_examples_nhwc_quant8_4());
 }
 
 TEST_F(ValidationTest, space_to_batch_v1_2_nhwc_quant8_4) {
   const Model model = space_to_batch_v1_2::createTestModel_nhwc_quant8_4();
-  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::examples_nhwc_quant8_4);
+  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::get_examples_nhwc_quant8_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14493,12 +15840,12 @@
   generated_tests::Execute(device,
                            space_to_batch_v1_2::createTestModel_nchw_4,
                            space_to_batch_v1_2::is_ignored_nchw_4,
-                           space_to_batch_v1_2::examples_nchw_4);
+                           space_to_batch_v1_2::get_examples_nchw_4());
 }
 
 TEST_F(ValidationTest, space_to_batch_v1_2_nchw_4) {
   const Model model = space_to_batch_v1_2::createTestModel_nchw_4();
-  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::examples_nchw_4);
+  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::get_examples_nchw_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14508,12 +15855,27 @@
   generated_tests::Execute(device,
                            space_to_batch_v1_2::createTestModel_nchw_relaxed_4,
                            space_to_batch_v1_2::is_ignored_nchw_relaxed_4,
-                           space_to_batch_v1_2::examples_nchw_relaxed_4);
+                           space_to_batch_v1_2::get_examples_nchw_relaxed_4());
 }
 
 TEST_F(ValidationTest, space_to_batch_v1_2_nchw_relaxed_4) {
   const Model model = space_to_batch_v1_2::createTestModel_nchw_relaxed_4();
-  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::examples_nchw_relaxed_4);
+  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::get_examples_nchw_relaxed_4());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, space_to_batch_v1_2_nchw_float16_4) {
+  generated_tests::Execute(device,
+                           space_to_batch_v1_2::createTestModel_nchw_float16_4,
+                           space_to_batch_v1_2::is_ignored_nchw_float16_4,
+                           space_to_batch_v1_2::get_examples_nchw_float16_4());
+}
+
+TEST_F(ValidationTest, space_to_batch_v1_2_nchw_float16_4) {
+  const Model model = space_to_batch_v1_2::createTestModel_nchw_float16_4();
+  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::get_examples_nchw_float16_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14523,12 +15885,12 @@
   generated_tests::Execute(device,
                            space_to_batch_v1_2::createTestModel_nchw_quant8_4,
                            space_to_batch_v1_2::is_ignored_nchw_quant8_4,
-                           space_to_batch_v1_2::examples_nchw_quant8_4);
+                           space_to_batch_v1_2::get_examples_nchw_quant8_4());
 }
 
 TEST_F(ValidationTest, space_to_batch_v1_2_nchw_quant8_4) {
   const Model model = space_to_batch_v1_2::createTestModel_nchw_quant8_4();
-  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::examples_nchw_quant8_4);
+  const std::vector<Request> requests = createRequests(space_to_batch_v1_2::get_examples_nchw_quant8_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14546,12 +15908,12 @@
   generated_tests::Execute(device,
                            space_to_depth_v1_2::createTestModel_nhwc,
                            space_to_depth_v1_2::is_ignored_nhwc,
-                           space_to_depth_v1_2::examples_nhwc);
+                           space_to_depth_v1_2::get_examples_nhwc());
 }
 
 TEST_F(ValidationTest, space_to_depth_v1_2_nhwc) {
   const Model model = space_to_depth_v1_2::createTestModel_nhwc();
-  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::examples_nhwc);
+  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::get_examples_nhwc());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14561,12 +15923,27 @@
   generated_tests::Execute(device,
                            space_to_depth_v1_2::createTestModel_nhwc_relaxed,
                            space_to_depth_v1_2::is_ignored_nhwc_relaxed,
-                           space_to_depth_v1_2::examples_nhwc_relaxed);
+                           space_to_depth_v1_2::get_examples_nhwc_relaxed());
 }
 
 TEST_F(ValidationTest, space_to_depth_v1_2_nhwc_relaxed) {
   const Model model = space_to_depth_v1_2::createTestModel_nhwc_relaxed();
-  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::examples_nhwc_relaxed);
+  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::get_examples_nhwc_relaxed());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, space_to_depth_v1_2_nhwc_float16) {
+  generated_tests::Execute(device,
+                           space_to_depth_v1_2::createTestModel_nhwc_float16,
+                           space_to_depth_v1_2::is_ignored_nhwc_float16,
+                           space_to_depth_v1_2::get_examples_nhwc_float16());
+}
+
+TEST_F(ValidationTest, space_to_depth_v1_2_nhwc_float16) {
+  const Model model = space_to_depth_v1_2::createTestModel_nhwc_float16();
+  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::get_examples_nhwc_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14576,12 +15953,12 @@
   generated_tests::Execute(device,
                            space_to_depth_v1_2::createTestModel_nhwc_quant8,
                            space_to_depth_v1_2::is_ignored_nhwc_quant8,
-                           space_to_depth_v1_2::examples_nhwc_quant8);
+                           space_to_depth_v1_2::get_examples_nhwc_quant8());
 }
 
 TEST_F(ValidationTest, space_to_depth_v1_2_nhwc_quant8) {
   const Model model = space_to_depth_v1_2::createTestModel_nhwc_quant8();
-  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::examples_nhwc_quant8);
+  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::get_examples_nhwc_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14591,12 +15968,12 @@
   generated_tests::Execute(device,
                            space_to_depth_v1_2::createTestModel_nchw,
                            space_to_depth_v1_2::is_ignored_nchw,
-                           space_to_depth_v1_2::examples_nchw);
+                           space_to_depth_v1_2::get_examples_nchw());
 }
 
 TEST_F(ValidationTest, space_to_depth_v1_2_nchw) {
   const Model model = space_to_depth_v1_2::createTestModel_nchw();
-  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::examples_nchw);
+  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::get_examples_nchw());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14606,12 +15983,27 @@
   generated_tests::Execute(device,
                            space_to_depth_v1_2::createTestModel_nchw_relaxed,
                            space_to_depth_v1_2::is_ignored_nchw_relaxed,
-                           space_to_depth_v1_2::examples_nchw_relaxed);
+                           space_to_depth_v1_2::get_examples_nchw_relaxed());
 }
 
 TEST_F(ValidationTest, space_to_depth_v1_2_nchw_relaxed) {
   const Model model = space_to_depth_v1_2::createTestModel_nchw_relaxed();
-  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::examples_nchw_relaxed);
+  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::get_examples_nchw_relaxed());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, space_to_depth_v1_2_nchw_float16) {
+  generated_tests::Execute(device,
+                           space_to_depth_v1_2::createTestModel_nchw_float16,
+                           space_to_depth_v1_2::is_ignored_nchw_float16,
+                           space_to_depth_v1_2::get_examples_nchw_float16());
+}
+
+TEST_F(ValidationTest, space_to_depth_v1_2_nchw_float16) {
+  const Model model = space_to_depth_v1_2::createTestModel_nchw_float16();
+  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::get_examples_nchw_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14621,12 +16013,12 @@
   generated_tests::Execute(device,
                            space_to_depth_v1_2::createTestModel_nchw_quant8,
                            space_to_depth_v1_2::is_ignored_nchw_quant8,
-                           space_to_depth_v1_2::examples_nchw_quant8);
+                           space_to_depth_v1_2::get_examples_nchw_quant8());
 }
 
 TEST_F(ValidationTest, space_to_depth_v1_2_nchw_quant8) {
   const Model model = space_to_depth_v1_2::createTestModel_nchw_quant8();
-  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::examples_nchw_quant8);
+  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::get_examples_nchw_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14636,12 +16028,12 @@
   generated_tests::Execute(device,
                            space_to_depth_v1_2::createTestModel_nhwc_2,
                            space_to_depth_v1_2::is_ignored_nhwc_2,
-                           space_to_depth_v1_2::examples_nhwc_2);
+                           space_to_depth_v1_2::get_examples_nhwc_2());
 }
 
 TEST_F(ValidationTest, space_to_depth_v1_2_nhwc_2) {
   const Model model = space_to_depth_v1_2::createTestModel_nhwc_2();
-  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::examples_nhwc_2);
+  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::get_examples_nhwc_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14651,12 +16043,27 @@
   generated_tests::Execute(device,
                            space_to_depth_v1_2::createTestModel_nhwc_relaxed_2,
                            space_to_depth_v1_2::is_ignored_nhwc_relaxed_2,
-                           space_to_depth_v1_2::examples_nhwc_relaxed_2);
+                           space_to_depth_v1_2::get_examples_nhwc_relaxed_2());
 }
 
 TEST_F(ValidationTest, space_to_depth_v1_2_nhwc_relaxed_2) {
   const Model model = space_to_depth_v1_2::createTestModel_nhwc_relaxed_2();
-  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::examples_nhwc_relaxed_2);
+  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::get_examples_nhwc_relaxed_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, space_to_depth_v1_2_nhwc_float16_2) {
+  generated_tests::Execute(device,
+                           space_to_depth_v1_2::createTestModel_nhwc_float16_2,
+                           space_to_depth_v1_2::is_ignored_nhwc_float16_2,
+                           space_to_depth_v1_2::get_examples_nhwc_float16_2());
+}
+
+TEST_F(ValidationTest, space_to_depth_v1_2_nhwc_float16_2) {
+  const Model model = space_to_depth_v1_2::createTestModel_nhwc_float16_2();
+  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::get_examples_nhwc_float16_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14666,12 +16073,12 @@
   generated_tests::Execute(device,
                            space_to_depth_v1_2::createTestModel_nhwc_quant8_2,
                            space_to_depth_v1_2::is_ignored_nhwc_quant8_2,
-                           space_to_depth_v1_2::examples_nhwc_quant8_2);
+                           space_to_depth_v1_2::get_examples_nhwc_quant8_2());
 }
 
 TEST_F(ValidationTest, space_to_depth_v1_2_nhwc_quant8_2) {
   const Model model = space_to_depth_v1_2::createTestModel_nhwc_quant8_2();
-  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::examples_nhwc_quant8_2);
+  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::get_examples_nhwc_quant8_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14681,12 +16088,12 @@
   generated_tests::Execute(device,
                            space_to_depth_v1_2::createTestModel_nchw_2,
                            space_to_depth_v1_2::is_ignored_nchw_2,
-                           space_to_depth_v1_2::examples_nchw_2);
+                           space_to_depth_v1_2::get_examples_nchw_2());
 }
 
 TEST_F(ValidationTest, space_to_depth_v1_2_nchw_2) {
   const Model model = space_to_depth_v1_2::createTestModel_nchw_2();
-  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::examples_nchw_2);
+  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::get_examples_nchw_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14696,12 +16103,27 @@
   generated_tests::Execute(device,
                            space_to_depth_v1_2::createTestModel_nchw_relaxed_2,
                            space_to_depth_v1_2::is_ignored_nchw_relaxed_2,
-                           space_to_depth_v1_2::examples_nchw_relaxed_2);
+                           space_to_depth_v1_2::get_examples_nchw_relaxed_2());
 }
 
 TEST_F(ValidationTest, space_to_depth_v1_2_nchw_relaxed_2) {
   const Model model = space_to_depth_v1_2::createTestModel_nchw_relaxed_2();
-  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::examples_nchw_relaxed_2);
+  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::get_examples_nchw_relaxed_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, space_to_depth_v1_2_nchw_float16_2) {
+  generated_tests::Execute(device,
+                           space_to_depth_v1_2::createTestModel_nchw_float16_2,
+                           space_to_depth_v1_2::is_ignored_nchw_float16_2,
+                           space_to_depth_v1_2::get_examples_nchw_float16_2());
+}
+
+TEST_F(ValidationTest, space_to_depth_v1_2_nchw_float16_2) {
+  const Model model = space_to_depth_v1_2::createTestModel_nchw_float16_2();
+  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::get_examples_nchw_float16_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14711,12 +16133,12 @@
   generated_tests::Execute(device,
                            space_to_depth_v1_2::createTestModel_nchw_quant8_2,
                            space_to_depth_v1_2::is_ignored_nchw_quant8_2,
-                           space_to_depth_v1_2::examples_nchw_quant8_2);
+                           space_to_depth_v1_2::get_examples_nchw_quant8_2());
 }
 
 TEST_F(ValidationTest, space_to_depth_v1_2_nchw_quant8_2) {
   const Model model = space_to_depth_v1_2::createTestModel_nchw_quant8_2();
-  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::examples_nchw_quant8_2);
+  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::get_examples_nchw_quant8_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14726,12 +16148,12 @@
   generated_tests::Execute(device,
                            space_to_depth_v1_2::createTestModel_nhwc_3,
                            space_to_depth_v1_2::is_ignored_nhwc_3,
-                           space_to_depth_v1_2::examples_nhwc_3);
+                           space_to_depth_v1_2::get_examples_nhwc_3());
 }
 
 TEST_F(ValidationTest, space_to_depth_v1_2_nhwc_3) {
   const Model model = space_to_depth_v1_2::createTestModel_nhwc_3();
-  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::examples_nhwc_3);
+  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::get_examples_nhwc_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14741,12 +16163,27 @@
   generated_tests::Execute(device,
                            space_to_depth_v1_2::createTestModel_nhwc_relaxed_3,
                            space_to_depth_v1_2::is_ignored_nhwc_relaxed_3,
-                           space_to_depth_v1_2::examples_nhwc_relaxed_3);
+                           space_to_depth_v1_2::get_examples_nhwc_relaxed_3());
 }
 
 TEST_F(ValidationTest, space_to_depth_v1_2_nhwc_relaxed_3) {
   const Model model = space_to_depth_v1_2::createTestModel_nhwc_relaxed_3();
-  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::examples_nhwc_relaxed_3);
+  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::get_examples_nhwc_relaxed_3());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, space_to_depth_v1_2_nhwc_float16_3) {
+  generated_tests::Execute(device,
+                           space_to_depth_v1_2::createTestModel_nhwc_float16_3,
+                           space_to_depth_v1_2::is_ignored_nhwc_float16_3,
+                           space_to_depth_v1_2::get_examples_nhwc_float16_3());
+}
+
+TEST_F(ValidationTest, space_to_depth_v1_2_nhwc_float16_3) {
+  const Model model = space_to_depth_v1_2::createTestModel_nhwc_float16_3();
+  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::get_examples_nhwc_float16_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14756,12 +16193,12 @@
   generated_tests::Execute(device,
                            space_to_depth_v1_2::createTestModel_nhwc_quant8_3,
                            space_to_depth_v1_2::is_ignored_nhwc_quant8_3,
-                           space_to_depth_v1_2::examples_nhwc_quant8_3);
+                           space_to_depth_v1_2::get_examples_nhwc_quant8_3());
 }
 
 TEST_F(ValidationTest, space_to_depth_v1_2_nhwc_quant8_3) {
   const Model model = space_to_depth_v1_2::createTestModel_nhwc_quant8_3();
-  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::examples_nhwc_quant8_3);
+  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::get_examples_nhwc_quant8_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14771,12 +16208,12 @@
   generated_tests::Execute(device,
                            space_to_depth_v1_2::createTestModel_nchw_3,
                            space_to_depth_v1_2::is_ignored_nchw_3,
-                           space_to_depth_v1_2::examples_nchw_3);
+                           space_to_depth_v1_2::get_examples_nchw_3());
 }
 
 TEST_F(ValidationTest, space_to_depth_v1_2_nchw_3) {
   const Model model = space_to_depth_v1_2::createTestModel_nchw_3();
-  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::examples_nchw_3);
+  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::get_examples_nchw_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14786,12 +16223,27 @@
   generated_tests::Execute(device,
                            space_to_depth_v1_2::createTestModel_nchw_relaxed_3,
                            space_to_depth_v1_2::is_ignored_nchw_relaxed_3,
-                           space_to_depth_v1_2::examples_nchw_relaxed_3);
+                           space_to_depth_v1_2::get_examples_nchw_relaxed_3());
 }
 
 TEST_F(ValidationTest, space_to_depth_v1_2_nchw_relaxed_3) {
   const Model model = space_to_depth_v1_2::createTestModel_nchw_relaxed_3();
-  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::examples_nchw_relaxed_3);
+  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::get_examples_nchw_relaxed_3());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, space_to_depth_v1_2_nchw_float16_3) {
+  generated_tests::Execute(device,
+                           space_to_depth_v1_2::createTestModel_nchw_float16_3,
+                           space_to_depth_v1_2::is_ignored_nchw_float16_3,
+                           space_to_depth_v1_2::get_examples_nchw_float16_3());
+}
+
+TEST_F(ValidationTest, space_to_depth_v1_2_nchw_float16_3) {
+  const Model model = space_to_depth_v1_2::createTestModel_nchw_float16_3();
+  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::get_examples_nchw_float16_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14801,12 +16253,12 @@
   generated_tests::Execute(device,
                            space_to_depth_v1_2::createTestModel_nchw_quant8_3,
                            space_to_depth_v1_2::is_ignored_nchw_quant8_3,
-                           space_to_depth_v1_2::examples_nchw_quant8_3);
+                           space_to_depth_v1_2::get_examples_nchw_quant8_3());
 }
 
 TEST_F(ValidationTest, space_to_depth_v1_2_nchw_quant8_3) {
   const Model model = space_to_depth_v1_2::createTestModel_nchw_quant8_3();
-  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::examples_nchw_quant8_3);
+  const std::vector<Request> requests = createRequests(space_to_depth_v1_2::get_examples_nchw_quant8_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14824,12 +16276,12 @@
   generated_tests::Execute(device,
                            split_float_1::createTestModel,
                            split_float_1::is_ignored,
-                           split_float_1::examples);
+                           split_float_1::get_examples());
 }
 
 TEST_F(ValidationTest, split_float_1) {
   const Model model = split_float_1::createTestModel();
-  const std::vector<Request> requests = createRequests(split_float_1::examples);
+  const std::vector<Request> requests = createRequests(split_float_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14839,12 +16291,12 @@
   generated_tests::Execute(device,
                            split_float_1::createTestModel_relaxed,
                            split_float_1::is_ignored_relaxed,
-                           split_float_1::examples_relaxed);
+                           split_float_1::get_examples_relaxed());
 }
 
 TEST_F(ValidationTest, split_float_1_relaxed) {
   const Model model = split_float_1::createTestModel_relaxed();
-  const std::vector<Request> requests = createRequests(split_float_1::examples_relaxed);
+  const std::vector<Request> requests = createRequests(split_float_1::get_examples_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14854,12 +16306,12 @@
   generated_tests::Execute(device,
                            split_float_1::createTestModel_float16,
                            split_float_1::is_ignored_float16,
-                           split_float_1::examples_float16);
+                           split_float_1::get_examples_float16());
 }
 
 TEST_F(ValidationTest, split_float_1_float16) {
   const Model model = split_float_1::createTestModel_float16();
-  const std::vector<Request> requests = createRequests(split_float_1::examples_float16);
+  const std::vector<Request> requests = createRequests(split_float_1::get_examples_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14877,12 +16329,12 @@
   generated_tests::Execute(device,
                            split_float_2::createTestModel,
                            split_float_2::is_ignored,
-                           split_float_2::examples);
+                           split_float_2::get_examples());
 }
 
 TEST_F(ValidationTest, split_float_2) {
   const Model model = split_float_2::createTestModel();
-  const std::vector<Request> requests = createRequests(split_float_2::examples);
+  const std::vector<Request> requests = createRequests(split_float_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14892,12 +16344,12 @@
   generated_tests::Execute(device,
                            split_float_2::createTestModel_relaxed,
                            split_float_2::is_ignored_relaxed,
-                           split_float_2::examples_relaxed);
+                           split_float_2::get_examples_relaxed());
 }
 
 TEST_F(ValidationTest, split_float_2_relaxed) {
   const Model model = split_float_2::createTestModel_relaxed();
-  const std::vector<Request> requests = createRequests(split_float_2::examples_relaxed);
+  const std::vector<Request> requests = createRequests(split_float_2::get_examples_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14907,12 +16359,12 @@
   generated_tests::Execute(device,
                            split_float_2::createTestModel_float16,
                            split_float_2::is_ignored_float16,
-                           split_float_2::examples_float16);
+                           split_float_2::get_examples_float16());
 }
 
 TEST_F(ValidationTest, split_float_2_float16) {
   const Model model = split_float_2::createTestModel_float16();
-  const std::vector<Request> requests = createRequests(split_float_2::examples_float16);
+  const std::vector<Request> requests = createRequests(split_float_2::get_examples_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14930,12 +16382,12 @@
   generated_tests::Execute(device,
                            split_float_3::createTestModel,
                            split_float_3::is_ignored,
-                           split_float_3::examples);
+                           split_float_3::get_examples());
 }
 
 TEST_F(ValidationTest, split_float_3) {
   const Model model = split_float_3::createTestModel();
-  const std::vector<Request> requests = createRequests(split_float_3::examples);
+  const std::vector<Request> requests = createRequests(split_float_3::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14945,12 +16397,12 @@
   generated_tests::Execute(device,
                            split_float_3::createTestModel_relaxed,
                            split_float_3::is_ignored_relaxed,
-                           split_float_3::examples_relaxed);
+                           split_float_3::get_examples_relaxed());
 }
 
 TEST_F(ValidationTest, split_float_3_relaxed) {
   const Model model = split_float_3::createTestModel_relaxed();
-  const std::vector<Request> requests = createRequests(split_float_3::examples_relaxed);
+  const std::vector<Request> requests = createRequests(split_float_3::get_examples_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14960,12 +16412,12 @@
   generated_tests::Execute(device,
                            split_float_3::createTestModel_float16,
                            split_float_3::is_ignored_float16,
-                           split_float_3::examples_float16);
+                           split_float_3::get_examples_float16());
 }
 
 TEST_F(ValidationTest, split_float_3_float16) {
   const Model model = split_float_3::createTestModel_float16();
-  const std::vector<Request> requests = createRequests(split_float_3::examples_float16);
+  const std::vector<Request> requests = createRequests(split_float_3::get_examples_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14983,12 +16435,12 @@
   generated_tests::Execute(device,
                            split_float_4::createTestModel,
                            split_float_4::is_ignored,
-                           split_float_4::examples);
+                           split_float_4::get_examples());
 }
 
 TEST_F(ValidationTest, split_float_4) {
   const Model model = split_float_4::createTestModel();
-  const std::vector<Request> requests = createRequests(split_float_4::examples);
+  const std::vector<Request> requests = createRequests(split_float_4::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -14998,12 +16450,12 @@
   generated_tests::Execute(device,
                            split_float_4::createTestModel_relaxed,
                            split_float_4::is_ignored_relaxed,
-                           split_float_4::examples_relaxed);
+                           split_float_4::get_examples_relaxed());
 }
 
 TEST_F(ValidationTest, split_float_4_relaxed) {
   const Model model = split_float_4::createTestModel_relaxed();
-  const std::vector<Request> requests = createRequests(split_float_4::examples_relaxed);
+  const std::vector<Request> requests = createRequests(split_float_4::get_examples_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15013,12 +16465,12 @@
   generated_tests::Execute(device,
                            split_float_4::createTestModel_float16,
                            split_float_4::is_ignored_float16,
-                           split_float_4::examples_float16);
+                           split_float_4::get_examples_float16());
 }
 
 TEST_F(ValidationTest, split_float_4_float16) {
   const Model model = split_float_4::createTestModel_float16();
-  const std::vector<Request> requests = createRequests(split_float_4::examples_float16);
+  const std::vector<Request> requests = createRequests(split_float_4::get_examples_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15036,12 +16488,12 @@
   generated_tests::Execute(device,
                            split_float_5::createTestModel,
                            split_float_5::is_ignored,
-                           split_float_5::examples);
+                           split_float_5::get_examples());
 }
 
 TEST_F(ValidationTest, split_float_5) {
   const Model model = split_float_5::createTestModel();
-  const std::vector<Request> requests = createRequests(split_float_5::examples);
+  const std::vector<Request> requests = createRequests(split_float_5::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15051,12 +16503,12 @@
   generated_tests::Execute(device,
                            split_float_5::createTestModel_relaxed,
                            split_float_5::is_ignored_relaxed,
-                           split_float_5::examples_relaxed);
+                           split_float_5::get_examples_relaxed());
 }
 
 TEST_F(ValidationTest, split_float_5_relaxed) {
   const Model model = split_float_5::createTestModel_relaxed();
-  const std::vector<Request> requests = createRequests(split_float_5::examples_relaxed);
+  const std::vector<Request> requests = createRequests(split_float_5::get_examples_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15066,12 +16518,12 @@
   generated_tests::Execute(device,
                            split_float_5::createTestModel_float16,
                            split_float_5::is_ignored_float16,
-                           split_float_5::examples_float16);
+                           split_float_5::get_examples_float16());
 }
 
 TEST_F(ValidationTest, split_float_5_float16) {
   const Model model = split_float_5::createTestModel_float16();
-  const std::vector<Request> requests = createRequests(split_float_5::examples_float16);
+  const std::vector<Request> requests = createRequests(split_float_5::get_examples_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15089,12 +16541,12 @@
   generated_tests::Execute(device,
                            split_int32_1::createTestModel,
                            split_int32_1::is_ignored,
-                           split_int32_1::examples);
+                           split_int32_1::get_examples());
 }
 
 TEST_F(ValidationTest, split_int32_1) {
   const Model model = split_int32_1::createTestModel();
-  const std::vector<Request> requests = createRequests(split_int32_1::examples);
+  const std::vector<Request> requests = createRequests(split_int32_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15104,12 +16556,12 @@
   generated_tests::Execute(device,
                            split_int32_1::createTestModel_relaxed,
                            split_int32_1::is_ignored_relaxed,
-                           split_int32_1::examples_relaxed);
+                           split_int32_1::get_examples_relaxed());
 }
 
 TEST_F(ValidationTest, split_int32_1_relaxed) {
   const Model model = split_int32_1::createTestModel_relaxed();
-  const std::vector<Request> requests = createRequests(split_int32_1::examples_relaxed);
+  const std::vector<Request> requests = createRequests(split_int32_1::get_examples_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15127,12 +16579,12 @@
   generated_tests::Execute(device,
                            split_int32_2::createTestModel,
                            split_int32_2::is_ignored,
-                           split_int32_2::examples);
+                           split_int32_2::get_examples());
 }
 
 TEST_F(ValidationTest, split_int32_2) {
   const Model model = split_int32_2::createTestModel();
-  const std::vector<Request> requests = createRequests(split_int32_2::examples);
+  const std::vector<Request> requests = createRequests(split_int32_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15142,12 +16594,12 @@
   generated_tests::Execute(device,
                            split_int32_2::createTestModel_relaxed,
                            split_int32_2::is_ignored_relaxed,
-                           split_int32_2::examples_relaxed);
+                           split_int32_2::get_examples_relaxed());
 }
 
 TEST_F(ValidationTest, split_int32_2_relaxed) {
   const Model model = split_int32_2::createTestModel_relaxed();
-  const std::vector<Request> requests = createRequests(split_int32_2::examples_relaxed);
+  const std::vector<Request> requests = createRequests(split_int32_2::get_examples_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15165,12 +16617,12 @@
   generated_tests::Execute(device,
                            split_int32_3::createTestModel,
                            split_int32_3::is_ignored,
-                           split_int32_3::examples);
+                           split_int32_3::get_examples());
 }
 
 TEST_F(ValidationTest, split_int32_3) {
   const Model model = split_int32_3::createTestModel();
-  const std::vector<Request> requests = createRequests(split_int32_3::examples);
+  const std::vector<Request> requests = createRequests(split_int32_3::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15180,12 +16632,12 @@
   generated_tests::Execute(device,
                            split_int32_3::createTestModel_relaxed,
                            split_int32_3::is_ignored_relaxed,
-                           split_int32_3::examples_relaxed);
+                           split_int32_3::get_examples_relaxed());
 }
 
 TEST_F(ValidationTest, split_int32_3_relaxed) {
   const Model model = split_int32_3::createTestModel_relaxed();
-  const std::vector<Request> requests = createRequests(split_int32_3::examples_relaxed);
+  const std::vector<Request> requests = createRequests(split_int32_3::get_examples_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15203,12 +16655,12 @@
   generated_tests::Execute(device,
                            split_int32_4::createTestModel,
                            split_int32_4::is_ignored,
-                           split_int32_4::examples);
+                           split_int32_4::get_examples());
 }
 
 TEST_F(ValidationTest, split_int32_4) {
   const Model model = split_int32_4::createTestModel();
-  const std::vector<Request> requests = createRequests(split_int32_4::examples);
+  const std::vector<Request> requests = createRequests(split_int32_4::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15218,12 +16670,12 @@
   generated_tests::Execute(device,
                            split_int32_4::createTestModel_relaxed,
                            split_int32_4::is_ignored_relaxed,
-                           split_int32_4::examples_relaxed);
+                           split_int32_4::get_examples_relaxed());
 }
 
 TEST_F(ValidationTest, split_int32_4_relaxed) {
   const Model model = split_int32_4::createTestModel_relaxed();
-  const std::vector<Request> requests = createRequests(split_int32_4::examples_relaxed);
+  const std::vector<Request> requests = createRequests(split_int32_4::get_examples_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15241,12 +16693,12 @@
   generated_tests::Execute(device,
                            split_quant8_1::createTestModel,
                            split_quant8_1::is_ignored,
-                           split_quant8_1::examples);
+                           split_quant8_1::get_examples());
 }
 
 TEST_F(ValidationTest, split_quant8_1) {
   const Model model = split_quant8_1::createTestModel();
-  const std::vector<Request> requests = createRequests(split_quant8_1::examples);
+  const std::vector<Request> requests = createRequests(split_quant8_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15256,12 +16708,12 @@
   generated_tests::Execute(device,
                            split_quant8_1::createTestModel_relaxed,
                            split_quant8_1::is_ignored_relaxed,
-                           split_quant8_1::examples_relaxed);
+                           split_quant8_1::get_examples_relaxed());
 }
 
 TEST_F(ValidationTest, split_quant8_1_relaxed) {
   const Model model = split_quant8_1::createTestModel_relaxed();
-  const std::vector<Request> requests = createRequests(split_quant8_1::examples_relaxed);
+  const std::vector<Request> requests = createRequests(split_quant8_1::get_examples_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15279,12 +16731,12 @@
   generated_tests::Execute(device,
                            split_quant8_2::createTestModel,
                            split_quant8_2::is_ignored,
-                           split_quant8_2::examples);
+                           split_quant8_2::get_examples());
 }
 
 TEST_F(ValidationTest, split_quant8_2) {
   const Model model = split_quant8_2::createTestModel();
-  const std::vector<Request> requests = createRequests(split_quant8_2::examples);
+  const std::vector<Request> requests = createRequests(split_quant8_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15294,12 +16746,12 @@
   generated_tests::Execute(device,
                            split_quant8_2::createTestModel_relaxed,
                            split_quant8_2::is_ignored_relaxed,
-                           split_quant8_2::examples_relaxed);
+                           split_quant8_2::get_examples_relaxed());
 }
 
 TEST_F(ValidationTest, split_quant8_2_relaxed) {
   const Model model = split_quant8_2::createTestModel_relaxed();
-  const std::vector<Request> requests = createRequests(split_quant8_2::examples_relaxed);
+  const std::vector<Request> requests = createRequests(split_quant8_2::get_examples_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15317,12 +16769,12 @@
   generated_tests::Execute(device,
                            split_quant8_3::createTestModel,
                            split_quant8_3::is_ignored,
-                           split_quant8_3::examples);
+                           split_quant8_3::get_examples());
 }
 
 TEST_F(ValidationTest, split_quant8_3) {
   const Model model = split_quant8_3::createTestModel();
-  const std::vector<Request> requests = createRequests(split_quant8_3::examples);
+  const std::vector<Request> requests = createRequests(split_quant8_3::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15340,12 +16792,58 @@
   generated_tests::Execute(device,
                            split_quant8_4::createTestModel,
                            split_quant8_4::is_ignored,
-                           split_quant8_4::examples);
+                           split_quant8_4::get_examples());
 }
 
 TEST_F(ValidationTest, split_quant8_4) {
   const Model model = split_quant8_4::createTestModel();
-  const std::vector<Request> requests = createRequests(split_quant8_4::examples);
+  const std::vector<Request> requests = createRequests(split_quant8_4::get_examples());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+// Generated from: squeeze_float16.mod.py.
+namespace squeeze_float16 {
+// Generated squeeze_float16 test
+#include "examples/squeeze_float16.example.cpp"
+// Generated model constructor
+#include "vts_models/squeeze_float16.model.cpp"
+} // namespace squeeze_float16
+
+TEST_F(NeuralnetworksHidlTest, squeeze_float16) {
+  generated_tests::Execute(device,
+                           squeeze_float16::createTestModel,
+                           squeeze_float16::is_ignored,
+                           squeeze_float16::get_examples());
+}
+
+TEST_F(ValidationTest, squeeze_float16) {
+  const Model model = squeeze_float16::createTestModel();
+  const std::vector<Request> requests = createRequests(squeeze_float16::get_examples());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+// Generated from: strided_slice_float16.mod.py.
+namespace strided_slice_float16 {
+// Generated strided_slice_float16 test
+#include "examples/strided_slice_float16.example.cpp"
+// Generated model constructor
+#include "vts_models/strided_slice_float16.model.cpp"
+} // namespace strided_slice_float16
+
+TEST_F(NeuralnetworksHidlTest, strided_slice_float16) {
+  generated_tests::Execute(device,
+                           strided_slice_float16::createTestModel,
+                           strided_slice_float16::is_ignored,
+                           strided_slice_float16::get_examples());
+}
+
+TEST_F(ValidationTest, strided_slice_float16) {
+  const Model model = strided_slice_float16::createTestModel();
+  const std::vector<Request> requests = createRequests(strided_slice_float16::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15363,12 +16861,12 @@
   generated_tests::Execute(device,
                            sub_float16::createTestModel,
                            sub_float16::is_ignored,
-                           sub_float16::examples);
+                           sub_float16::get_examples());
 }
 
 TEST_F(ValidationTest, sub_float16) {
   const Model model = sub_float16::createTestModel();
-  const std::vector<Request> requests = createRequests(sub_float16::examples);
+  const std::vector<Request> requests = createRequests(sub_float16::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15386,12 +16884,12 @@
   generated_tests::Execute(device,
                            sub_float16_broadcast::createTestModel,
                            sub_float16_broadcast::is_ignored,
-                           sub_float16_broadcast::examples);
+                           sub_float16_broadcast::get_examples());
 }
 
 TEST_F(ValidationTest, sub_float16_broadcast) {
   const Model model = sub_float16_broadcast::createTestModel();
-  const std::vector<Request> requests = createRequests(sub_float16_broadcast::examples);
+  const std::vector<Request> requests = createRequests(sub_float16_broadcast::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15409,12 +16907,12 @@
   generated_tests::Execute(device,
                            sub_quantized::createTestModel,
                            sub_quantized::is_ignored,
-                           sub_quantized::examples);
+                           sub_quantized::get_examples());
 }
 
 TEST_F(ValidationTest, sub_quantized) {
   const Model model = sub_quantized::createTestModel();
-  const std::vector<Request> requests = createRequests(sub_quantized::examples);
+  const std::vector<Request> requests = createRequests(sub_quantized::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15432,12 +16930,12 @@
   generated_tests::Execute(device,
                            sub_quantized_broadcast::createTestModel,
                            sub_quantized_broadcast::is_ignored,
-                           sub_quantized_broadcast::examples);
+                           sub_quantized_broadcast::get_examples());
 }
 
 TEST_F(ValidationTest, sub_quantized_broadcast) {
   const Model model = sub_quantized_broadcast::createTestModel();
-  const std::vector<Request> requests = createRequests(sub_quantized_broadcast::examples);
+  const std::vector<Request> requests = createRequests(sub_quantized_broadcast::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15455,12 +16953,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel,
                            sub_quantized_different_scales::is_ignored,
-                           sub_quantized_different_scales::examples);
+                           sub_quantized_different_scales::get_examples());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales) {
   const Model model = sub_quantized_different_scales::createTestModel();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15470,12 +16968,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_2,
                            sub_quantized_different_scales::is_ignored_2,
-                           sub_quantized_different_scales::examples_2);
+                           sub_quantized_different_scales::get_examples_2());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_2) {
   const Model model = sub_quantized_different_scales::createTestModel_2();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_2);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15485,12 +16983,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_3,
                            sub_quantized_different_scales::is_ignored_3,
-                           sub_quantized_different_scales::examples_3);
+                           sub_quantized_different_scales::get_examples_3());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_3) {
   const Model model = sub_quantized_different_scales::createTestModel_3();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_3);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15500,12 +16998,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_4,
                            sub_quantized_different_scales::is_ignored_4,
-                           sub_quantized_different_scales::examples_4);
+                           sub_quantized_different_scales::get_examples_4());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_4) {
   const Model model = sub_quantized_different_scales::createTestModel_4();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_4);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15515,12 +17013,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_5,
                            sub_quantized_different_scales::is_ignored_5,
-                           sub_quantized_different_scales::examples_5);
+                           sub_quantized_different_scales::get_examples_5());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_5) {
   const Model model = sub_quantized_different_scales::createTestModel_5();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_5);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_5());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15530,12 +17028,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_6,
                            sub_quantized_different_scales::is_ignored_6,
-                           sub_quantized_different_scales::examples_6);
+                           sub_quantized_different_scales::get_examples_6());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_6) {
   const Model model = sub_quantized_different_scales::createTestModel_6();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_6);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_6());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15545,12 +17043,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_7,
                            sub_quantized_different_scales::is_ignored_7,
-                           sub_quantized_different_scales::examples_7);
+                           sub_quantized_different_scales::get_examples_7());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_7) {
   const Model model = sub_quantized_different_scales::createTestModel_7();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_7);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_7());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15560,12 +17058,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_8,
                            sub_quantized_different_scales::is_ignored_8,
-                           sub_quantized_different_scales::examples_8);
+                           sub_quantized_different_scales::get_examples_8());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_8) {
   const Model model = sub_quantized_different_scales::createTestModel_8();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_8);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15575,12 +17073,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_9,
                            sub_quantized_different_scales::is_ignored_9,
-                           sub_quantized_different_scales::examples_9);
+                           sub_quantized_different_scales::get_examples_9());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_9) {
   const Model model = sub_quantized_different_scales::createTestModel_9();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_9);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_9());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15590,12 +17088,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_10,
                            sub_quantized_different_scales::is_ignored_10,
-                           sub_quantized_different_scales::examples_10);
+                           sub_quantized_different_scales::get_examples_10());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_10) {
   const Model model = sub_quantized_different_scales::createTestModel_10();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_10);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_10());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15605,12 +17103,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_11,
                            sub_quantized_different_scales::is_ignored_11,
-                           sub_quantized_different_scales::examples_11);
+                           sub_quantized_different_scales::get_examples_11());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_11) {
   const Model model = sub_quantized_different_scales::createTestModel_11();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_11);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_11());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15620,12 +17118,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_12,
                            sub_quantized_different_scales::is_ignored_12,
-                           sub_quantized_different_scales::examples_12);
+                           sub_quantized_different_scales::get_examples_12());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_12) {
   const Model model = sub_quantized_different_scales::createTestModel_12();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_12);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_12());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15635,12 +17133,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_13,
                            sub_quantized_different_scales::is_ignored_13,
-                           sub_quantized_different_scales::examples_13);
+                           sub_quantized_different_scales::get_examples_13());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_13) {
   const Model model = sub_quantized_different_scales::createTestModel_13();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_13);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_13());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15650,12 +17148,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_14,
                            sub_quantized_different_scales::is_ignored_14,
-                           sub_quantized_different_scales::examples_14);
+                           sub_quantized_different_scales::get_examples_14());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_14) {
   const Model model = sub_quantized_different_scales::createTestModel_14();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_14);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_14());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15665,12 +17163,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_15,
                            sub_quantized_different_scales::is_ignored_15,
-                           sub_quantized_different_scales::examples_15);
+                           sub_quantized_different_scales::get_examples_15());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_15) {
   const Model model = sub_quantized_different_scales::createTestModel_15();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_15);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_15());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15680,12 +17178,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_16,
                            sub_quantized_different_scales::is_ignored_16,
-                           sub_quantized_different_scales::examples_16);
+                           sub_quantized_different_scales::get_examples_16());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_16) {
   const Model model = sub_quantized_different_scales::createTestModel_16();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_16);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15695,12 +17193,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_17,
                            sub_quantized_different_scales::is_ignored_17,
-                           sub_quantized_different_scales::examples_17);
+                           sub_quantized_different_scales::get_examples_17());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_17) {
   const Model model = sub_quantized_different_scales::createTestModel_17();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_17);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_17());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15710,12 +17208,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_18,
                            sub_quantized_different_scales::is_ignored_18,
-                           sub_quantized_different_scales::examples_18);
+                           sub_quantized_different_scales::get_examples_18());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_18) {
   const Model model = sub_quantized_different_scales::createTestModel_18();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_18);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_18());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15725,12 +17223,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_19,
                            sub_quantized_different_scales::is_ignored_19,
-                           sub_quantized_different_scales::examples_19);
+                           sub_quantized_different_scales::get_examples_19());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_19) {
   const Model model = sub_quantized_different_scales::createTestModel_19();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_19);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_19());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15740,12 +17238,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_20,
                            sub_quantized_different_scales::is_ignored_20,
-                           sub_quantized_different_scales::examples_20);
+                           sub_quantized_different_scales::get_examples_20());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_20) {
   const Model model = sub_quantized_different_scales::createTestModel_20();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_20);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_20());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15755,12 +17253,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_21,
                            sub_quantized_different_scales::is_ignored_21,
-                           sub_quantized_different_scales::examples_21);
+                           sub_quantized_different_scales::get_examples_21());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_21) {
   const Model model = sub_quantized_different_scales::createTestModel_21();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_21);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_21());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15770,12 +17268,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_22,
                            sub_quantized_different_scales::is_ignored_22,
-                           sub_quantized_different_scales::examples_22);
+                           sub_quantized_different_scales::get_examples_22());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_22) {
   const Model model = sub_quantized_different_scales::createTestModel_22();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_22);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_22());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15785,12 +17283,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_23,
                            sub_quantized_different_scales::is_ignored_23,
-                           sub_quantized_different_scales::examples_23);
+                           sub_quantized_different_scales::get_examples_23());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_23) {
   const Model model = sub_quantized_different_scales::createTestModel_23();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_23);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_23());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15800,12 +17298,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_24,
                            sub_quantized_different_scales::is_ignored_24,
-                           sub_quantized_different_scales::examples_24);
+                           sub_quantized_different_scales::get_examples_24());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_24) {
   const Model model = sub_quantized_different_scales::createTestModel_24();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_24);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_24());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15815,12 +17313,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_25,
                            sub_quantized_different_scales::is_ignored_25,
-                           sub_quantized_different_scales::examples_25);
+                           sub_quantized_different_scales::get_examples_25());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_25) {
   const Model model = sub_quantized_different_scales::createTestModel_25();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_25);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_25());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15830,12 +17328,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_26,
                            sub_quantized_different_scales::is_ignored_26,
-                           sub_quantized_different_scales::examples_26);
+                           sub_quantized_different_scales::get_examples_26());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_26) {
   const Model model = sub_quantized_different_scales::createTestModel_26();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_26);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_26());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15845,12 +17343,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_27,
                            sub_quantized_different_scales::is_ignored_27,
-                           sub_quantized_different_scales::examples_27);
+                           sub_quantized_different_scales::get_examples_27());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_27) {
   const Model model = sub_quantized_different_scales::createTestModel_27();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_27);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_27());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15860,12 +17358,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_28,
                            sub_quantized_different_scales::is_ignored_28,
-                           sub_quantized_different_scales::examples_28);
+                           sub_quantized_different_scales::get_examples_28());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_28) {
   const Model model = sub_quantized_different_scales::createTestModel_28();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_28);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_28());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15875,12 +17373,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_29,
                            sub_quantized_different_scales::is_ignored_29,
-                           sub_quantized_different_scales::examples_29);
+                           sub_quantized_different_scales::get_examples_29());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_29) {
   const Model model = sub_quantized_different_scales::createTestModel_29();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_29);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_29());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15890,12 +17388,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_30,
                            sub_quantized_different_scales::is_ignored_30,
-                           sub_quantized_different_scales::examples_30);
+                           sub_quantized_different_scales::get_examples_30());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_30) {
   const Model model = sub_quantized_different_scales::createTestModel_30();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_30);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_30());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15905,12 +17403,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_31,
                            sub_quantized_different_scales::is_ignored_31,
-                           sub_quantized_different_scales::examples_31);
+                           sub_quantized_different_scales::get_examples_31());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_31) {
   const Model model = sub_quantized_different_scales::createTestModel_31();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_31);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_31());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15920,12 +17418,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_32,
                            sub_quantized_different_scales::is_ignored_32,
-                           sub_quantized_different_scales::examples_32);
+                           sub_quantized_different_scales::get_examples_32());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_32) {
   const Model model = sub_quantized_different_scales::createTestModel_32();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_32);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_32());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15935,12 +17433,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_33,
                            sub_quantized_different_scales::is_ignored_33,
-                           sub_quantized_different_scales::examples_33);
+                           sub_quantized_different_scales::get_examples_33());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_33) {
   const Model model = sub_quantized_different_scales::createTestModel_33();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_33);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_33());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15950,12 +17448,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_34,
                            sub_quantized_different_scales::is_ignored_34,
-                           sub_quantized_different_scales::examples_34);
+                           sub_quantized_different_scales::get_examples_34());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_34) {
   const Model model = sub_quantized_different_scales::createTestModel_34();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_34);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_34());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15965,12 +17463,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_35,
                            sub_quantized_different_scales::is_ignored_35,
-                           sub_quantized_different_scales::examples_35);
+                           sub_quantized_different_scales::get_examples_35());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_35) {
   const Model model = sub_quantized_different_scales::createTestModel_35();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_35);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_35());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15980,12 +17478,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_36,
                            sub_quantized_different_scales::is_ignored_36,
-                           sub_quantized_different_scales::examples_36);
+                           sub_quantized_different_scales::get_examples_36());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_36) {
   const Model model = sub_quantized_different_scales::createTestModel_36();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_36);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_36());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -15995,12 +17493,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_37,
                            sub_quantized_different_scales::is_ignored_37,
-                           sub_quantized_different_scales::examples_37);
+                           sub_quantized_different_scales::get_examples_37());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_37) {
   const Model model = sub_quantized_different_scales::createTestModel_37();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_37);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_37());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16010,12 +17508,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_38,
                            sub_quantized_different_scales::is_ignored_38,
-                           sub_quantized_different_scales::examples_38);
+                           sub_quantized_different_scales::get_examples_38());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_38) {
   const Model model = sub_quantized_different_scales::createTestModel_38();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_38);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_38());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16025,12 +17523,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_39,
                            sub_quantized_different_scales::is_ignored_39,
-                           sub_quantized_different_scales::examples_39);
+                           sub_quantized_different_scales::get_examples_39());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_39) {
   const Model model = sub_quantized_different_scales::createTestModel_39();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_39);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_39());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16040,12 +17538,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_40,
                            sub_quantized_different_scales::is_ignored_40,
-                           sub_quantized_different_scales::examples_40);
+                           sub_quantized_different_scales::get_examples_40());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_40) {
   const Model model = sub_quantized_different_scales::createTestModel_40();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_40);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_40());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16055,12 +17553,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_41,
                            sub_quantized_different_scales::is_ignored_41,
-                           sub_quantized_different_scales::examples_41);
+                           sub_quantized_different_scales::get_examples_41());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_41) {
   const Model model = sub_quantized_different_scales::createTestModel_41();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_41);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_41());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16070,12 +17568,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_42,
                            sub_quantized_different_scales::is_ignored_42,
-                           sub_quantized_different_scales::examples_42);
+                           sub_quantized_different_scales::get_examples_42());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_42) {
   const Model model = sub_quantized_different_scales::createTestModel_42();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_42);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_42());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16085,12 +17583,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_43,
                            sub_quantized_different_scales::is_ignored_43,
-                           sub_quantized_different_scales::examples_43);
+                           sub_quantized_different_scales::get_examples_43());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_43) {
   const Model model = sub_quantized_different_scales::createTestModel_43();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_43);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_43());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16100,12 +17598,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_44,
                            sub_quantized_different_scales::is_ignored_44,
-                           sub_quantized_different_scales::examples_44);
+                           sub_quantized_different_scales::get_examples_44());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_44) {
   const Model model = sub_quantized_different_scales::createTestModel_44();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_44);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_44());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16115,12 +17613,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_45,
                            sub_quantized_different_scales::is_ignored_45,
-                           sub_quantized_different_scales::examples_45);
+                           sub_quantized_different_scales::get_examples_45());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_45) {
   const Model model = sub_quantized_different_scales::createTestModel_45();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_45);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_45());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16130,12 +17628,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_46,
                            sub_quantized_different_scales::is_ignored_46,
-                           sub_quantized_different_scales::examples_46);
+                           sub_quantized_different_scales::get_examples_46());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_46) {
   const Model model = sub_quantized_different_scales::createTestModel_46();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_46);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_46());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16145,12 +17643,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_47,
                            sub_quantized_different_scales::is_ignored_47,
-                           sub_quantized_different_scales::examples_47);
+                           sub_quantized_different_scales::get_examples_47());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_47) {
   const Model model = sub_quantized_different_scales::createTestModel_47();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_47);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_47());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16160,12 +17658,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_48,
                            sub_quantized_different_scales::is_ignored_48,
-                           sub_quantized_different_scales::examples_48);
+                           sub_quantized_different_scales::get_examples_48());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_48) {
   const Model model = sub_quantized_different_scales::createTestModel_48();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_48);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_48());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16175,12 +17673,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_49,
                            sub_quantized_different_scales::is_ignored_49,
-                           sub_quantized_different_scales::examples_49);
+                           sub_quantized_different_scales::get_examples_49());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_49) {
   const Model model = sub_quantized_different_scales::createTestModel_49();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_49);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_49());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16190,12 +17688,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_50,
                            sub_quantized_different_scales::is_ignored_50,
-                           sub_quantized_different_scales::examples_50);
+                           sub_quantized_different_scales::get_examples_50());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_50) {
   const Model model = sub_quantized_different_scales::createTestModel_50();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_50);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_50());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16205,12 +17703,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_51,
                            sub_quantized_different_scales::is_ignored_51,
-                           sub_quantized_different_scales::examples_51);
+                           sub_quantized_different_scales::get_examples_51());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_51) {
   const Model model = sub_quantized_different_scales::createTestModel_51();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_51);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_51());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16220,12 +17718,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_52,
                            sub_quantized_different_scales::is_ignored_52,
-                           sub_quantized_different_scales::examples_52);
+                           sub_quantized_different_scales::get_examples_52());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_52) {
   const Model model = sub_quantized_different_scales::createTestModel_52();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_52);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_52());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16235,12 +17733,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_53,
                            sub_quantized_different_scales::is_ignored_53,
-                           sub_quantized_different_scales::examples_53);
+                           sub_quantized_different_scales::get_examples_53());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_53) {
   const Model model = sub_quantized_different_scales::createTestModel_53();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_53);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_53());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16250,12 +17748,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_54,
                            sub_quantized_different_scales::is_ignored_54,
-                           sub_quantized_different_scales::examples_54);
+                           sub_quantized_different_scales::get_examples_54());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_54) {
   const Model model = sub_quantized_different_scales::createTestModel_54();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_54);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_54());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16265,12 +17763,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_55,
                            sub_quantized_different_scales::is_ignored_55,
-                           sub_quantized_different_scales::examples_55);
+                           sub_quantized_different_scales::get_examples_55());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_55) {
   const Model model = sub_quantized_different_scales::createTestModel_55();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_55);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_55());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16280,12 +17778,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_56,
                            sub_quantized_different_scales::is_ignored_56,
-                           sub_quantized_different_scales::examples_56);
+                           sub_quantized_different_scales::get_examples_56());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_56) {
   const Model model = sub_quantized_different_scales::createTestModel_56();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_56);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_56());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16295,12 +17793,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_57,
                            sub_quantized_different_scales::is_ignored_57,
-                           sub_quantized_different_scales::examples_57);
+                           sub_quantized_different_scales::get_examples_57());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_57) {
   const Model model = sub_quantized_different_scales::createTestModel_57();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_57);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_57());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16310,12 +17808,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_58,
                            sub_quantized_different_scales::is_ignored_58,
-                           sub_quantized_different_scales::examples_58);
+                           sub_quantized_different_scales::get_examples_58());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_58) {
   const Model model = sub_quantized_different_scales::createTestModel_58();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_58);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_58());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16325,12 +17823,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_59,
                            sub_quantized_different_scales::is_ignored_59,
-                           sub_quantized_different_scales::examples_59);
+                           sub_quantized_different_scales::get_examples_59());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_59) {
   const Model model = sub_quantized_different_scales::createTestModel_59();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_59);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_59());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16340,12 +17838,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_60,
                            sub_quantized_different_scales::is_ignored_60,
-                           sub_quantized_different_scales::examples_60);
+                           sub_quantized_different_scales::get_examples_60());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_60) {
   const Model model = sub_quantized_different_scales::createTestModel_60();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_60);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_60());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16355,12 +17853,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_61,
                            sub_quantized_different_scales::is_ignored_61,
-                           sub_quantized_different_scales::examples_61);
+                           sub_quantized_different_scales::get_examples_61());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_61) {
   const Model model = sub_quantized_different_scales::createTestModel_61();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_61);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_61());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16370,12 +17868,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_62,
                            sub_quantized_different_scales::is_ignored_62,
-                           sub_quantized_different_scales::examples_62);
+                           sub_quantized_different_scales::get_examples_62());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_62) {
   const Model model = sub_quantized_different_scales::createTestModel_62();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_62);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_62());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16385,12 +17883,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_63,
                            sub_quantized_different_scales::is_ignored_63,
-                           sub_quantized_different_scales::examples_63);
+                           sub_quantized_different_scales::get_examples_63());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_63) {
   const Model model = sub_quantized_different_scales::createTestModel_63();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_63);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_63());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16400,12 +17898,12 @@
   generated_tests::Execute(device,
                            sub_quantized_different_scales::createTestModel_64,
                            sub_quantized_different_scales::is_ignored_64,
-                           sub_quantized_different_scales::examples_64);
+                           sub_quantized_different_scales::get_examples_64());
 }
 
 TEST_F(ValidationTest, sub_quantized_different_scales_64) {
   const Model model = sub_quantized_different_scales::createTestModel_64();
-  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::examples_64);
+  const std::vector<Request> requests = createRequests(sub_quantized_different_scales::get_examples_64());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16423,12 +17921,12 @@
   generated_tests::Execute(device,
                            tanh_float16::createTestModel,
                            tanh_float16::is_ignored,
-                           tanh_float16::examples);
+                           tanh_float16::get_examples());
 }
 
 TEST_F(ValidationTest, tanh_float16) {
   const Model model = tanh_float16::createTestModel();
-  const std::vector<Request> requests = createRequests(tanh_float16::examples);
+  const std::vector<Request> requests = createRequests(tanh_float16::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16446,12 +17944,12 @@
   generated_tests::Execute(device,
                            tanh_quantized::createTestModel,
                            tanh_quantized::is_ignored,
-                           tanh_quantized::examples);
+                           tanh_quantized::get_examples());
 }
 
 TEST_F(ValidationTest, tanh_quantized) {
   const Model model = tanh_quantized::createTestModel();
-  const std::vector<Request> requests = createRequests(tanh_quantized::examples);
+  const std::vector<Request> requests = createRequests(tanh_quantized::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16469,12 +17967,12 @@
   generated_tests::Execute(device,
                            tile_1::createTestModel,
                            tile_1::is_ignored,
-                           tile_1::examples);
+                           tile_1::get_examples());
 }
 
 TEST_F(ValidationTest, tile_1) {
   const Model model = tile_1::createTestModel();
-  const std::vector<Request> requests = createRequests(tile_1::examples);
+  const std::vector<Request> requests = createRequests(tile_1::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16484,12 +17982,12 @@
   generated_tests::Execute(device,
                            tile_1::createTestModel_relaxed,
                            tile_1::is_ignored_relaxed,
-                           tile_1::examples_relaxed);
+                           tile_1::get_examples_relaxed());
 }
 
 TEST_F(ValidationTest, tile_1_relaxed) {
   const Model model = tile_1::createTestModel_relaxed();
-  const std::vector<Request> requests = createRequests(tile_1::examples_relaxed);
+  const std::vector<Request> requests = createRequests(tile_1::get_examples_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16499,12 +17997,12 @@
   generated_tests::Execute(device,
                            tile_1::createTestModel_float16,
                            tile_1::is_ignored_float16,
-                           tile_1::examples_float16);
+                           tile_1::get_examples_float16());
 }
 
 TEST_F(ValidationTest, tile_1_float16) {
   const Model model = tile_1::createTestModel_float16();
-  const std::vector<Request> requests = createRequests(tile_1::examples_float16);
+  const std::vector<Request> requests = createRequests(tile_1::get_examples_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16514,12 +18012,12 @@
   generated_tests::Execute(device,
                            tile_1::createTestModel_quant8,
                            tile_1::is_ignored_quant8,
-                           tile_1::examples_quant8);
+                           tile_1::get_examples_quant8());
 }
 
 TEST_F(ValidationTest, tile_1_quant8) {
   const Model model = tile_1::createTestModel_quant8();
-  const std::vector<Request> requests = createRequests(tile_1::examples_quant8);
+  const std::vector<Request> requests = createRequests(tile_1::get_examples_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16537,12 +18035,12 @@
   generated_tests::Execute(device,
                            tile_2::createTestModel,
                            tile_2::is_ignored,
-                           tile_2::examples);
+                           tile_2::get_examples());
 }
 
 TEST_F(ValidationTest, tile_2) {
   const Model model = tile_2::createTestModel();
-  const std::vector<Request> requests = createRequests(tile_2::examples);
+  const std::vector<Request> requests = createRequests(tile_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16552,12 +18050,12 @@
   generated_tests::Execute(device,
                            tile_2::createTestModel_relaxed,
                            tile_2::is_ignored_relaxed,
-                           tile_2::examples_relaxed);
+                           tile_2::get_examples_relaxed());
 }
 
 TEST_F(ValidationTest, tile_2_relaxed) {
   const Model model = tile_2::createTestModel_relaxed();
-  const std::vector<Request> requests = createRequests(tile_2::examples_relaxed);
+  const std::vector<Request> requests = createRequests(tile_2::get_examples_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16567,12 +18065,12 @@
   generated_tests::Execute(device,
                            tile_2::createTestModel_float16,
                            tile_2::is_ignored_float16,
-                           tile_2::examples_float16);
+                           tile_2::get_examples_float16());
 }
 
 TEST_F(ValidationTest, tile_2_float16) {
   const Model model = tile_2::createTestModel_float16();
-  const std::vector<Request> requests = createRequests(tile_2::examples_float16);
+  const std::vector<Request> requests = createRequests(tile_2::get_examples_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16582,12 +18080,12 @@
   generated_tests::Execute(device,
                            tile_2::createTestModel_quant8,
                            tile_2::is_ignored_quant8,
-                           tile_2::examples_quant8);
+                           tile_2::get_examples_quant8());
 }
 
 TEST_F(ValidationTest, tile_2_quant8) {
   const Model model = tile_2::createTestModel_quant8();
-  const std::vector<Request> requests = createRequests(tile_2::examples_quant8);
+  const std::vector<Request> requests = createRequests(tile_2::get_examples_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16597,12 +18095,12 @@
   generated_tests::Execute(device,
                            tile_2::createTestModel_int32,
                            tile_2::is_ignored_int32,
-                           tile_2::examples_int32);
+                           tile_2::get_examples_int32());
 }
 
 TEST_F(ValidationTest, tile_2_int32) {
   const Model model = tile_2::createTestModel_int32();
-  const std::vector<Request> requests = createRequests(tile_2::examples_int32);
+  const std::vector<Request> requests = createRequests(tile_2::get_examples_int32());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16620,12 +18118,12 @@
   generated_tests::Execute(device,
                            tile_3::createTestModel,
                            tile_3::is_ignored,
-                           tile_3::examples);
+                           tile_3::get_examples());
 }
 
 TEST_F(ValidationTest, tile_3) {
   const Model model = tile_3::createTestModel();
-  const std::vector<Request> requests = createRequests(tile_3::examples);
+  const std::vector<Request> requests = createRequests(tile_3::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16635,12 +18133,12 @@
   generated_tests::Execute(device,
                            tile_3::createTestModel_relaxed,
                            tile_3::is_ignored_relaxed,
-                           tile_3::examples_relaxed);
+                           tile_3::get_examples_relaxed());
 }
 
 TEST_F(ValidationTest, tile_3_relaxed) {
   const Model model = tile_3::createTestModel_relaxed();
-  const std::vector<Request> requests = createRequests(tile_3::examples_relaxed);
+  const std::vector<Request> requests = createRequests(tile_3::get_examples_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16650,12 +18148,12 @@
   generated_tests::Execute(device,
                            tile_3::createTestModel_float16,
                            tile_3::is_ignored_float16,
-                           tile_3::examples_float16);
+                           tile_3::get_examples_float16());
 }
 
 TEST_F(ValidationTest, tile_3_float16) {
   const Model model = tile_3::createTestModel_float16();
-  const std::vector<Request> requests = createRequests(tile_3::examples_float16);
+  const std::vector<Request> requests = createRequests(tile_3::get_examples_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16665,12 +18163,12 @@
   generated_tests::Execute(device,
                            tile_3::createTestModel_quant8,
                            tile_3::is_ignored_quant8,
-                           tile_3::examples_quant8);
+                           tile_3::get_examples_quant8());
 }
 
 TEST_F(ValidationTest, tile_3_quant8) {
   const Model model = tile_3::createTestModel_quant8();
-  const std::vector<Request> requests = createRequests(tile_3::examples_quant8);
+  const std::vector<Request> requests = createRequests(tile_3::get_examples_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16680,12 +18178,12 @@
   generated_tests::Execute(device,
                            tile_3::createTestModel_int32,
                            tile_3::is_ignored_int32,
-                           tile_3::examples_int32);
+                           tile_3::get_examples_int32());
 }
 
 TEST_F(ValidationTest, tile_3_int32) {
   const Model model = tile_3::createTestModel_int32();
-  const std::vector<Request> requests = createRequests(tile_3::examples_int32);
+  const std::vector<Request> requests = createRequests(tile_3::get_examples_int32());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16703,12 +18201,12 @@
   generated_tests::Execute(device,
                            topk_v2::createTestModel,
                            topk_v2::is_ignored,
-                           topk_v2::examples);
+                           topk_v2::get_examples());
 }
 
 TEST_F(ValidationTest, topk_v2) {
   const Model model = topk_v2::createTestModel();
-  const std::vector<Request> requests = createRequests(topk_v2::examples);
+  const std::vector<Request> requests = createRequests(topk_v2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16718,12 +18216,12 @@
   generated_tests::Execute(device,
                            topk_v2::createTestModel_relaxed,
                            topk_v2::is_ignored_relaxed,
-                           topk_v2::examples_relaxed);
+                           topk_v2::get_examples_relaxed());
 }
 
 TEST_F(ValidationTest, topk_v2_relaxed) {
   const Model model = topk_v2::createTestModel_relaxed();
-  const std::vector<Request> requests = createRequests(topk_v2::examples_relaxed);
+  const std::vector<Request> requests = createRequests(topk_v2::get_examples_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16733,12 +18231,12 @@
   generated_tests::Execute(device,
                            topk_v2::createTestModel_float16,
                            topk_v2::is_ignored_float16,
-                           topk_v2::examples_float16);
+                           topk_v2::get_examples_float16());
 }
 
 TEST_F(ValidationTest, topk_v2_float16) {
   const Model model = topk_v2::createTestModel_float16();
-  const std::vector<Request> requests = createRequests(topk_v2::examples_float16);
+  const std::vector<Request> requests = createRequests(topk_v2::get_examples_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16748,12 +18246,12 @@
   generated_tests::Execute(device,
                            topk_v2::createTestModel_2,
                            topk_v2::is_ignored_2,
-                           topk_v2::examples_2);
+                           topk_v2::get_examples_2());
 }
 
 TEST_F(ValidationTest, topk_v2_2) {
   const Model model = topk_v2::createTestModel_2();
-  const std::vector<Request> requests = createRequests(topk_v2::examples_2);
+  const std::vector<Request> requests = createRequests(topk_v2::get_examples_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16763,12 +18261,12 @@
   generated_tests::Execute(device,
                            topk_v2::createTestModel_relaxed_2,
                            topk_v2::is_ignored_relaxed_2,
-                           topk_v2::examples_relaxed_2);
+                           topk_v2::get_examples_relaxed_2());
 }
 
 TEST_F(ValidationTest, topk_v2_relaxed_2) {
   const Model model = topk_v2::createTestModel_relaxed_2();
-  const std::vector<Request> requests = createRequests(topk_v2::examples_relaxed_2);
+  const std::vector<Request> requests = createRequests(topk_v2::get_examples_relaxed_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16778,12 +18276,12 @@
   generated_tests::Execute(device,
                            topk_v2::createTestModel_float16_2,
                            topk_v2::is_ignored_float16_2,
-                           topk_v2::examples_float16_2);
+                           topk_v2::get_examples_float16_2());
 }
 
 TEST_F(ValidationTest, topk_v2_float16_2) {
   const Model model = topk_v2::createTestModel_float16_2();
-  const std::vector<Request> requests = createRequests(topk_v2::examples_float16_2);
+  const std::vector<Request> requests = createRequests(topk_v2::get_examples_float16_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16793,12 +18291,12 @@
   generated_tests::Execute(device,
                            topk_v2::createTestModel_3,
                            topk_v2::is_ignored_3,
-                           topk_v2::examples_3);
+                           topk_v2::get_examples_3());
 }
 
 TEST_F(ValidationTest, topk_v2_3) {
   const Model model = topk_v2::createTestModel_3();
-  const std::vector<Request> requests = createRequests(topk_v2::examples_3);
+  const std::vector<Request> requests = createRequests(topk_v2::get_examples_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16808,12 +18306,12 @@
   generated_tests::Execute(device,
                            topk_v2::createTestModel_relaxed_3,
                            topk_v2::is_ignored_relaxed_3,
-                           topk_v2::examples_relaxed_3);
+                           topk_v2::get_examples_relaxed_3());
 }
 
 TEST_F(ValidationTest, topk_v2_relaxed_3) {
   const Model model = topk_v2::createTestModel_relaxed_3();
-  const std::vector<Request> requests = createRequests(topk_v2::examples_relaxed_3);
+  const std::vector<Request> requests = createRequests(topk_v2::get_examples_relaxed_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16823,12 +18321,12 @@
   generated_tests::Execute(device,
                            topk_v2::createTestModel_float16_3,
                            topk_v2::is_ignored_float16_3,
-                           topk_v2::examples_float16_3);
+                           topk_v2::get_examples_float16_3());
 }
 
 TEST_F(ValidationTest, topk_v2_float16_3) {
   const Model model = topk_v2::createTestModel_float16_3();
-  const std::vector<Request> requests = createRequests(topk_v2::examples_float16_3);
+  const std::vector<Request> requests = createRequests(topk_v2::get_examples_float16_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16838,12 +18336,12 @@
   generated_tests::Execute(device,
                            topk_v2::createTestModel_4,
                            topk_v2::is_ignored_4,
-                           topk_v2::examples_4);
+                           topk_v2::get_examples_4());
 }
 
 TEST_F(ValidationTest, topk_v2_4) {
   const Model model = topk_v2::createTestModel_4();
-  const std::vector<Request> requests = createRequests(topk_v2::examples_4);
+  const std::vector<Request> requests = createRequests(topk_v2::get_examples_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16853,12 +18351,12 @@
   generated_tests::Execute(device,
                            topk_v2::createTestModel_relaxed_4,
                            topk_v2::is_ignored_relaxed_4,
-                           topk_v2::examples_relaxed_4);
+                           topk_v2::get_examples_relaxed_4());
 }
 
 TEST_F(ValidationTest, topk_v2_relaxed_4) {
   const Model model = topk_v2::createTestModel_relaxed_4();
-  const std::vector<Request> requests = createRequests(topk_v2::examples_relaxed_4);
+  const std::vector<Request> requests = createRequests(topk_v2::get_examples_relaxed_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16868,12 +18366,12 @@
   generated_tests::Execute(device,
                            topk_v2::createTestModel_float16_4,
                            topk_v2::is_ignored_float16_4,
-                           topk_v2::examples_float16_4);
+                           topk_v2::get_examples_float16_4());
 }
 
 TEST_F(ValidationTest, topk_v2_float16_4) {
   const Model model = topk_v2::createTestModel_float16_4();
-  const std::vector<Request> requests = createRequests(topk_v2::examples_float16_4);
+  const std::vector<Request> requests = createRequests(topk_v2::get_examples_float16_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16883,12 +18381,12 @@
   generated_tests::Execute(device,
                            topk_v2::createTestModel_5,
                            topk_v2::is_ignored_5,
-                           topk_v2::examples_5);
+                           topk_v2::get_examples_5());
 }
 
 TEST_F(ValidationTest, topk_v2_5) {
   const Model model = topk_v2::createTestModel_5();
-  const std::vector<Request> requests = createRequests(topk_v2::examples_5);
+  const std::vector<Request> requests = createRequests(topk_v2::get_examples_5());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16898,12 +18396,12 @@
   generated_tests::Execute(device,
                            topk_v2::createTestModel_relaxed_5,
                            topk_v2::is_ignored_relaxed_5,
-                           topk_v2::examples_relaxed_5);
+                           topk_v2::get_examples_relaxed_5());
 }
 
 TEST_F(ValidationTest, topk_v2_relaxed_5) {
   const Model model = topk_v2::createTestModel_relaxed_5();
-  const std::vector<Request> requests = createRequests(topk_v2::examples_relaxed_5);
+  const std::vector<Request> requests = createRequests(topk_v2::get_examples_relaxed_5());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16913,12 +18411,12 @@
   generated_tests::Execute(device,
                            topk_v2::createTestModel_float16_5,
                            topk_v2::is_ignored_float16_5,
-                           topk_v2::examples_float16_5);
+                           topk_v2::get_examples_float16_5());
 }
 
 TEST_F(ValidationTest, topk_v2_float16_5) {
   const Model model = topk_v2::createTestModel_float16_5();
-  const std::vector<Request> requests = createRequests(topk_v2::examples_float16_5);
+  const std::vector<Request> requests = createRequests(topk_v2::get_examples_float16_5());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16928,12 +18426,12 @@
   generated_tests::Execute(device,
                            topk_v2::createTestModel_6,
                            topk_v2::is_ignored_6,
-                           topk_v2::examples_6);
+                           topk_v2::get_examples_6());
 }
 
 TEST_F(ValidationTest, topk_v2_6) {
   const Model model = topk_v2::createTestModel_6();
-  const std::vector<Request> requests = createRequests(topk_v2::examples_6);
+  const std::vector<Request> requests = createRequests(topk_v2::get_examples_6());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16943,12 +18441,12 @@
   generated_tests::Execute(device,
                            topk_v2::createTestModel_relaxed_6,
                            topk_v2::is_ignored_relaxed_6,
-                           topk_v2::examples_relaxed_6);
+                           topk_v2::get_examples_relaxed_6());
 }
 
 TEST_F(ValidationTest, topk_v2_relaxed_6) {
   const Model model = topk_v2::createTestModel_relaxed_6();
-  const std::vector<Request> requests = createRequests(topk_v2::examples_relaxed_6);
+  const std::vector<Request> requests = createRequests(topk_v2::get_examples_relaxed_6());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16958,12 +18456,12 @@
   generated_tests::Execute(device,
                            topk_v2::createTestModel_float16_6,
                            topk_v2::is_ignored_float16_6,
-                           topk_v2::examples_float16_6);
+                           topk_v2::get_examples_float16_6());
 }
 
 TEST_F(ValidationTest, topk_v2_float16_6) {
   const Model model = topk_v2::createTestModel_float16_6();
-  const std::vector<Request> requests = createRequests(topk_v2::examples_float16_6);
+  const std::vector<Request> requests = createRequests(topk_v2::get_examples_float16_6());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16981,12 +18479,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_none,
                            transpose_conv2d::is_ignored_nhwc_none,
-                           transpose_conv2d::examples_nhwc_none);
+                           transpose_conv2d::get_examples_nhwc_none());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_none) {
   const Model model = transpose_conv2d::createTestModel_nhwc_none();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_none);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_none());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -16996,12 +18494,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_none_weight_as_input,
                            transpose_conv2d::is_ignored_nhwc_none_weight_as_input,
-                           transpose_conv2d::examples_nhwc_none_weight_as_input);
+                           transpose_conv2d::get_examples_nhwc_none_weight_as_input());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_none_weight_as_input) {
   const Model model = transpose_conv2d::createTestModel_nhwc_none_weight_as_input();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_none_weight_as_input);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_none_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17011,12 +18509,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_none_relaxed,
                            transpose_conv2d::is_ignored_nhwc_none_relaxed,
-                           transpose_conv2d::examples_nhwc_none_relaxed);
+                           transpose_conv2d::get_examples_nhwc_none_relaxed());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_none_relaxed) {
   const Model model = transpose_conv2d::createTestModel_nhwc_none_relaxed();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_none_relaxed);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_none_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17026,12 +18524,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_none_relaxed_weight_as_input,
                            transpose_conv2d::is_ignored_nhwc_none_relaxed_weight_as_input,
-                           transpose_conv2d::examples_nhwc_none_relaxed_weight_as_input);
+                           transpose_conv2d::get_examples_nhwc_none_relaxed_weight_as_input());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_none_relaxed_weight_as_input) {
   const Model model = transpose_conv2d::createTestModel_nhwc_none_relaxed_weight_as_input();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_none_relaxed_weight_as_input);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_none_relaxed_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17041,12 +18539,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_none_quant8,
                            transpose_conv2d::is_ignored_nhwc_none_quant8,
-                           transpose_conv2d::examples_nhwc_none_quant8);
+                           transpose_conv2d::get_examples_nhwc_none_quant8());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_none_quant8) {
   const Model model = transpose_conv2d::createTestModel_nhwc_none_quant8();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_none_quant8);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_none_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17056,12 +18554,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_none_quant8_weight_as_input,
                            transpose_conv2d::is_ignored_nhwc_none_quant8_weight_as_input,
-                           transpose_conv2d::examples_nhwc_none_quant8_weight_as_input);
+                           transpose_conv2d::get_examples_nhwc_none_quant8_weight_as_input());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_none_quant8_weight_as_input) {
   const Model model = transpose_conv2d::createTestModel_nhwc_none_quant8_weight_as_input();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_none_quant8_weight_as_input);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_none_quant8_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17071,12 +18569,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_relu,
                            transpose_conv2d::is_ignored_nhwc_relu,
-                           transpose_conv2d::examples_nhwc_relu);
+                           transpose_conv2d::get_examples_nhwc_relu());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_relu) {
   const Model model = transpose_conv2d::createTestModel_nhwc_relu();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_relu);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_relu());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17086,12 +18584,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_relu_weight_as_input,
                            transpose_conv2d::is_ignored_nhwc_relu_weight_as_input,
-                           transpose_conv2d::examples_nhwc_relu_weight_as_input);
+                           transpose_conv2d::get_examples_nhwc_relu_weight_as_input());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_relu_weight_as_input) {
   const Model model = transpose_conv2d::createTestModel_nhwc_relu_weight_as_input();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_relu_weight_as_input);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_relu_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17101,12 +18599,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_relu_relaxed,
                            transpose_conv2d::is_ignored_nhwc_relu_relaxed,
-                           transpose_conv2d::examples_nhwc_relu_relaxed);
+                           transpose_conv2d::get_examples_nhwc_relu_relaxed());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_relu_relaxed) {
   const Model model = transpose_conv2d::createTestModel_nhwc_relu_relaxed();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_relu_relaxed);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_relu_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17116,12 +18614,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_relu_relaxed_weight_as_input,
                            transpose_conv2d::is_ignored_nhwc_relu_relaxed_weight_as_input,
-                           transpose_conv2d::examples_nhwc_relu_relaxed_weight_as_input);
+                           transpose_conv2d::get_examples_nhwc_relu_relaxed_weight_as_input());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_relu_relaxed_weight_as_input) {
   const Model model = transpose_conv2d::createTestModel_nhwc_relu_relaxed_weight_as_input();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_relu_relaxed_weight_as_input);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_relu_relaxed_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17131,12 +18629,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_relu_quant8,
                            transpose_conv2d::is_ignored_nhwc_relu_quant8,
-                           transpose_conv2d::examples_nhwc_relu_quant8);
+                           transpose_conv2d::get_examples_nhwc_relu_quant8());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_relu_quant8) {
   const Model model = transpose_conv2d::createTestModel_nhwc_relu_quant8();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_relu_quant8);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_relu_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17146,12 +18644,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_relu_quant8_weight_as_input,
                            transpose_conv2d::is_ignored_nhwc_relu_quant8_weight_as_input,
-                           transpose_conv2d::examples_nhwc_relu_quant8_weight_as_input);
+                           transpose_conv2d::get_examples_nhwc_relu_quant8_weight_as_input());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_relu_quant8_weight_as_input) {
   const Model model = transpose_conv2d::createTestModel_nhwc_relu_quant8_weight_as_input();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_relu_quant8_weight_as_input);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_relu_quant8_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17161,12 +18659,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_relu1,
                            transpose_conv2d::is_ignored_nhwc_relu1,
-                           transpose_conv2d::examples_nhwc_relu1);
+                           transpose_conv2d::get_examples_nhwc_relu1());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_relu1) {
   const Model model = transpose_conv2d::createTestModel_nhwc_relu1();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_relu1);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_relu1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17176,12 +18674,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_relu1_weight_as_input,
                            transpose_conv2d::is_ignored_nhwc_relu1_weight_as_input,
-                           transpose_conv2d::examples_nhwc_relu1_weight_as_input);
+                           transpose_conv2d::get_examples_nhwc_relu1_weight_as_input());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_relu1_weight_as_input) {
   const Model model = transpose_conv2d::createTestModel_nhwc_relu1_weight_as_input();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_relu1_weight_as_input);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_relu1_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17191,12 +18689,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_relu1_relaxed,
                            transpose_conv2d::is_ignored_nhwc_relu1_relaxed,
-                           transpose_conv2d::examples_nhwc_relu1_relaxed);
+                           transpose_conv2d::get_examples_nhwc_relu1_relaxed());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_relu1_relaxed) {
   const Model model = transpose_conv2d::createTestModel_nhwc_relu1_relaxed();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_relu1_relaxed);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_relu1_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17206,12 +18704,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_relu1_relaxed_weight_as_input,
                            transpose_conv2d::is_ignored_nhwc_relu1_relaxed_weight_as_input,
-                           transpose_conv2d::examples_nhwc_relu1_relaxed_weight_as_input);
+                           transpose_conv2d::get_examples_nhwc_relu1_relaxed_weight_as_input());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_relu1_relaxed_weight_as_input) {
   const Model model = transpose_conv2d::createTestModel_nhwc_relu1_relaxed_weight_as_input();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_relu1_relaxed_weight_as_input);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_relu1_relaxed_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17221,12 +18719,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_relu1_quant8,
                            transpose_conv2d::is_ignored_nhwc_relu1_quant8,
-                           transpose_conv2d::examples_nhwc_relu1_quant8);
+                           transpose_conv2d::get_examples_nhwc_relu1_quant8());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_relu1_quant8) {
   const Model model = transpose_conv2d::createTestModel_nhwc_relu1_quant8();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_relu1_quant8);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_relu1_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17236,12 +18734,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_relu1_quant8_weight_as_input,
                            transpose_conv2d::is_ignored_nhwc_relu1_quant8_weight_as_input,
-                           transpose_conv2d::examples_nhwc_relu1_quant8_weight_as_input);
+                           transpose_conv2d::get_examples_nhwc_relu1_quant8_weight_as_input());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_relu1_quant8_weight_as_input) {
   const Model model = transpose_conv2d::createTestModel_nhwc_relu1_quant8_weight_as_input();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_relu1_quant8_weight_as_input);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_relu1_quant8_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17251,12 +18749,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_relu6,
                            transpose_conv2d::is_ignored_nhwc_relu6,
-                           transpose_conv2d::examples_nhwc_relu6);
+                           transpose_conv2d::get_examples_nhwc_relu6());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_relu6) {
   const Model model = transpose_conv2d::createTestModel_nhwc_relu6();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_relu6);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_relu6());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17266,12 +18764,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_relu6_weight_as_input,
                            transpose_conv2d::is_ignored_nhwc_relu6_weight_as_input,
-                           transpose_conv2d::examples_nhwc_relu6_weight_as_input);
+                           transpose_conv2d::get_examples_nhwc_relu6_weight_as_input());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_relu6_weight_as_input) {
   const Model model = transpose_conv2d::createTestModel_nhwc_relu6_weight_as_input();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_relu6_weight_as_input);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_relu6_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17281,12 +18779,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_relu6_relaxed,
                            transpose_conv2d::is_ignored_nhwc_relu6_relaxed,
-                           transpose_conv2d::examples_nhwc_relu6_relaxed);
+                           transpose_conv2d::get_examples_nhwc_relu6_relaxed());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_relu6_relaxed) {
   const Model model = transpose_conv2d::createTestModel_nhwc_relu6_relaxed();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_relu6_relaxed);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_relu6_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17296,12 +18794,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_relu6_relaxed_weight_as_input,
                            transpose_conv2d::is_ignored_nhwc_relu6_relaxed_weight_as_input,
-                           transpose_conv2d::examples_nhwc_relu6_relaxed_weight_as_input);
+                           transpose_conv2d::get_examples_nhwc_relu6_relaxed_weight_as_input());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_relu6_relaxed_weight_as_input) {
   const Model model = transpose_conv2d::createTestModel_nhwc_relu6_relaxed_weight_as_input();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_relu6_relaxed_weight_as_input);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_relu6_relaxed_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17311,12 +18809,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_relu6_quant8,
                            transpose_conv2d::is_ignored_nhwc_relu6_quant8,
-                           transpose_conv2d::examples_nhwc_relu6_quant8);
+                           transpose_conv2d::get_examples_nhwc_relu6_quant8());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_relu6_quant8) {
   const Model model = transpose_conv2d::createTestModel_nhwc_relu6_quant8();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_relu6_quant8);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_relu6_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17326,12 +18824,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_relu6_quant8_weight_as_input,
                            transpose_conv2d::is_ignored_nhwc_relu6_quant8_weight_as_input,
-                           transpose_conv2d::examples_nhwc_relu6_quant8_weight_as_input);
+                           transpose_conv2d::get_examples_nhwc_relu6_quant8_weight_as_input());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_relu6_quant8_weight_as_input) {
   const Model model = transpose_conv2d::createTestModel_nhwc_relu6_quant8_weight_as_input();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_relu6_quant8_weight_as_input);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_relu6_quant8_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17341,12 +18839,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_none,
                            transpose_conv2d::is_ignored_nchw_none,
-                           transpose_conv2d::examples_nchw_none);
+                           transpose_conv2d::get_examples_nchw_none());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_none) {
   const Model model = transpose_conv2d::createTestModel_nchw_none();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_none);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_none());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17356,12 +18854,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_none_weight_as_input,
                            transpose_conv2d::is_ignored_nchw_none_weight_as_input,
-                           transpose_conv2d::examples_nchw_none_weight_as_input);
+                           transpose_conv2d::get_examples_nchw_none_weight_as_input());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_none_weight_as_input) {
   const Model model = transpose_conv2d::createTestModel_nchw_none_weight_as_input();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_none_weight_as_input);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_none_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17371,12 +18869,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_none_relaxed,
                            transpose_conv2d::is_ignored_nchw_none_relaxed,
-                           transpose_conv2d::examples_nchw_none_relaxed);
+                           transpose_conv2d::get_examples_nchw_none_relaxed());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_none_relaxed) {
   const Model model = transpose_conv2d::createTestModel_nchw_none_relaxed();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_none_relaxed);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_none_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17386,12 +18884,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_none_relaxed_weight_as_input,
                            transpose_conv2d::is_ignored_nchw_none_relaxed_weight_as_input,
-                           transpose_conv2d::examples_nchw_none_relaxed_weight_as_input);
+                           transpose_conv2d::get_examples_nchw_none_relaxed_weight_as_input());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_none_relaxed_weight_as_input) {
   const Model model = transpose_conv2d::createTestModel_nchw_none_relaxed_weight_as_input();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_none_relaxed_weight_as_input);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_none_relaxed_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17401,12 +18899,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_none_quant8,
                            transpose_conv2d::is_ignored_nchw_none_quant8,
-                           transpose_conv2d::examples_nchw_none_quant8);
+                           transpose_conv2d::get_examples_nchw_none_quant8());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_none_quant8) {
   const Model model = transpose_conv2d::createTestModel_nchw_none_quant8();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_none_quant8);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_none_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17416,12 +18914,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_none_quant8_weight_as_input,
                            transpose_conv2d::is_ignored_nchw_none_quant8_weight_as_input,
-                           transpose_conv2d::examples_nchw_none_quant8_weight_as_input);
+                           transpose_conv2d::get_examples_nchw_none_quant8_weight_as_input());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_none_quant8_weight_as_input) {
   const Model model = transpose_conv2d::createTestModel_nchw_none_quant8_weight_as_input();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_none_quant8_weight_as_input);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_none_quant8_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17431,12 +18929,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_relu,
                            transpose_conv2d::is_ignored_nchw_relu,
-                           transpose_conv2d::examples_nchw_relu);
+                           transpose_conv2d::get_examples_nchw_relu());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_relu) {
   const Model model = transpose_conv2d::createTestModel_nchw_relu();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_relu);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_relu());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17446,12 +18944,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_relu_weight_as_input,
                            transpose_conv2d::is_ignored_nchw_relu_weight_as_input,
-                           transpose_conv2d::examples_nchw_relu_weight_as_input);
+                           transpose_conv2d::get_examples_nchw_relu_weight_as_input());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_relu_weight_as_input) {
   const Model model = transpose_conv2d::createTestModel_nchw_relu_weight_as_input();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_relu_weight_as_input);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_relu_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17461,12 +18959,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_relu_relaxed,
                            transpose_conv2d::is_ignored_nchw_relu_relaxed,
-                           transpose_conv2d::examples_nchw_relu_relaxed);
+                           transpose_conv2d::get_examples_nchw_relu_relaxed());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_relu_relaxed) {
   const Model model = transpose_conv2d::createTestModel_nchw_relu_relaxed();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_relu_relaxed);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_relu_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17476,12 +18974,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_relu_relaxed_weight_as_input,
                            transpose_conv2d::is_ignored_nchw_relu_relaxed_weight_as_input,
-                           transpose_conv2d::examples_nchw_relu_relaxed_weight_as_input);
+                           transpose_conv2d::get_examples_nchw_relu_relaxed_weight_as_input());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_relu_relaxed_weight_as_input) {
   const Model model = transpose_conv2d::createTestModel_nchw_relu_relaxed_weight_as_input();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_relu_relaxed_weight_as_input);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_relu_relaxed_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17491,12 +18989,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_relu_quant8,
                            transpose_conv2d::is_ignored_nchw_relu_quant8,
-                           transpose_conv2d::examples_nchw_relu_quant8);
+                           transpose_conv2d::get_examples_nchw_relu_quant8());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_relu_quant8) {
   const Model model = transpose_conv2d::createTestModel_nchw_relu_quant8();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_relu_quant8);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_relu_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17506,12 +19004,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_relu_quant8_weight_as_input,
                            transpose_conv2d::is_ignored_nchw_relu_quant8_weight_as_input,
-                           transpose_conv2d::examples_nchw_relu_quant8_weight_as_input);
+                           transpose_conv2d::get_examples_nchw_relu_quant8_weight_as_input());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_relu_quant8_weight_as_input) {
   const Model model = transpose_conv2d::createTestModel_nchw_relu_quant8_weight_as_input();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_relu_quant8_weight_as_input);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_relu_quant8_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17521,12 +19019,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_relu1,
                            transpose_conv2d::is_ignored_nchw_relu1,
-                           transpose_conv2d::examples_nchw_relu1);
+                           transpose_conv2d::get_examples_nchw_relu1());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_relu1) {
   const Model model = transpose_conv2d::createTestModel_nchw_relu1();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_relu1);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_relu1());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17536,12 +19034,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_relu1_weight_as_input,
                            transpose_conv2d::is_ignored_nchw_relu1_weight_as_input,
-                           transpose_conv2d::examples_nchw_relu1_weight_as_input);
+                           transpose_conv2d::get_examples_nchw_relu1_weight_as_input());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_relu1_weight_as_input) {
   const Model model = transpose_conv2d::createTestModel_nchw_relu1_weight_as_input();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_relu1_weight_as_input);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_relu1_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17551,12 +19049,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_relu1_relaxed,
                            transpose_conv2d::is_ignored_nchw_relu1_relaxed,
-                           transpose_conv2d::examples_nchw_relu1_relaxed);
+                           transpose_conv2d::get_examples_nchw_relu1_relaxed());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_relu1_relaxed) {
   const Model model = transpose_conv2d::createTestModel_nchw_relu1_relaxed();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_relu1_relaxed);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_relu1_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17566,12 +19064,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_relu1_relaxed_weight_as_input,
                            transpose_conv2d::is_ignored_nchw_relu1_relaxed_weight_as_input,
-                           transpose_conv2d::examples_nchw_relu1_relaxed_weight_as_input);
+                           transpose_conv2d::get_examples_nchw_relu1_relaxed_weight_as_input());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_relu1_relaxed_weight_as_input) {
   const Model model = transpose_conv2d::createTestModel_nchw_relu1_relaxed_weight_as_input();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_relu1_relaxed_weight_as_input);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_relu1_relaxed_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17581,12 +19079,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_relu1_quant8,
                            transpose_conv2d::is_ignored_nchw_relu1_quant8,
-                           transpose_conv2d::examples_nchw_relu1_quant8);
+                           transpose_conv2d::get_examples_nchw_relu1_quant8());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_relu1_quant8) {
   const Model model = transpose_conv2d::createTestModel_nchw_relu1_quant8();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_relu1_quant8);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_relu1_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17596,12 +19094,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_relu1_quant8_weight_as_input,
                            transpose_conv2d::is_ignored_nchw_relu1_quant8_weight_as_input,
-                           transpose_conv2d::examples_nchw_relu1_quant8_weight_as_input);
+                           transpose_conv2d::get_examples_nchw_relu1_quant8_weight_as_input());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_relu1_quant8_weight_as_input) {
   const Model model = transpose_conv2d::createTestModel_nchw_relu1_quant8_weight_as_input();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_relu1_quant8_weight_as_input);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_relu1_quant8_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17611,12 +19109,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_relu6,
                            transpose_conv2d::is_ignored_nchw_relu6,
-                           transpose_conv2d::examples_nchw_relu6);
+                           transpose_conv2d::get_examples_nchw_relu6());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_relu6) {
   const Model model = transpose_conv2d::createTestModel_nchw_relu6();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_relu6);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_relu6());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17626,12 +19124,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_relu6_weight_as_input,
                            transpose_conv2d::is_ignored_nchw_relu6_weight_as_input,
-                           transpose_conv2d::examples_nchw_relu6_weight_as_input);
+                           transpose_conv2d::get_examples_nchw_relu6_weight_as_input());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_relu6_weight_as_input) {
   const Model model = transpose_conv2d::createTestModel_nchw_relu6_weight_as_input();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_relu6_weight_as_input);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_relu6_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17641,12 +19139,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_relu6_relaxed,
                            transpose_conv2d::is_ignored_nchw_relu6_relaxed,
-                           transpose_conv2d::examples_nchw_relu6_relaxed);
+                           transpose_conv2d::get_examples_nchw_relu6_relaxed());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_relu6_relaxed) {
   const Model model = transpose_conv2d::createTestModel_nchw_relu6_relaxed();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_relu6_relaxed);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_relu6_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17656,12 +19154,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_relu6_relaxed_weight_as_input,
                            transpose_conv2d::is_ignored_nchw_relu6_relaxed_weight_as_input,
-                           transpose_conv2d::examples_nchw_relu6_relaxed_weight_as_input);
+                           transpose_conv2d::get_examples_nchw_relu6_relaxed_weight_as_input());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_relu6_relaxed_weight_as_input) {
   const Model model = transpose_conv2d::createTestModel_nchw_relu6_relaxed_weight_as_input();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_relu6_relaxed_weight_as_input);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_relu6_relaxed_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17671,12 +19169,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_relu6_quant8,
                            transpose_conv2d::is_ignored_nchw_relu6_quant8,
-                           transpose_conv2d::examples_nchw_relu6_quant8);
+                           transpose_conv2d::get_examples_nchw_relu6_quant8());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_relu6_quant8) {
   const Model model = transpose_conv2d::createTestModel_nchw_relu6_quant8();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_relu6_quant8);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_relu6_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17686,12 +19184,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_relu6_quant8_weight_as_input,
                            transpose_conv2d::is_ignored_nchw_relu6_quant8_weight_as_input,
-                           transpose_conv2d::examples_nchw_relu6_quant8_weight_as_input);
+                           transpose_conv2d::get_examples_nchw_relu6_quant8_weight_as_input());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_relu6_quant8_weight_as_input) {
   const Model model = transpose_conv2d::createTestModel_nchw_relu6_quant8_weight_as_input();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_relu6_quant8_weight_as_input);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_relu6_quant8_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17701,12 +19199,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc,
                            transpose_conv2d::is_ignored_nhwc,
-                           transpose_conv2d::examples_nhwc);
+                           transpose_conv2d::get_examples_nhwc());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc) {
   const Model model = transpose_conv2d::createTestModel_nhwc();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17716,12 +19214,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_weight_as_input,
                            transpose_conv2d::is_ignored_nhwc_weight_as_input,
-                           transpose_conv2d::examples_nhwc_weight_as_input);
+                           transpose_conv2d::get_examples_nhwc_weight_as_input());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_weight_as_input) {
   const Model model = transpose_conv2d::createTestModel_nhwc_weight_as_input();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_weight_as_input);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17731,12 +19229,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_relaxed,
                            transpose_conv2d::is_ignored_nhwc_relaxed,
-                           transpose_conv2d::examples_nhwc_relaxed);
+                           transpose_conv2d::get_examples_nhwc_relaxed());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_relaxed) {
   const Model model = transpose_conv2d::createTestModel_nhwc_relaxed();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_relaxed);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17746,12 +19244,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_relaxed_weight_as_input,
                            transpose_conv2d::is_ignored_nhwc_relaxed_weight_as_input,
-                           transpose_conv2d::examples_nhwc_relaxed_weight_as_input);
+                           transpose_conv2d::get_examples_nhwc_relaxed_weight_as_input());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_relaxed_weight_as_input) {
   const Model model = transpose_conv2d::createTestModel_nhwc_relaxed_weight_as_input();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_relaxed_weight_as_input);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_relaxed_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17761,12 +19259,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_quant8,
                            transpose_conv2d::is_ignored_nhwc_quant8,
-                           transpose_conv2d::examples_nhwc_quant8);
+                           transpose_conv2d::get_examples_nhwc_quant8());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_quant8) {
   const Model model = transpose_conv2d::createTestModel_nhwc_quant8();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_quant8);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17776,12 +19274,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_quant8_weight_as_input,
                            transpose_conv2d::is_ignored_nhwc_quant8_weight_as_input,
-                           transpose_conv2d::examples_nhwc_quant8_weight_as_input);
+                           transpose_conv2d::get_examples_nhwc_quant8_weight_as_input());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_quant8_weight_as_input) {
   const Model model = transpose_conv2d::createTestModel_nhwc_quant8_weight_as_input();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_quant8_weight_as_input);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_quant8_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17791,12 +19289,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw,
                            transpose_conv2d::is_ignored_nchw,
-                           transpose_conv2d::examples_nchw);
+                           transpose_conv2d::get_examples_nchw());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw) {
   const Model model = transpose_conv2d::createTestModel_nchw();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17806,12 +19304,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_weight_as_input,
                            transpose_conv2d::is_ignored_nchw_weight_as_input,
-                           transpose_conv2d::examples_nchw_weight_as_input);
+                           transpose_conv2d::get_examples_nchw_weight_as_input());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_weight_as_input) {
   const Model model = transpose_conv2d::createTestModel_nchw_weight_as_input();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_weight_as_input);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17821,12 +19319,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_relaxed,
                            transpose_conv2d::is_ignored_nchw_relaxed,
-                           transpose_conv2d::examples_nchw_relaxed);
+                           transpose_conv2d::get_examples_nchw_relaxed());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_relaxed) {
   const Model model = transpose_conv2d::createTestModel_nchw_relaxed();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_relaxed);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17836,12 +19334,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_relaxed_weight_as_input,
                            transpose_conv2d::is_ignored_nchw_relaxed_weight_as_input,
-                           transpose_conv2d::examples_nchw_relaxed_weight_as_input);
+                           transpose_conv2d::get_examples_nchw_relaxed_weight_as_input());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_relaxed_weight_as_input) {
   const Model model = transpose_conv2d::createTestModel_nchw_relaxed_weight_as_input();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_relaxed_weight_as_input);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_relaxed_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17851,12 +19349,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_quant8,
                            transpose_conv2d::is_ignored_nchw_quant8,
-                           transpose_conv2d::examples_nchw_quant8);
+                           transpose_conv2d::get_examples_nchw_quant8());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_quant8) {
   const Model model = transpose_conv2d::createTestModel_nchw_quant8();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_quant8);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17866,12 +19364,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_quant8_weight_as_input,
                            transpose_conv2d::is_ignored_nchw_quant8_weight_as_input,
-                           transpose_conv2d::examples_nchw_quant8_weight_as_input);
+                           transpose_conv2d::get_examples_nchw_quant8_weight_as_input());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_quant8_weight_as_input) {
   const Model model = transpose_conv2d::createTestModel_nchw_quant8_weight_as_input();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_quant8_weight_as_input);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_quant8_weight_as_input());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17881,12 +19379,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_2,
                            transpose_conv2d::is_ignored_nhwc_2,
-                           transpose_conv2d::examples_nhwc_2);
+                           transpose_conv2d::get_examples_nhwc_2());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_2) {
   const Model model = transpose_conv2d::createTestModel_nhwc_2();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_2);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17896,12 +19394,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_weight_as_input_2,
                            transpose_conv2d::is_ignored_nhwc_weight_as_input_2,
-                           transpose_conv2d::examples_nhwc_weight_as_input_2);
+                           transpose_conv2d::get_examples_nhwc_weight_as_input_2());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_weight_as_input_2) {
   const Model model = transpose_conv2d::createTestModel_nhwc_weight_as_input_2();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_weight_as_input_2);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_weight_as_input_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17911,12 +19409,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_relaxed_2,
                            transpose_conv2d::is_ignored_nhwc_relaxed_2,
-                           transpose_conv2d::examples_nhwc_relaxed_2);
+                           transpose_conv2d::get_examples_nhwc_relaxed_2());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_relaxed_2) {
   const Model model = transpose_conv2d::createTestModel_nhwc_relaxed_2();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_relaxed_2);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_relaxed_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17926,12 +19424,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_relaxed_weight_as_input_2,
                            transpose_conv2d::is_ignored_nhwc_relaxed_weight_as_input_2,
-                           transpose_conv2d::examples_nhwc_relaxed_weight_as_input_2);
+                           transpose_conv2d::get_examples_nhwc_relaxed_weight_as_input_2());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_relaxed_weight_as_input_2) {
   const Model model = transpose_conv2d::createTestModel_nhwc_relaxed_weight_as_input_2();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_relaxed_weight_as_input_2);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_relaxed_weight_as_input_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17941,12 +19439,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_quant8_2,
                            transpose_conv2d::is_ignored_nhwc_quant8_2,
-                           transpose_conv2d::examples_nhwc_quant8_2);
+                           transpose_conv2d::get_examples_nhwc_quant8_2());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_quant8_2) {
   const Model model = transpose_conv2d::createTestModel_nhwc_quant8_2();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_quant8_2);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_quant8_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17956,12 +19454,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_quant8_weight_as_input_2,
                            transpose_conv2d::is_ignored_nhwc_quant8_weight_as_input_2,
-                           transpose_conv2d::examples_nhwc_quant8_weight_as_input_2);
+                           transpose_conv2d::get_examples_nhwc_quant8_weight_as_input_2());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_quant8_weight_as_input_2) {
   const Model model = transpose_conv2d::createTestModel_nhwc_quant8_weight_as_input_2();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_quant8_weight_as_input_2);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_quant8_weight_as_input_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17971,12 +19469,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_2,
                            transpose_conv2d::is_ignored_nchw_2,
-                           transpose_conv2d::examples_nchw_2);
+                           transpose_conv2d::get_examples_nchw_2());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_2) {
   const Model model = transpose_conv2d::createTestModel_nchw_2();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_2);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -17986,12 +19484,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_weight_as_input_2,
                            transpose_conv2d::is_ignored_nchw_weight_as_input_2,
-                           transpose_conv2d::examples_nchw_weight_as_input_2);
+                           transpose_conv2d::get_examples_nchw_weight_as_input_2());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_weight_as_input_2) {
   const Model model = transpose_conv2d::createTestModel_nchw_weight_as_input_2();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_weight_as_input_2);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_weight_as_input_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -18001,12 +19499,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_relaxed_2,
                            transpose_conv2d::is_ignored_nchw_relaxed_2,
-                           transpose_conv2d::examples_nchw_relaxed_2);
+                           transpose_conv2d::get_examples_nchw_relaxed_2());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_relaxed_2) {
   const Model model = transpose_conv2d::createTestModel_nchw_relaxed_2();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_relaxed_2);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_relaxed_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -18016,12 +19514,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_relaxed_weight_as_input_2,
                            transpose_conv2d::is_ignored_nchw_relaxed_weight_as_input_2,
-                           transpose_conv2d::examples_nchw_relaxed_weight_as_input_2);
+                           transpose_conv2d::get_examples_nchw_relaxed_weight_as_input_2());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_relaxed_weight_as_input_2) {
   const Model model = transpose_conv2d::createTestModel_nchw_relaxed_weight_as_input_2();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_relaxed_weight_as_input_2);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_relaxed_weight_as_input_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -18031,12 +19529,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_quant8_2,
                            transpose_conv2d::is_ignored_nchw_quant8_2,
-                           transpose_conv2d::examples_nchw_quant8_2);
+                           transpose_conv2d::get_examples_nchw_quant8_2());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_quant8_2) {
   const Model model = transpose_conv2d::createTestModel_nchw_quant8_2();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_quant8_2);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_quant8_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -18046,12 +19544,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_quant8_weight_as_input_2,
                            transpose_conv2d::is_ignored_nchw_quant8_weight_as_input_2,
-                           transpose_conv2d::examples_nchw_quant8_weight_as_input_2);
+                           transpose_conv2d::get_examples_nchw_quant8_weight_as_input_2());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_quant8_weight_as_input_2) {
   const Model model = transpose_conv2d::createTestModel_nchw_quant8_weight_as_input_2();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_quant8_weight_as_input_2);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_quant8_weight_as_input_2());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -18061,12 +19559,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_3,
                            transpose_conv2d::is_ignored_nhwc_3,
-                           transpose_conv2d::examples_nhwc_3);
+                           transpose_conv2d::get_examples_nhwc_3());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_3) {
   const Model model = transpose_conv2d::createTestModel_nhwc_3();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_3);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -18076,12 +19574,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_weight_as_input_3,
                            transpose_conv2d::is_ignored_nhwc_weight_as_input_3,
-                           transpose_conv2d::examples_nhwc_weight_as_input_3);
+                           transpose_conv2d::get_examples_nhwc_weight_as_input_3());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_weight_as_input_3) {
   const Model model = transpose_conv2d::createTestModel_nhwc_weight_as_input_3();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_weight_as_input_3);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_weight_as_input_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -18091,12 +19589,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_relaxed_3,
                            transpose_conv2d::is_ignored_nhwc_relaxed_3,
-                           transpose_conv2d::examples_nhwc_relaxed_3);
+                           transpose_conv2d::get_examples_nhwc_relaxed_3());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_relaxed_3) {
   const Model model = transpose_conv2d::createTestModel_nhwc_relaxed_3();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_relaxed_3);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_relaxed_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -18106,12 +19604,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_relaxed_weight_as_input_3,
                            transpose_conv2d::is_ignored_nhwc_relaxed_weight_as_input_3,
-                           transpose_conv2d::examples_nhwc_relaxed_weight_as_input_3);
+                           transpose_conv2d::get_examples_nhwc_relaxed_weight_as_input_3());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_relaxed_weight_as_input_3) {
   const Model model = transpose_conv2d::createTestModel_nhwc_relaxed_weight_as_input_3();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_relaxed_weight_as_input_3);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_relaxed_weight_as_input_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -18121,12 +19619,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_quant8_3,
                            transpose_conv2d::is_ignored_nhwc_quant8_3,
-                           transpose_conv2d::examples_nhwc_quant8_3);
+                           transpose_conv2d::get_examples_nhwc_quant8_3());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_quant8_3) {
   const Model model = transpose_conv2d::createTestModel_nhwc_quant8_3();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_quant8_3);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_quant8_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -18136,12 +19634,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_quant8_weight_as_input_3,
                            transpose_conv2d::is_ignored_nhwc_quant8_weight_as_input_3,
-                           transpose_conv2d::examples_nhwc_quant8_weight_as_input_3);
+                           transpose_conv2d::get_examples_nhwc_quant8_weight_as_input_3());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_quant8_weight_as_input_3) {
   const Model model = transpose_conv2d::createTestModel_nhwc_quant8_weight_as_input_3();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_quant8_weight_as_input_3);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_quant8_weight_as_input_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -18151,12 +19649,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_3,
                            transpose_conv2d::is_ignored_nchw_3,
-                           transpose_conv2d::examples_nchw_3);
+                           transpose_conv2d::get_examples_nchw_3());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_3) {
   const Model model = transpose_conv2d::createTestModel_nchw_3();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_3);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -18166,12 +19664,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_weight_as_input_3,
                            transpose_conv2d::is_ignored_nchw_weight_as_input_3,
-                           transpose_conv2d::examples_nchw_weight_as_input_3);
+                           transpose_conv2d::get_examples_nchw_weight_as_input_3());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_weight_as_input_3) {
   const Model model = transpose_conv2d::createTestModel_nchw_weight_as_input_3();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_weight_as_input_3);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_weight_as_input_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -18181,12 +19679,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_relaxed_3,
                            transpose_conv2d::is_ignored_nchw_relaxed_3,
-                           transpose_conv2d::examples_nchw_relaxed_3);
+                           transpose_conv2d::get_examples_nchw_relaxed_3());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_relaxed_3) {
   const Model model = transpose_conv2d::createTestModel_nchw_relaxed_3();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_relaxed_3);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_relaxed_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -18196,12 +19694,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_relaxed_weight_as_input_3,
                            transpose_conv2d::is_ignored_nchw_relaxed_weight_as_input_3,
-                           transpose_conv2d::examples_nchw_relaxed_weight_as_input_3);
+                           transpose_conv2d::get_examples_nchw_relaxed_weight_as_input_3());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_relaxed_weight_as_input_3) {
   const Model model = transpose_conv2d::createTestModel_nchw_relaxed_weight_as_input_3();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_relaxed_weight_as_input_3);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_relaxed_weight_as_input_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -18211,12 +19709,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_quant8_3,
                            transpose_conv2d::is_ignored_nchw_quant8_3,
-                           transpose_conv2d::examples_nchw_quant8_3);
+                           transpose_conv2d::get_examples_nchw_quant8_3());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_quant8_3) {
   const Model model = transpose_conv2d::createTestModel_nchw_quant8_3();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_quant8_3);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_quant8_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -18226,12 +19724,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_quant8_weight_as_input_3,
                            transpose_conv2d::is_ignored_nchw_quant8_weight_as_input_3,
-                           transpose_conv2d::examples_nchw_quant8_weight_as_input_3);
+                           transpose_conv2d::get_examples_nchw_quant8_weight_as_input_3());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_quant8_weight_as_input_3) {
   const Model model = transpose_conv2d::createTestModel_nchw_quant8_weight_as_input_3();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_quant8_weight_as_input_3);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_quant8_weight_as_input_3());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -18241,12 +19739,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_4,
                            transpose_conv2d::is_ignored_nhwc_4,
-                           transpose_conv2d::examples_nhwc_4);
+                           transpose_conv2d::get_examples_nhwc_4());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_4) {
   const Model model = transpose_conv2d::createTestModel_nhwc_4();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_4);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -18256,12 +19754,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_weight_as_input_4,
                            transpose_conv2d::is_ignored_nhwc_weight_as_input_4,
-                           transpose_conv2d::examples_nhwc_weight_as_input_4);
+                           transpose_conv2d::get_examples_nhwc_weight_as_input_4());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_weight_as_input_4) {
   const Model model = transpose_conv2d::createTestModel_nhwc_weight_as_input_4();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_weight_as_input_4);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_weight_as_input_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -18271,12 +19769,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_relaxed_4,
                            transpose_conv2d::is_ignored_nhwc_relaxed_4,
-                           transpose_conv2d::examples_nhwc_relaxed_4);
+                           transpose_conv2d::get_examples_nhwc_relaxed_4());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_relaxed_4) {
   const Model model = transpose_conv2d::createTestModel_nhwc_relaxed_4();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_relaxed_4);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_relaxed_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -18286,12 +19784,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_relaxed_weight_as_input_4,
                            transpose_conv2d::is_ignored_nhwc_relaxed_weight_as_input_4,
-                           transpose_conv2d::examples_nhwc_relaxed_weight_as_input_4);
+                           transpose_conv2d::get_examples_nhwc_relaxed_weight_as_input_4());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_relaxed_weight_as_input_4) {
   const Model model = transpose_conv2d::createTestModel_nhwc_relaxed_weight_as_input_4();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_relaxed_weight_as_input_4);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_relaxed_weight_as_input_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -18301,12 +19799,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_quant8_4,
                            transpose_conv2d::is_ignored_nhwc_quant8_4,
-                           transpose_conv2d::examples_nhwc_quant8_4);
+                           transpose_conv2d::get_examples_nhwc_quant8_4());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_quant8_4) {
   const Model model = transpose_conv2d::createTestModel_nhwc_quant8_4();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_quant8_4);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_quant8_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -18316,12 +19814,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nhwc_quant8_weight_as_input_4,
                            transpose_conv2d::is_ignored_nhwc_quant8_weight_as_input_4,
-                           transpose_conv2d::examples_nhwc_quant8_weight_as_input_4);
+                           transpose_conv2d::get_examples_nhwc_quant8_weight_as_input_4());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nhwc_quant8_weight_as_input_4) {
   const Model model = transpose_conv2d::createTestModel_nhwc_quant8_weight_as_input_4();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nhwc_quant8_weight_as_input_4);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nhwc_quant8_weight_as_input_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -18331,12 +19829,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_4,
                            transpose_conv2d::is_ignored_nchw_4,
-                           transpose_conv2d::examples_nchw_4);
+                           transpose_conv2d::get_examples_nchw_4());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_4) {
   const Model model = transpose_conv2d::createTestModel_nchw_4();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_4);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -18346,12 +19844,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_weight_as_input_4,
                            transpose_conv2d::is_ignored_nchw_weight_as_input_4,
-                           transpose_conv2d::examples_nchw_weight_as_input_4);
+                           transpose_conv2d::get_examples_nchw_weight_as_input_4());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_weight_as_input_4) {
   const Model model = transpose_conv2d::createTestModel_nchw_weight_as_input_4();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_weight_as_input_4);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_weight_as_input_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -18361,12 +19859,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_relaxed_4,
                            transpose_conv2d::is_ignored_nchw_relaxed_4,
-                           transpose_conv2d::examples_nchw_relaxed_4);
+                           transpose_conv2d::get_examples_nchw_relaxed_4());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_relaxed_4) {
   const Model model = transpose_conv2d::createTestModel_nchw_relaxed_4();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_relaxed_4);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_relaxed_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -18376,12 +19874,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_relaxed_weight_as_input_4,
                            transpose_conv2d::is_ignored_nchw_relaxed_weight_as_input_4,
-                           transpose_conv2d::examples_nchw_relaxed_weight_as_input_4);
+                           transpose_conv2d::get_examples_nchw_relaxed_weight_as_input_4());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_relaxed_weight_as_input_4) {
   const Model model = transpose_conv2d::createTestModel_nchw_relaxed_weight_as_input_4();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_relaxed_weight_as_input_4);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_relaxed_weight_as_input_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -18391,12 +19889,12 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_quant8_4,
                            transpose_conv2d::is_ignored_nchw_quant8_4,
-                           transpose_conv2d::examples_nchw_quant8_4);
+                           transpose_conv2d::get_examples_nchw_quant8_4());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_quant8_4) {
   const Model model = transpose_conv2d::createTestModel_nchw_quant8_4();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_quant8_4);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_quant8_4());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -18406,12 +19904,35 @@
   generated_tests::Execute(device,
                            transpose_conv2d::createTestModel_nchw_quant8_weight_as_input_4,
                            transpose_conv2d::is_ignored_nchw_quant8_weight_as_input_4,
-                           transpose_conv2d::examples_nchw_quant8_weight_as_input_4);
+                           transpose_conv2d::get_examples_nchw_quant8_weight_as_input_4());
 }
 
 TEST_F(ValidationTest, transpose_conv2d_nchw_quant8_weight_as_input_4) {
   const Model model = transpose_conv2d::createTestModel_nchw_quant8_weight_as_input_4();
-  const std::vector<Request> requests = createRequests(transpose_conv2d::examples_nchw_quant8_weight_as_input_4);
+  const std::vector<Request> requests = createRequests(transpose_conv2d::get_examples_nchw_quant8_weight_as_input_4());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+// Generated from: transpose_float16.mod.py.
+namespace transpose_float16 {
+// Generated transpose_float16 test
+#include "examples/transpose_float16.example.cpp"
+// Generated model constructor
+#include "vts_models/transpose_float16.model.cpp"
+} // namespace transpose_float16
+
+TEST_F(NeuralnetworksHidlTest, transpose_float16) {
+  generated_tests::Execute(device,
+                           transpose_float16::createTestModel,
+                           transpose_float16::is_ignored,
+                           transpose_float16::get_examples());
+}
+
+TEST_F(ValidationTest, transpose_float16) {
+  const Model model = transpose_float16::createTestModel();
+  const std::vector<Request> requests = createRequests(transpose_float16::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -18429,12 +19950,12 @@
   generated_tests::Execute(device,
                            transpose_v1_2::createTestModel,
                            transpose_v1_2::is_ignored,
-                           transpose_v1_2::examples);
+                           transpose_v1_2::get_examples());
 }
 
 TEST_F(ValidationTest, transpose_v1_2) {
   const Model model = transpose_v1_2::createTestModel();
-  const std::vector<Request> requests = createRequests(transpose_v1_2::examples);
+  const std::vector<Request> requests = createRequests(transpose_v1_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -18444,12 +19965,12 @@
   generated_tests::Execute(device,
                            transpose_v1_2::createTestModel_relaxed,
                            transpose_v1_2::is_ignored_relaxed,
-                           transpose_v1_2::examples_relaxed);
+                           transpose_v1_2::get_examples_relaxed());
 }
 
 TEST_F(ValidationTest, transpose_v1_2_relaxed) {
   const Model model = transpose_v1_2::createTestModel_relaxed();
-  const std::vector<Request> requests = createRequests(transpose_v1_2::examples_relaxed);
+  const std::vector<Request> requests = createRequests(transpose_v1_2::get_examples_relaxed());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -18459,12 +19980,12 @@
   generated_tests::Execute(device,
                            transpose_v1_2::createTestModel_quant8,
                            transpose_v1_2::is_ignored_quant8,
-                           transpose_v1_2::examples_quant8);
+                           transpose_v1_2::get_examples_quant8());
 }
 
 TEST_F(ValidationTest, transpose_v1_2_quant8) {
   const Model model = transpose_v1_2::createTestModel_quant8();
-  const std::vector<Request> requests = createRequests(transpose_v1_2::examples_quant8);
+  const std::vector<Request> requests = createRequests(transpose_v1_2::get_examples_quant8());
   validateModel(model);
   validateRequests(model, requests);
 }
diff --git a/nn/runtime/test/generated/examples/abs.example.cpp b/nn/runtime/test/generated/examples/abs.example.cpp
new file mode 100644
index 0000000..9215c40
--- /dev/null
+++ b/nn/runtime/test/generated/examples/abs.example.cpp
@@ -0,0 +1,113 @@
+// clang-format off
+// Generated file (from: abs.mod.py). Do not edit
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {-6.0f, -5.9f, -5.8f, -5.7f, -5.6f, -5.5f, -5.4f, -5.3f, -5.2f, -5.1f, -5.0f, -4.9f, -4.8f, -4.7f, -4.6f, -4.5f, -4.4f, -4.3f, -4.2f, -4.1f, -4.0f, -3.9f, -3.8f, -3.7f, -3.6f, -3.5f, -3.4f, -3.3f, -3.2f, -3.1f, -3.0f, -2.9f, -2.8f, -2.7f, -2.6f, -2.5f, -2.4f, -2.3f, -2.2f, -2.1f, -2.0f, -1.9f, -1.8f, -1.7f, -1.6f, -1.5f, -1.4f, -1.3f, -1.2f, -1.1f, -1.0f, -0.9f, -0.8f, -0.7f, -0.6f, -0.5f, -0.4f, -0.3f, -0.2f, -0.1f, 0.0f, 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f, 0.9f, 1.0f, 1.1f, 1.2f, 1.3f, 1.4f, 1.5f, 1.6f, 1.7f, 1.8f, 1.9f, 2.0f, 2.1f, 2.2f, 2.3f, 2.4f, 2.5f, 2.6f, 2.7f, 2.8f, 2.9f, 3.0f, 3.1f, 3.2f, 3.3f, 3.4f, 3.5f, 3.6f, 3.7f, 3.8f, 3.9f, 4.0f, 4.1f, 4.2f, 4.3f, 4.4f, 4.5f, 4.6f, 4.7f, 4.8f, 4.9f, 5.0f, 5.1f, 5.2f, 5.3f, 5.4f, 5.5f, 5.6f, 5.7f, 5.8f, 5.9f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {6.0f, 5.9f, 5.8f, 5.7f, 5.6f, 5.5f, 5.4f, 5.3f, 5.2f, 5.1f, 5.0f, 4.9f, 4.8f, 4.7f, 4.6f, 4.5f, 4.4f, 4.3f, 4.2f, 4.1f, 4.0f, 3.9f, 3.8f, 3.7f, 3.6f, 3.5f, 3.4f, 3.3f, 3.2f, 3.1f, 3.0f, 2.9f, 2.8f, 2.7f, 2.6f, 2.5f, 2.4f, 2.3f, 2.2f, 2.1f, 2.0f, 1.9f, 1.8f, 1.7f, 1.6f, 1.5f, 1.4f, 1.3f, 1.2f, 1.1f, 1.0f, 0.9f, 0.8f, 0.7f, 0.6f, 0.5f, 0.4f, 0.3f, 0.2f, 0.1f, 0.0f, 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f, 0.9f, 1.0f, 1.1f, 1.2f, 1.3f, 1.4f, 1.5f, 1.6f, 1.7f, 1.8f, 1.9f, 2.0f, 2.1f, 2.2f, 2.3f, 2.4f, 2.5f, 2.6f, 2.7f, 2.8f, 2.9f, 3.0f, 3.1f, 3.2f, 3.3f, 3.4f, 3.5f, 3.6f, 3.7f, 3.8f, 3.9f, 4.0f, 4.1f, 4.2f, 4.3f, 4.4f, 4.5f, 4.6f, 4.7f, 4.8f, 4.9f, 5.0f, 5.1f, 5.2f, 5.3f, 5.4f, 5.5f, 5.6f, 5.7f, 5.8f, 5.9f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples;
+};
+
+std::vector<MixedTypedExample>& get_examples_relaxed() {
+static std::vector<MixedTypedExample> examples_relaxed = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {-6.0f, -5.9f, -5.8f, -5.7f, -5.6f, -5.5f, -5.4f, -5.3f, -5.2f, -5.1f, -5.0f, -4.9f, -4.8f, -4.7f, -4.6f, -4.5f, -4.4f, -4.3f, -4.2f, -4.1f, -4.0f, -3.9f, -3.8f, -3.7f, -3.6f, -3.5f, -3.4f, -3.3f, -3.2f, -3.1f, -3.0f, -2.9f, -2.8f, -2.7f, -2.6f, -2.5f, -2.4f, -2.3f, -2.2f, -2.1f, -2.0f, -1.9f, -1.8f, -1.7f, -1.6f, -1.5f, -1.4f, -1.3f, -1.2f, -1.1f, -1.0f, -0.9f, -0.8f, -0.7f, -0.6f, -0.5f, -0.4f, -0.3f, -0.2f, -0.1f, 0.0f, 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f, 0.9f, 1.0f, 1.1f, 1.2f, 1.3f, 1.4f, 1.5f, 1.6f, 1.7f, 1.8f, 1.9f, 2.0f, 2.1f, 2.2f, 2.3f, 2.4f, 2.5f, 2.6f, 2.7f, 2.8f, 2.9f, 3.0f, 3.1f, 3.2f, 3.3f, 3.4f, 3.5f, 3.6f, 3.7f, 3.8f, 3.9f, 4.0f, 4.1f, 4.2f, 4.3f, 4.4f, 4.5f, 4.6f, 4.7f, 4.8f, 4.9f, 5.0f, 5.1f, 5.2f, 5.3f, 5.4f, 5.5f, 5.6f, 5.7f, 5.8f, 5.9f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {6.0f, 5.9f, 5.8f, 5.7f, 5.6f, 5.5f, 5.4f, 5.3f, 5.2f, 5.1f, 5.0f, 4.9f, 4.8f, 4.7f, 4.6f, 4.5f, 4.4f, 4.3f, 4.2f, 4.1f, 4.0f, 3.9f, 3.8f, 3.7f, 3.6f, 3.5f, 3.4f, 3.3f, 3.2f, 3.1f, 3.0f, 2.9f, 2.8f, 2.7f, 2.6f, 2.5f, 2.4f, 2.3f, 2.2f, 2.1f, 2.0f, 1.9f, 1.8f, 1.7f, 1.6f, 1.5f, 1.4f, 1.3f, 1.2f, 1.1f, 1.0f, 0.9f, 0.8f, 0.7f, 0.6f, 0.5f, 0.4f, 0.3f, 0.2f, 0.1f, 0.0f, 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f, 0.9f, 1.0f, 1.1f, 1.2f, 1.3f, 1.4f, 1.5f, 1.6f, 1.7f, 1.8f, 1.9f, 2.0f, 2.1f, 2.2f, 2.3f, 2.4f, 2.5f, 2.6f, 2.7f, 2.8f, 2.9f, 3.0f, 3.1f, 3.2f, 3.3f, 3.4f, 3.5f, 3.6f, 3.7f, 3.8f, 3.9f, 4.0f, 4.1f, 4.2f, 4.3f, 4.4f, 4.5f, 4.6f, 4.7f, 4.8f, 4.9f, 5.0f, 5.1f, 5.2f, 5.3f, 5.4f, 5.5f, 5.6f, 5.7f, 5.8f, 5.9f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_relaxed;
+};
+
+std::vector<MixedTypedExample>& get_examples_float16() {
+static std::vector<MixedTypedExample> examples_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {-6.0f, -5.900000095367432f, -5.800000190734863f, -5.699999809265137f, -5.599999904632568f, -5.5f, -5.400000095367432f, -5.300000190734863f, -5.199999809265137f, -5.099999904632568f, -5.0f, -4.900000095367432f, -4.800000190734863f, -4.699999809265137f, -4.599999904632568f, -4.5f, -4.400000095367432f, -4.300000190734863f, -4.199999809265137f, -4.099999904632568f, -4.0f, -3.9000000953674316f, -3.799999952316284f, -3.700000047683716f, -3.5999999046325684f, -3.5f, -3.4000000953674316f, -3.299999952316284f, -3.200000047683716f, -3.0999999046325684f, -3.0f, -2.9000000953674316f, -2.799999952316284f, -2.700000047683716f, -2.5999999046325684f, -2.5f, -2.4000000953674316f, -2.299999952316284f, -2.200000047683716f, -2.0999999046325684f, -2.0f, -1.899999976158142f, -1.7999999523162842f, -1.7000000476837158f, -1.600000023841858f, -1.5f, -1.399999976158142f, -1.2999999523162842f, -1.2000000476837158f, -1.100000023841858f, -1.0f, -0.8999999761581421f, -0.800000011920929f, -0.699999988079071f, -0.6000000238418579f, -0.5f, -0.4000000059604645f, -0.30000001192092896f, -0.20000000298023224f, -0.10000000149011612f, 0.0f, 0.10000000149011612f, 0.20000000298023224f, 0.30000001192092896f, 0.4000000059604645f, 0.5f, 0.6000000238418579f, 0.699999988079071f, 0.800000011920929f, 0.8999999761581421f, 1.0f, 1.100000023841858f, 1.2000000476837158f, 1.2999999523162842f, 1.399999976158142f, 1.5f, 1.600000023841858f, 1.7000000476837158f, 1.7999999523162842f, 1.899999976158142f, 2.0f, 2.0999999046325684f, 2.200000047683716f, 2.299999952316284f, 2.4000000953674316f, 2.5f, 2.5999999046325684f, 2.700000047683716f, 2.799999952316284f, 2.9000000953674316f, 3.0f, 3.0999999046325684f, 3.200000047683716f, 3.299999952316284f, 3.4000000953674316f, 3.5f, 3.5999999046325684f, 3.700000047683716f, 3.799999952316284f, 3.9000000953674316f, 4.0f, 4.099999904632568f, 4.199999809265137f, 4.300000190734863f, 4.400000095367432f, 4.5f, 4.599999904632568f, 4.699999809265137f, 4.800000190734863f, 
4.900000095367432f, 5.0f, 5.099999904632568f, 5.199999809265137f, 5.300000190734863f, 5.400000095367432f, 5.5f, 5.599999904632568f, 5.699999809265137f, 5.800000190734863f, 5.900000095367432f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {6.0f, 5.900000095367432f, 5.800000190734863f, 5.699999809265137f, 5.599999904632568f, 5.5f, 5.400000095367432f, 5.300000190734863f, 5.199999809265137f, 5.099999904632568f, 5.0f, 4.900000095367432f, 4.800000190734863f, 4.699999809265137f, 4.599999904632568f, 4.5f, 4.400000095367432f, 4.300000190734863f, 4.199999809265137f, 4.099999904632568f, 4.0f, 3.9000000953674316f, 3.799999952316284f, 3.700000047683716f, 3.5999999046325684f, 3.5f, 3.4000000953674316f, 3.299999952316284f, 3.200000047683716f, 3.0999999046325684f, 3.0f, 2.9000000953674316f, 2.799999952316284f, 2.700000047683716f, 2.5999999046325684f, 2.5f, 2.4000000953674316f, 2.299999952316284f, 2.200000047683716f, 2.0999999046325684f, 2.0f, 1.899999976158142f, 1.7999999523162842f, 1.7000000476837158f, 1.600000023841858f, 1.5f, 1.399999976158142f, 1.2999999523162842f, 1.2000000476837158f, 1.100000023841858f, 1.0f, 0.8999999761581421f, 0.800000011920929f, 0.699999988079071f, 0.6000000238418579f, 0.5f, 0.4000000059604645f, 0.30000001192092896f, 0.20000000298023224f, 0.10000000149011612f, 0.0f, 0.10000000149011612f, 0.20000000298023224f, 0.30000001192092896f, 0.4000000059604645f, 0.5f, 0.6000000238418579f, 0.699999988079071f, 0.800000011920929f, 0.8999999761581421f, 1.0f, 1.100000023841858f, 1.2000000476837158f, 1.2999999523162842f, 1.399999976158142f, 1.5f, 1.600000023841858f, 1.7000000476837158f, 1.7999999523162842f, 1.899999976158142f, 2.0f, 2.0999999046325684f, 2.200000047683716f, 2.299999952316284f, 2.4000000953674316f, 2.5f, 2.5999999046325684f, 2.700000047683716f, 2.799999952316284f, 2.9000000953674316f, 3.0f, 3.0999999046325684f, 3.200000047683716f, 3.299999952316284f, 3.4000000953674316f, 3.5f, 3.5999999046325684f, 3.700000047683716f, 3.799999952316284f, 3.9000000953674316f, 4.0f, 4.099999904632568f, 4.199999809265137f, 4.300000190734863f, 4.400000095367432f, 4.5f, 4.599999904632568f, 4.699999809265137f, 4.800000190734863f, 4.900000095367432f, 5.0f, 5.099999904632568f, 5.199999809265137f, 
5.300000190734863f, 5.400000095367432f, 5.5f, 5.599999904632568f, 5.699999809265137f, 5.800000190734863f, 5.900000095367432f}}},
+}
+},
+}, // End of an example
+};
+return examples_float16;
+};
+
diff --git a/nn/runtime/test/generated/examples/add.example.cpp b/nn/runtime/test/generated/examples/add.example.cpp
index 61dd2c3..fe67709 100644
--- a/nn/runtime/test/generated/examples/add.example.cpp
+++ b/nn/runtime/test/generated/examples/add.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: add.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/add_broadcast_float16.example.cpp b/nn/runtime/test/generated/examples/add_broadcast_float16.example.cpp
index 4e42019..d641f7a 100644
--- a/nn/runtime/test/generated/examples/add_broadcast_float16.example.cpp
+++ b/nn/runtime/test/generated/examples/add_broadcast_float16.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: add_broadcast_float16.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/add_broadcast_quant8.example.cpp b/nn/runtime/test/generated/examples/add_broadcast_quant8.example.cpp
index 7c7d7df..eddd756 100644
--- a/nn/runtime/test/generated/examples/add_broadcast_quant8.example.cpp
+++ b/nn/runtime/test/generated/examples/add_broadcast_quant8.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: add_broadcast_quant8.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/add_float16.example.cpp b/nn/runtime/test/generated/examples/add_float16.example.cpp
index 40b3090..7433808 100644
--- a/nn/runtime/test/generated/examples/add_float16.example.cpp
+++ b/nn/runtime/test/generated/examples/add_float16.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: add_float16.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/add_quant8.example.cpp b/nn/runtime/test/generated/examples/add_quant8.example.cpp
index a5f24d0..79156ad 100644
--- a/nn/runtime/test/generated/examples/add_quant8.example.cpp
+++ b/nn/runtime/test/generated/examples/add_quant8.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: add_quant8.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/add_relaxed.example.cpp b/nn/runtime/test/generated/examples/add_relaxed.example.cpp
index fd15ed0..7c317b9 100644
--- a/nn/runtime/test/generated/examples/add_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/add_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: add_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/argmax_1.example.cpp b/nn/runtime/test/generated/examples/argmax_1.example.cpp
new file mode 100644
index 0000000..b55689e
--- /dev/null
+++ b/nn/runtime/test/generated/examples/argmax_1.example.cpp
@@ -0,0 +1,187 @@
+// clang-format off
+// Generated file (from: argmax_1.mod.py). Do not edit
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 4.0f, 3.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 0}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples;
+};
+
+std::vector<MixedTypedExample>& get_examples_relaxed() {
+static std::vector<MixedTypedExample> examples_relaxed = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 4.0f, 3.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 0}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_relaxed;
+};
+
+std::vector<MixedTypedExample>& get_examples_float16() {
+static std::vector<MixedTypedExample> examples_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 2.0f, 4.0f, 3.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 0}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_float16;
+};
+
+std::vector<MixedTypedExample>& get_examples_int32() {
+static std::vector<MixedTypedExample> examples_int32 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 2, 4, 3}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 0}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_int32;
+};
+
+std::vector<MixedTypedExample>& get_examples_quant8() {
+static std::vector<MixedTypedExample> examples_quant8 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 4, 3}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 0}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_quant8;
+};
+
diff --git a/nn/runtime/test/generated/examples/argmax_1_float.example.cpp b/nn/runtime/test/generated/examples/argmax_1_float.example.cpp
deleted file mode 100644
index 68e9461..0000000
--- a/nn/runtime/test/generated/examples/argmax_1_float.example.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-// clang-format off
-// Generated file (from: argmax_1_float.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
-// Begin of an example
-{
-.operands = {
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> FLOAT32 map
-  {{0, {1.0f, 2.0f, 4.0f, 3.0f}}},
-  // int -> INT32 map
-  {},
-  // int -> QUANT8_ASYMM map
-  {},
-  // int -> QUANT16_SYMM map
-  {},
-  // int -> FLOAT16 map
-  {},
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> FLOAT32 map
-  {},
-  // int -> INT32 map
-  {{0, {1, 0}}},
-  // int -> QUANT8_ASYMM map
-  {},
-  // int -> QUANT16_SYMM map
-  {},
-  // int -> FLOAT16 map
-  {},
-}
-},
-}, // End of an example
-};
-
diff --git a/nn/runtime/test/generated/examples/argmax_1_float_relaxed.example.cpp b/nn/runtime/test/generated/examples/argmax_1_float_relaxed.example.cpp
deleted file mode 100644
index 682ded0..0000000
--- a/nn/runtime/test/generated/examples/argmax_1_float_relaxed.example.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-// clang-format off
-// Generated file (from: argmax_1_float_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
-// Begin of an example
-{
-.operands = {
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> FLOAT32 map
-  {{0, {1.0f, 2.0f, 4.0f, 3.0f}}},
-  // int -> INT32 map
-  {},
-  // int -> QUANT8_ASYMM map
-  {},
-  // int -> QUANT16_SYMM map
-  {},
-  // int -> FLOAT16 map
-  {},
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> FLOAT32 map
-  {},
-  // int -> INT32 map
-  {{0, {1, 0}}},
-  // int -> QUANT8_ASYMM map
-  {},
-  // int -> QUANT16_SYMM map
-  {},
-  // int -> FLOAT16 map
-  {},
-}
-},
-}, // End of an example
-};
-
diff --git a/nn/runtime/test/generated/examples/argmax_1_int32.example.cpp b/nn/runtime/test/generated/examples/argmax_1_int32.example.cpp
deleted file mode 100644
index a063fe3..0000000
--- a/nn/runtime/test/generated/examples/argmax_1_int32.example.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-// clang-format off
-// Generated file (from: argmax_1_int32.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
-// Begin of an example
-{
-.operands = {
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> FLOAT32 map
-  {},
-  // int -> INT32 map
-  {{0, {1, 2, 4, 3}}},
-  // int -> QUANT8_ASYMM map
-  {},
-  // int -> QUANT16_SYMM map
-  {},
-  // int -> FLOAT16 map
-  {},
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> FLOAT32 map
-  {},
-  // int -> INT32 map
-  {{0, {1, 0}}},
-  // int -> QUANT8_ASYMM map
-  {},
-  // int -> QUANT16_SYMM map
-  {},
-  // int -> FLOAT16 map
-  {},
-}
-},
-}, // End of an example
-};
-
diff --git a/nn/runtime/test/generated/examples/argmax_1_quant8.example.cpp b/nn/runtime/test/generated/examples/argmax_1_quant8.example.cpp
deleted file mode 100644
index dafd4bc..0000000
--- a/nn/runtime/test/generated/examples/argmax_1_quant8.example.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-// clang-format off
-// Generated file (from: argmax_1_quant8.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
-// Begin of an example
-{
-.operands = {
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> FLOAT32 map
-  {},
-  // int -> INT32 map
-  {},
-  // int -> QUANT8_ASYMM map
-  {{0, {1, 2, 4, 3}}},
-  // int -> QUANT16_SYMM map
-  {},
-  // int -> FLOAT16 map
-  {},
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> FLOAT32 map
-  {},
-  // int -> INT32 map
-  {{0, {1, 0}}},
-  // int -> QUANT8_ASYMM map
-  {},
-  // int -> QUANT16_SYMM map
-  {},
-  // int -> FLOAT16 map
-  {},
-}
-},
-}, // End of an example
-};
-
diff --git a/nn/runtime/test/generated/examples/argmax_2.example.cpp b/nn/runtime/test/generated/examples/argmax_2.example.cpp
new file mode 100644
index 0000000..1ed89c8
--- /dev/null
+++ b/nn/runtime/test/generated/examples/argmax_2.example.cpp
@@ -0,0 +1,187 @@
+// clang-format off
+// Generated file (from: argmax_2.mod.py). Do not edit
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 4.0f, 3.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples;
+};
+
+std::vector<MixedTypedExample>& get_examples_relaxed() {
+static std::vector<MixedTypedExample> examples_relaxed = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 4.0f, 3.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_relaxed;
+};
+
+std::vector<MixedTypedExample>& get_examples_float16() {
+static std::vector<MixedTypedExample> examples_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 2.0f, 4.0f, 3.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_float16;
+};
+
+std::vector<MixedTypedExample>& get_examples_int32() {
+static std::vector<MixedTypedExample> examples_int32 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 2, 4, 3}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_int32;
+};
+
+std::vector<MixedTypedExample>& get_examples_quant8() {
+static std::vector<MixedTypedExample> examples_quant8 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 4, 3}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_quant8;
+};
+
diff --git a/nn/runtime/test/generated/examples/argmax_2_float.example.cpp b/nn/runtime/test/generated/examples/argmax_2_float.example.cpp
deleted file mode 100644
index fe4d149..0000000
--- a/nn/runtime/test/generated/examples/argmax_2_float.example.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-// clang-format off
-// Generated file (from: argmax_2_float.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
-// Begin of an example
-{
-.operands = {
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> FLOAT32 map
-  {{0, {1.0f, 2.0f, 4.0f, 3.0f}}},
-  // int -> INT32 map
-  {},
-  // int -> QUANT8_ASYMM map
-  {},
-  // int -> QUANT16_SYMM map
-  {},
-  // int -> FLOAT16 map
-  {},
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> FLOAT32 map
-  {},
-  // int -> INT32 map
-  {{0, {1, 1}}},
-  // int -> QUANT8_ASYMM map
-  {},
-  // int -> QUANT16_SYMM map
-  {},
-  // int -> FLOAT16 map
-  {},
-}
-},
-}, // End of an example
-};
-
diff --git a/nn/runtime/test/generated/examples/argmax_2_float_relaxed.example.cpp b/nn/runtime/test/generated/examples/argmax_2_float_relaxed.example.cpp
deleted file mode 100644
index e510a98..0000000
--- a/nn/runtime/test/generated/examples/argmax_2_float_relaxed.example.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-// clang-format off
-// Generated file (from: argmax_2_float_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
-// Begin of an example
-{
-.operands = {
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> FLOAT32 map
-  {{0, {1.0f, 2.0f, 4.0f, 3.0f}}},
-  // int -> INT32 map
-  {},
-  // int -> QUANT8_ASYMM map
-  {},
-  // int -> QUANT16_SYMM map
-  {},
-  // int -> FLOAT16 map
-  {},
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> FLOAT32 map
-  {},
-  // int -> INT32 map
-  {{0, {1, 1}}},
-  // int -> QUANT8_ASYMM map
-  {},
-  // int -> QUANT16_SYMM map
-  {},
-  // int -> FLOAT16 map
-  {},
-}
-},
-}, // End of an example
-};
-
diff --git a/nn/runtime/test/generated/examples/argmax_2_int32.example.cpp b/nn/runtime/test/generated/examples/argmax_2_int32.example.cpp
deleted file mode 100644
index 131c998..0000000
--- a/nn/runtime/test/generated/examples/argmax_2_int32.example.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-// clang-format off
-// Generated file (from: argmax_2_int32.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
-// Begin of an example
-{
-.operands = {
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> FLOAT32 map
-  {},
-  // int -> INT32 map
-  {{0, {1, 2, 4, 3}}},
-  // int -> QUANT8_ASYMM map
-  {},
-  // int -> QUANT16_SYMM map
-  {},
-  // int -> FLOAT16 map
-  {},
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> FLOAT32 map
-  {},
-  // int -> INT32 map
-  {{0, {1, 1}}},
-  // int -> QUANT8_ASYMM map
-  {},
-  // int -> QUANT16_SYMM map
-  {},
-  // int -> FLOAT16 map
-  {},
-}
-},
-}, // End of an example
-};
-
diff --git a/nn/runtime/test/generated/examples/argmax_2_quant8.example.cpp b/nn/runtime/test/generated/examples/argmax_2_quant8.example.cpp
deleted file mode 100644
index d613058..0000000
--- a/nn/runtime/test/generated/examples/argmax_2_quant8.example.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-// clang-format off
-// Generated file (from: argmax_2_quant8.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
-// Begin of an example
-{
-.operands = {
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> FLOAT32 map
-  {},
-  // int -> INT32 map
-  {},
-  // int -> QUANT8_ASYMM map
-  {{0, {1, 2, 4, 3}}},
-  // int -> QUANT16_SYMM map
-  {},
-  // int -> FLOAT16 map
-  {},
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> FLOAT32 map
-  {},
-  // int -> INT32 map
-  {{0, {1, 1}}},
-  // int -> QUANT8_ASYMM map
-  {},
-  // int -> QUANT16_SYMM map
-  {},
-  // int -> FLOAT16 map
-  {},
-}
-},
-}, // End of an example
-};
-
diff --git a/nn/runtime/test/generated/examples/argmax_3.example.cpp b/nn/runtime/test/generated/examples/argmax_3.example.cpp
new file mode 100644
index 0000000..1b6685c
--- /dev/null
+++ b/nn/runtime/test/generated/examples/argmax_3.example.cpp
@@ -0,0 +1,187 @@
+// clang-format off
+// Generated file (from: argmax_3.mod.py). Do not edit
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 4.0f, 3.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 0}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples;
+};
+
+std::vector<MixedTypedExample>& get_examples_relaxed() {
+static std::vector<MixedTypedExample> examples_relaxed = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 4.0f, 3.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 0}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_relaxed;
+};
+
+std::vector<MixedTypedExample>& get_examples_float16() {
+static std::vector<MixedTypedExample> examples_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 2.0f, 4.0f, 3.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 0}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_float16;
+};
+
+std::vector<MixedTypedExample>& get_examples_int32() {
+static std::vector<MixedTypedExample> examples_int32 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 2, 4, 3}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 0}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_int32;
+};
+
+std::vector<MixedTypedExample>& get_examples_quant8() {
+static std::vector<MixedTypedExample> examples_quant8 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 4, 3}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 0}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_quant8;
+};
+
diff --git a/nn/runtime/test/generated/examples/argmax_3_float.example.cpp b/nn/runtime/test/generated/examples/argmax_3_float.example.cpp
deleted file mode 100644
index 26652ed..0000000
--- a/nn/runtime/test/generated/examples/argmax_3_float.example.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-// clang-format off
-// Generated file (from: argmax_3_float.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
-// Begin of an example
-{
-.operands = {
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> FLOAT32 map
-  {{0, {1.0f, 2.0f, 4.0f, 3.0f}}},
-  // int -> INT32 map
-  {},
-  // int -> QUANT8_ASYMM map
-  {},
-  // int -> QUANT16_SYMM map
-  {},
-  // int -> FLOAT16 map
-  {},
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> FLOAT32 map
-  {},
-  // int -> INT32 map
-  {{0, {1, 0}}},
-  // int -> QUANT8_ASYMM map
-  {},
-  // int -> QUANT16_SYMM map
-  {},
-  // int -> FLOAT16 map
-  {},
-}
-},
-}, // End of an example
-};
-
diff --git a/nn/runtime/test/generated/examples/argmin_1.example.cpp b/nn/runtime/test/generated/examples/argmin_1.example.cpp
new file mode 100644
index 0000000..7478b7d
--- /dev/null
+++ b/nn/runtime/test/generated/examples/argmin_1.example.cpp
@@ -0,0 +1,187 @@
+// clang-format off
+// Generated file (from: argmin_1.mod.py). Do not edit
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 4.0f, 3.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {0, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples;
+};
+
+std::vector<MixedTypedExample>& get_examples_relaxed() {
+static std::vector<MixedTypedExample> examples_relaxed = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 4.0f, 3.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {0, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_relaxed;
+};
+
+std::vector<MixedTypedExample>& get_examples_float16() {
+static std::vector<MixedTypedExample> examples_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 2.0f, 4.0f, 3.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {0, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_float16;
+};
+
+std::vector<MixedTypedExample>& get_examples_int32() {
+static std::vector<MixedTypedExample> examples_int32 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 2, 4, 3}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {0, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_int32;
+};
+
+std::vector<MixedTypedExample>& get_examples_quant8() {
+static std::vector<MixedTypedExample> examples_quant8 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 4, 3}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {0, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_quant8;
+};
+
diff --git a/nn/runtime/test/generated/examples/argmin_1_float.example.cpp b/nn/runtime/test/generated/examples/argmin_1_float.example.cpp
deleted file mode 100644
index 53ecdc5..0000000
--- a/nn/runtime/test/generated/examples/argmin_1_float.example.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-// clang-format off
-// Generated file (from: argmin_1_float.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
-// Begin of an example
-{
-.operands = {
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> FLOAT32 map
-  {{0, {1.0f, 2.0f, 4.0f, 3.0f}}},
-  // int -> INT32 map
-  {},
-  // int -> QUANT8_ASYMM map
-  {},
-  // int -> QUANT16_SYMM map
-  {},
-  // int -> FLOAT16 map
-  {},
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> FLOAT32 map
-  {},
-  // int -> INT32 map
-  {{0, {0, 1}}},
-  // int -> QUANT8_ASYMM map
-  {},
-  // int -> QUANT16_SYMM map
-  {},
-  // int -> FLOAT16 map
-  {},
-}
-},
-}, // End of an example
-};
-
diff --git a/nn/runtime/test/generated/examples/argmin_1_float_relaxed.example.cpp b/nn/runtime/test/generated/examples/argmin_1_float_relaxed.example.cpp
deleted file mode 100644
index c7d1162..0000000
--- a/nn/runtime/test/generated/examples/argmin_1_float_relaxed.example.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-// clang-format off
-// Generated file (from: argmin_1_float_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
-// Begin of an example
-{
-.operands = {
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> FLOAT32 map
-  {{0, {1.0f, 2.0f, 4.0f, 3.0f}}},
-  // int -> INT32 map
-  {},
-  // int -> QUANT8_ASYMM map
-  {},
-  // int -> QUANT16_SYMM map
-  {},
-  // int -> FLOAT16 map
-  {},
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> FLOAT32 map
-  {},
-  // int -> INT32 map
-  {{0, {0, 1}}},
-  // int -> QUANT8_ASYMM map
-  {},
-  // int -> QUANT16_SYMM map
-  {},
-  // int -> FLOAT16 map
-  {},
-}
-},
-}, // End of an example
-};
-
diff --git a/nn/runtime/test/generated/examples/argmin_1_int32.example.cpp b/nn/runtime/test/generated/examples/argmin_1_int32.example.cpp
deleted file mode 100644
index bfa7f48..0000000
--- a/nn/runtime/test/generated/examples/argmin_1_int32.example.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-// clang-format off
-// Generated file (from: argmin_1_int32.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
-// Begin of an example
-{
-.operands = {
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> FLOAT32 map
-  {},
-  // int -> INT32 map
-  {{0, {1, 2, 4, 3}}},
-  // int -> QUANT8_ASYMM map
-  {},
-  // int -> QUANT16_SYMM map
-  {},
-  // int -> FLOAT16 map
-  {},
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> FLOAT32 map
-  {},
-  // int -> INT32 map
-  {{0, {0, 1}}},
-  // int -> QUANT8_ASYMM map
-  {},
-  // int -> QUANT16_SYMM map
-  {},
-  // int -> FLOAT16 map
-  {},
-}
-},
-}, // End of an example
-};
-
diff --git a/nn/runtime/test/generated/examples/argmin_1_quant8.example.cpp b/nn/runtime/test/generated/examples/argmin_1_quant8.example.cpp
deleted file mode 100644
index 3730918..0000000
--- a/nn/runtime/test/generated/examples/argmin_1_quant8.example.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-// clang-format off
-// Generated file (from: argmin_1_quant8.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
-// Begin of an example
-{
-.operands = {
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> FLOAT32 map
-  {},
-  // int -> INT32 map
-  {},
-  // int -> QUANT8_ASYMM map
-  {{0, {1, 2, 4, 3}}},
-  // int -> QUANT16_SYMM map
-  {},
-  // int -> FLOAT16 map
-  {},
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> FLOAT32 map
-  {},
-  // int -> INT32 map
-  {{0, {0, 1}}},
-  // int -> QUANT8_ASYMM map
-  {},
-  // int -> QUANT16_SYMM map
-  {},
-  // int -> FLOAT16 map
-  {},
-}
-},
-}, // End of an example
-};
-
diff --git a/nn/runtime/test/generated/examples/argmin_2.example.cpp b/nn/runtime/test/generated/examples/argmin_2.example.cpp
new file mode 100644
index 0000000..58947ae
--- /dev/null
+++ b/nn/runtime/test/generated/examples/argmin_2.example.cpp
@@ -0,0 +1,187 @@
+// clang-format off
+// Generated file (from: argmin_2.mod.py). Do not edit
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 4.0f, 3.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {0, 0}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples;
+};
+
+std::vector<MixedTypedExample>& get_examples_relaxed() {
+static std::vector<MixedTypedExample> examples_relaxed = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 4.0f, 3.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {0, 0}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_relaxed;
+};
+
+std::vector<MixedTypedExample>& get_examples_float16() {
+static std::vector<MixedTypedExample> examples_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 2.0f, 4.0f, 3.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {0, 0}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_float16;
+};
+
+std::vector<MixedTypedExample>& get_examples_int32() {
+static std::vector<MixedTypedExample> examples_int32 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 2, 4, 3}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {0, 0}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_int32;
+};
+
+std::vector<MixedTypedExample>& get_examples_quant8() {
+static std::vector<MixedTypedExample> examples_quant8 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 4, 3}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {0, 0}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_quant8;
+};
+
diff --git a/nn/runtime/test/generated/examples/argmin_2_float.example.cpp b/nn/runtime/test/generated/examples/argmin_2_float.example.cpp
deleted file mode 100644
index d28043c..0000000
--- a/nn/runtime/test/generated/examples/argmin_2_float.example.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-// clang-format off
-// Generated file (from: argmin_2_float.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
-// Begin of an example
-{
-.operands = {
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> FLOAT32 map
-  {{0, {1.0f, 2.0f, 4.0f, 3.0f}}},
-  // int -> INT32 map
-  {},
-  // int -> QUANT8_ASYMM map
-  {},
-  // int -> QUANT16_SYMM map
-  {},
-  // int -> FLOAT16 map
-  {},
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> FLOAT32 map
-  {},
-  // int -> INT32 map
-  {{0, {0, 0}}},
-  // int -> QUANT8_ASYMM map
-  {},
-  // int -> QUANT16_SYMM map
-  {},
-  // int -> FLOAT16 map
-  {},
-}
-},
-}, // End of an example
-};
-
diff --git a/nn/runtime/test/generated/examples/argmin_2_float_relaxed.example.cpp b/nn/runtime/test/generated/examples/argmin_2_float_relaxed.example.cpp
deleted file mode 100644
index cf495af..0000000
--- a/nn/runtime/test/generated/examples/argmin_2_float_relaxed.example.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-// clang-format off
-// Generated file (from: argmin_2_float_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
-// Begin of an example
-{
-.operands = {
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> FLOAT32 map
-  {{0, {1.0f, 2.0f, 4.0f, 3.0f}}},
-  // int -> INT32 map
-  {},
-  // int -> QUANT8_ASYMM map
-  {},
-  // int -> QUANT16_SYMM map
-  {},
-  // int -> FLOAT16 map
-  {},
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> FLOAT32 map
-  {},
-  // int -> INT32 map
-  {{0, {0, 0}}},
-  // int -> QUANT8_ASYMM map
-  {},
-  // int -> QUANT16_SYMM map
-  {},
-  // int -> FLOAT16 map
-  {},
-}
-},
-}, // End of an example
-};
-
diff --git a/nn/runtime/test/generated/examples/argmin_2_int32.example.cpp b/nn/runtime/test/generated/examples/argmin_2_int32.example.cpp
deleted file mode 100644
index 2001872..0000000
--- a/nn/runtime/test/generated/examples/argmin_2_int32.example.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-// clang-format off
-// Generated file (from: argmin_2_int32.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
-// Begin of an example
-{
-.operands = {
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> FLOAT32 map
-  {},
-  // int -> INT32 map
-  {{0, {1, 2, 4, 3}}},
-  // int -> QUANT8_ASYMM map
-  {},
-  // int -> QUANT16_SYMM map
-  {},
-  // int -> FLOAT16 map
-  {},
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> FLOAT32 map
-  {},
-  // int -> INT32 map
-  {{0, {0, 0}}},
-  // int -> QUANT8_ASYMM map
-  {},
-  // int -> QUANT16_SYMM map
-  {},
-  // int -> FLOAT16 map
-  {},
-}
-},
-}, // End of an example
-};
-
diff --git a/nn/runtime/test/generated/examples/argmin_3.example.cpp b/nn/runtime/test/generated/examples/argmin_3.example.cpp
new file mode 100644
index 0000000..6907824
--- /dev/null
+++ b/nn/runtime/test/generated/examples/argmin_3.example.cpp
@@ -0,0 +1,187 @@
+// clang-format off
+// Generated file (from: argmin_3.mod.py). Do not edit
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 4.0f, 3.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {0, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples;
+};
+
+std::vector<MixedTypedExample>& get_examples_relaxed() {
+static std::vector<MixedTypedExample> examples_relaxed = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 4.0f, 3.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {0, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_relaxed;
+};
+
+std::vector<MixedTypedExample>& get_examples_float16() {
+static std::vector<MixedTypedExample> examples_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 2.0f, 4.0f, 3.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {0, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_float16;
+};
+
+std::vector<MixedTypedExample>& get_examples_int32() {
+static std::vector<MixedTypedExample> examples_int32 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 2, 4, 3}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {0, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_int32;
+};
+
+std::vector<MixedTypedExample>& get_examples_quant8() {
+static std::vector<MixedTypedExample> examples_quant8 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 4, 3}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {0, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_quant8;
+};
+
diff --git a/nn/runtime/test/generated/examples/argmin_3_float.example.cpp b/nn/runtime/test/generated/examples/argmin_3_float.example.cpp
deleted file mode 100644
index 58ba3b1..0000000
--- a/nn/runtime/test/generated/examples/argmin_3_float.example.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-// clang-format off
-// Generated file (from: argmin_3_float.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
-// Begin of an example
-{
-.operands = {
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> FLOAT32 map
-  {{0, {1.0f, 2.0f, 4.0f, 3.0f}}},
-  // int -> INT32 map
-  {},
-  // int -> QUANT8_ASYMM map
-  {},
-  // int -> QUANT16_SYMM map
-  {},
-  // int -> FLOAT16 map
-  {},
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> FLOAT32 map
-  {},
-  // int -> INT32 map
-  {{0, {0, 1}}},
-  // int -> QUANT8_ASYMM map
-  {},
-  // int -> QUANT16_SYMM map
-  {},
-  // int -> FLOAT16 map
-  {},
-}
-},
-}, // End of an example
-};
-
diff --git a/nn/runtime/test/generated/examples/avg_pool_float_1.example.cpp b/nn/runtime/test/generated/examples/avg_pool_float_1.example.cpp
index a7cb1ef..2e99b12 100644
--- a/nn/runtime/test/generated/examples/avg_pool_float_1.example.cpp
+++ b/nn/runtime/test/generated/examples/avg_pool_float_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: avg_pool_float_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/avg_pool_float_1_relaxed.example.cpp b/nn/runtime/test/generated/examples/avg_pool_float_1_relaxed.example.cpp
index d79a238..0b1b889 100644
--- a/nn/runtime/test/generated/examples/avg_pool_float_1_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/avg_pool_float_1_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: avg_pool_float_1_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/avg_pool_float_2.example.cpp b/nn/runtime/test/generated/examples/avg_pool_float_2.example.cpp
index 4deea37..1c4eba0 100644
--- a/nn/runtime/test/generated/examples/avg_pool_float_2.example.cpp
+++ b/nn/runtime/test/generated/examples/avg_pool_float_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: avg_pool_float_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/avg_pool_float_2_relaxed.example.cpp b/nn/runtime/test/generated/examples/avg_pool_float_2_relaxed.example.cpp
index aee0a08..a0fbfed 100644
--- a/nn/runtime/test/generated/examples/avg_pool_float_2_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/avg_pool_float_2_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: avg_pool_float_2_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/avg_pool_float_3.example.cpp b/nn/runtime/test/generated/examples/avg_pool_float_3.example.cpp
index d5b30aa..84ad9cb 100644
--- a/nn/runtime/test/generated/examples/avg_pool_float_3.example.cpp
+++ b/nn/runtime/test/generated/examples/avg_pool_float_3.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: avg_pool_float_3.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/avg_pool_float_3_relaxed.example.cpp b/nn/runtime/test/generated/examples/avg_pool_float_3_relaxed.example.cpp
index 044dc1d..db35618 100644
--- a/nn/runtime/test/generated/examples/avg_pool_float_3_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/avg_pool_float_3_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: avg_pool_float_3_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/avg_pool_float_4.example.cpp b/nn/runtime/test/generated/examples/avg_pool_float_4.example.cpp
index 542aa2a..f0505ec 100644
--- a/nn/runtime/test/generated/examples/avg_pool_float_4.example.cpp
+++ b/nn/runtime/test/generated/examples/avg_pool_float_4.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: avg_pool_float_4.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/avg_pool_float_4_relaxed.example.cpp b/nn/runtime/test/generated/examples/avg_pool_float_4_relaxed.example.cpp
index 6c75cae..335a4e0 100644
--- a/nn/runtime/test/generated/examples/avg_pool_float_4_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/avg_pool_float_4_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: avg_pool_float_4_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/avg_pool_float_5.example.cpp b/nn/runtime/test/generated/examples/avg_pool_float_5.example.cpp
index ecc9317..e8ce3f6 100644
--- a/nn/runtime/test/generated/examples/avg_pool_float_5.example.cpp
+++ b/nn/runtime/test/generated/examples/avg_pool_float_5.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: avg_pool_float_5.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/avg_pool_float_5_relaxed.example.cpp b/nn/runtime/test/generated/examples/avg_pool_float_5_relaxed.example.cpp
index 9da5537..90fc982 100644
--- a/nn/runtime/test/generated/examples/avg_pool_float_5_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/avg_pool_float_5_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: avg_pool_float_5_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/avg_pool_quant8_1.example.cpp b/nn/runtime/test/generated/examples/avg_pool_quant8_1.example.cpp
index ff39d92..b4181df 100644
--- a/nn/runtime/test/generated/examples/avg_pool_quant8_1.example.cpp
+++ b/nn/runtime/test/generated/examples/avg_pool_quant8_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: avg_pool_quant8_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/avg_pool_quant8_2.example.cpp b/nn/runtime/test/generated/examples/avg_pool_quant8_2.example.cpp
index b19d684..39b5f29 100644
--- a/nn/runtime/test/generated/examples/avg_pool_quant8_2.example.cpp
+++ b/nn/runtime/test/generated/examples/avg_pool_quant8_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: avg_pool_quant8_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/avg_pool_quant8_3.example.cpp b/nn/runtime/test/generated/examples/avg_pool_quant8_3.example.cpp
index c16d3eb..cce59c4 100644
--- a/nn/runtime/test/generated/examples/avg_pool_quant8_3.example.cpp
+++ b/nn/runtime/test/generated/examples/avg_pool_quant8_3.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: avg_pool_quant8_3.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/avg_pool_quant8_4.example.cpp b/nn/runtime/test/generated/examples/avg_pool_quant8_4.example.cpp
index 015275b..7036c27 100644
--- a/nn/runtime/test/generated/examples/avg_pool_quant8_4.example.cpp
+++ b/nn/runtime/test/generated/examples/avg_pool_quant8_4.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: avg_pool_quant8_4.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/avg_pool_quant8_5.example.cpp b/nn/runtime/test/generated/examples/avg_pool_quant8_5.example.cpp
index 1b540ce..3c6ef7b 100644
--- a/nn/runtime/test/generated/examples/avg_pool_quant8_5.example.cpp
+++ b/nn/runtime/test/generated/examples/avg_pool_quant8_5.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: avg_pool_quant8_5.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/avg_pool_v1_2.example.cpp b/nn/runtime/test/generated/examples/avg_pool_v1_2.example.cpp
index 9597ba7..cef5e4d 100644
--- a/nn/runtime/test/generated/examples/avg_pool_v1_2.example.cpp
+++ b/nn/runtime/test/generated/examples/avg_pool_v1_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: avg_pool_v1_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples_nhwc = {
+std::vector<MixedTypedExample>& get_examples_nhwc() {
+static std::vector<MixedTypedExample> examples_nhwc = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_quant8() {
+static std::vector<MixedTypedExample> examples_nhwc_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -101,8 +108,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nchw = {
+std::vector<MixedTypedExample>& get_examples_nchw() {
+static std::vector<MixedTypedExample> examples_nchw = {
 // Begin of an example
 {
 .operands = {
@@ -135,8 +145,11 @@
 },
 }, // End of an example
 };
+return examples_nchw;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -169,8 +182,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nchw_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nchw_quant8() {
+static std::vector<MixedTypedExample> examples_nchw_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -203,8 +219,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_2() {
+static std::vector<MixedTypedExample> examples_nhwc_2 = {
 // Begin of an example
 {
 .operands = {
@@ -237,8 +256,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed_2() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -271,8 +293,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_quant8_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_quant8_2() {
+static std::vector<MixedTypedExample> examples_nhwc_quant8_2 = {
 // Begin of an example
 {
 .operands = {
@@ -305,8 +330,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_quant8_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_2() {
+static std::vector<MixedTypedExample> examples_nchw_2 = {
 // Begin of an example
 {
 .operands = {
@@ -339,8 +367,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed_2() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -373,8 +404,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_quant8_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_quant8_2() {
+static std::vector<MixedTypedExample> examples_nchw_quant8_2 = {
 // Begin of an example
 {
 .operands = {
@@ -407,8 +441,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_quant8_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_3 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_3() {
+static std::vector<MixedTypedExample> examples_nhwc_3 = {
 // Begin of an example
 {
 .operands = {
@@ -441,8 +478,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_3;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed_3 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed_3() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed_3 = {
 // Begin of an example
 {
 .operands = {
@@ -475,8 +515,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed_3;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_quant8_3 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_quant8_3() {
+static std::vector<MixedTypedExample> examples_nhwc_quant8_3 = {
 // Begin of an example
 {
 .operands = {
@@ -509,8 +552,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_quant8_3;
+};
 
-std::vector<MixedTypedExample> examples_nchw_3 = {
+std::vector<MixedTypedExample>& get_examples_nchw_3() {
+static std::vector<MixedTypedExample> examples_nchw_3 = {
 // Begin of an example
 {
 .operands = {
@@ -543,8 +589,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_3;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed_3 = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed_3() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed_3 = {
 // Begin of an example
 {
 .operands = {
@@ -577,8 +626,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed_3;
+};
 
-std::vector<MixedTypedExample> examples_nchw_quant8_3 = {
+std::vector<MixedTypedExample>& get_examples_nchw_quant8_3() {
+static std::vector<MixedTypedExample> examples_nchw_quant8_3 = {
 // Begin of an example
 {
 .operands = {
@@ -611,8 +663,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_quant8_3;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_4 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_4() {
+static std::vector<MixedTypedExample> examples_nhwc_4 = {
 // Begin of an example
 {
 .operands = {
@@ -645,8 +700,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_4;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed_4 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed_4() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed_4 = {
 // Begin of an example
 {
 .operands = {
@@ -679,8 +737,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed_4;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_quant8_4 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_quant8_4() {
+static std::vector<MixedTypedExample> examples_nhwc_quant8_4 = {
 // Begin of an example
 {
 .operands = {
@@ -713,8 +774,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_quant8_4;
+};
 
-std::vector<MixedTypedExample> examples_nchw_4 = {
+std::vector<MixedTypedExample>& get_examples_nchw_4() {
+static std::vector<MixedTypedExample> examples_nchw_4 = {
 // Begin of an example
 {
 .operands = {
@@ -747,8 +811,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_4;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed_4 = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed_4() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed_4 = {
 // Begin of an example
 {
 .operands = {
@@ -781,8 +848,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed_4;
+};
 
-std::vector<MixedTypedExample> examples_nchw_quant8_4 = {
+std::vector<MixedTypedExample>& get_examples_nchw_quant8_4() {
+static std::vector<MixedTypedExample> examples_nchw_quant8_4 = {
 // Begin of an example
 {
 .operands = {
@@ -815,8 +885,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_quant8_4;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_5 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_5() {
+static std::vector<MixedTypedExample> examples_nhwc_5 = {
 // Begin of an example
 {
 .operands = {
@@ -849,8 +922,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_5;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed_5 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed_5() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed_5 = {
 // Begin of an example
 {
 .operands = {
@@ -883,8 +959,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed_5;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_quant8_5 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_quant8_5() {
+static std::vector<MixedTypedExample> examples_nhwc_quant8_5 = {
 // Begin of an example
 {
 .operands = {
@@ -917,8 +996,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_quant8_5;
+};
 
-std::vector<MixedTypedExample> examples_nchw_5 = {
+std::vector<MixedTypedExample>& get_examples_nchw_5() {
+static std::vector<MixedTypedExample> examples_nchw_5 = {
 // Begin of an example
 {
 .operands = {
@@ -951,8 +1033,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_5;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed_5 = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed_5() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed_5 = {
 // Begin of an example
 {
 .operands = {
@@ -985,8 +1070,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed_5;
+};
 
-std::vector<MixedTypedExample> examples_nchw_quant8_5 = {
+std::vector<MixedTypedExample>& get_examples_nchw_quant8_5() {
+static std::vector<MixedTypedExample> examples_nchw_quant8_5 = {
 // Begin of an example
 {
 .operands = {
@@ -1019,4 +1107,6 @@
 },
 }, // End of an example
 };
+return examples_nchw_quant8_5;
+};
 
diff --git a/nn/runtime/test/generated/examples/axis_aligned_bbox_transform.example.cpp b/nn/runtime/test/generated/examples/axis_aligned_bbox_transform.example.cpp
index 1761c05..4a6f997 100644
--- a/nn/runtime/test/generated/examples/axis_aligned_bbox_transform.example.cpp
+++ b/nn/runtime/test/generated/examples/axis_aligned_bbox_transform.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: axis_aligned_bbox_transform.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
-std::vector<MixedTypedExample> examples_relaxed = {
+std::vector<MixedTypedExample>& get_examples_relaxed() {
+static std::vector<MixedTypedExample> examples_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_2 = {
+std::vector<MixedTypedExample>& get_examples_2() {
+static std::vector<MixedTypedExample> examples_2 = {
 // Begin of an example
 {
 .operands = {
@@ -101,8 +108,11 @@
 },
 }, // End of an example
 };
+return examples_2;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_2() {
+static std::vector<MixedTypedExample> examples_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -135,8 +145,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_single_batch = {
+std::vector<MixedTypedExample>& get_examples_single_batch() {
+static std::vector<MixedTypedExample> examples_single_batch = {
 // Begin of an example
 {
 .operands = {
@@ -169,8 +182,11 @@
 },
 }, // End of an example
 };
+return examples_single_batch;
+};
 
-std::vector<MixedTypedExample> examples_single_batch_relaxed = {
+std::vector<MixedTypedExample>& get_examples_single_batch_relaxed() {
+static std::vector<MixedTypedExample> examples_single_batch_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -203,4 +219,6 @@
 },
 }, // End of an example
 };
+return examples_single_batch_relaxed;
+};
 
diff --git a/nn/runtime/test/generated/examples/batch_to_space.example.cpp b/nn/runtime/test/generated/examples/batch_to_space.example.cpp
index 21075ef..30ca664 100644
--- a/nn/runtime/test/generated/examples/batch_to_space.example.cpp
+++ b/nn/runtime/test/generated/examples/batch_to_space.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: batch_to_space.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/batch_to_space_float_1.example.cpp b/nn/runtime/test/generated/examples/batch_to_space_float_1.example.cpp
index 5f16bae..0c3f081 100644
--- a/nn/runtime/test/generated/examples/batch_to_space_float_1.example.cpp
+++ b/nn/runtime/test/generated/examples/batch_to_space_float_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: batch_to_space_float_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/batch_to_space_float_1_relaxed.example.cpp b/nn/runtime/test/generated/examples/batch_to_space_float_1_relaxed.example.cpp
index 7251d72..a4623d6 100644
--- a/nn/runtime/test/generated/examples/batch_to_space_float_1_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/batch_to_space_float_1_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: batch_to_space_float_1_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/batch_to_space_quant8_1.example.cpp b/nn/runtime/test/generated/examples/batch_to_space_quant8_1.example.cpp
index cc50fb4..31af706 100644
--- a/nn/runtime/test/generated/examples/batch_to_space_quant8_1.example.cpp
+++ b/nn/runtime/test/generated/examples/batch_to_space_quant8_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: batch_to_space_quant8_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/batch_to_space_relaxed.example.cpp b/nn/runtime/test/generated/examples/batch_to_space_relaxed.example.cpp
index 23fa0f4..63a6db7 100644
--- a/nn/runtime/test/generated/examples/batch_to_space_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/batch_to_space_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: batch_to_space_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/batch_to_space_v1_2.example.cpp b/nn/runtime/test/generated/examples/batch_to_space_v1_2.example.cpp
index cb310c3..f81da44 100644
--- a/nn/runtime/test/generated/examples/batch_to_space_v1_2.example.cpp
+++ b/nn/runtime/test/generated/examples/batch_to_space_v1_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: batch_to_space_v1_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples_nhwc = {
+std::vector<MixedTypedExample>& get_examples_nhwc() {
+static std::vector<MixedTypedExample> examples_nhwc = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,48 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_float16() {
+static std::vector<MixedTypedExample> examples_nhwc_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.399999976158142f, 2.299999952316284f, 3.200000047683716f, 4.099999904632568f, 5.400000095367432f, 6.300000190734863f, 7.199999809265137f, 8.100000381469727f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.399999976158142f, 2.299999952316284f, 3.200000047683716f, 4.099999904632568f, 5.400000095367432f, 6.300000190734863f, 7.199999809265137f, 8.100000381469727f}}},
+}
+},
+}, // End of an example
+};
+return examples_nhwc_float16;
+};
+
+std::vector<MixedTypedExample>& get_examples_nhwc_quant8() {
+static std::vector<MixedTypedExample> examples_nhwc_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -101,8 +145,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nchw = {
+std::vector<MixedTypedExample>& get_examples_nchw() {
+static std::vector<MixedTypedExample> examples_nchw = {
 // Begin of an example
 {
 .operands = {
@@ -135,8 +182,11 @@
 },
 }, // End of an example
 };
+return examples_nchw;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -169,8 +219,48 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nchw_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nchw_float16() {
+static std::vector<MixedTypedExample> examples_nchw_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.399999976158142f, 2.299999952316284f, 3.200000047683716f, 4.099999904632568f, 5.400000095367432f, 6.300000190734863f, 7.199999809265137f, 8.100000381469727f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.399999976158142f, 3.200000047683716f, 5.400000095367432f, 7.199999809265137f, 2.299999952316284f, 4.099999904632568f, 6.300000190734863f, 8.100000381469727f}}},
+}
+},
+}, // End of an example
+};
+return examples_nchw_float16;
+};
+
+std::vector<MixedTypedExample>& get_examples_nchw_quant8() {
+static std::vector<MixedTypedExample> examples_nchw_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -203,8 +293,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_2() {
+static std::vector<MixedTypedExample> examples_nhwc_2 = {
 // Begin of an example
 {
 .operands = {
@@ -237,8 +330,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed_2() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -271,8 +367,48 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_quant8_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_float16_2() {
+static std::vector<MixedTypedExample> examples_nhwc_float16_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 5.0f, 2.0f, 6.0f, 9.0f, 13.0f, 10.0f, 14.0f, 3.0f, 7.0f, 4.0f, 8.0f, 11.0f, 15.0f, 12.0f, 16.0f}}},
+}
+},
+}, // End of an example
+};
+return examples_nhwc_float16_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_nhwc_quant8_2() {
+static std::vector<MixedTypedExample> examples_nhwc_quant8_2 = {
 // Begin of an example
 {
 .operands = {
@@ -305,8 +441,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_quant8_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_2() {
+static std::vector<MixedTypedExample> examples_nchw_2 = {
 // Begin of an example
 {
 .operands = {
@@ -339,8 +478,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed_2() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -373,8 +515,48 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_quant8_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_float16_2() {
+static std::vector<MixedTypedExample> examples_nchw_float16_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 5.0f, 2.0f, 6.0f, 9.0f, 13.0f, 10.0f, 14.0f, 3.0f, 7.0f, 4.0f, 8.0f, 11.0f, 15.0f, 12.0f, 16.0f}}},
+}
+},
+}, // End of an example
+};
+return examples_nchw_float16_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_nchw_quant8_2() {
+static std::vector<MixedTypedExample> examples_nchw_quant8_2 = {
 // Begin of an example
 {
 .operands = {
@@ -407,4 +589,6 @@
 },
 }, // End of an example
 };
+return examples_nchw_quant8_2;
+};
 
diff --git a/nn/runtime/test/generated/examples/cast.example.cpp b/nn/runtime/test/generated/examples/cast.example.cpp
index 211a3c7..f59eec5 100644
--- a/nn/runtime/test/generated/examples/cast.example.cpp
+++ b/nn/runtime/test/generated/examples/cast.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: cast.mod.py). Do not edit
-std::vector<MixedTypedExample> examples_float16_to_float16 = {
+std::vector<MixedTypedExample>& get_examples_float16_to_float16() {
+static std::vector<MixedTypedExample> examples_float16_to_float16 = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples_float16_to_float16;
+};
 
-std::vector<MixedTypedExample> examples_float16_to_float32 = {
+std::vector<MixedTypedExample>& get_examples_float16_to_float32() {
+static std::vector<MixedTypedExample> examples_float16_to_float32 = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,11 @@
 },
 }, // End of an example
 };
+return examples_float16_to_float32;
+};
 
-std::vector<MixedTypedExample> examples_float16_to_float32_relaxed = {
+std::vector<MixedTypedExample>& get_examples_float16_to_float32_relaxed() {
+static std::vector<MixedTypedExample> examples_float16_to_float32_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -101,8 +108,11 @@
 },
 }, // End of an example
 };
+return examples_float16_to_float32_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_float16_to_int32 = {
+std::vector<MixedTypedExample>& get_examples_float16_to_int32() {
+static std::vector<MixedTypedExample> examples_float16_to_int32 = {
 // Begin of an example
 {
 .operands = {
@@ -135,8 +145,11 @@
 },
 }, // End of an example
 };
+return examples_float16_to_int32;
+};
 
-std::vector<MixedTypedExample> examples_float16_to_quant8 = {
+std::vector<MixedTypedExample>& get_examples_float16_to_quant8() {
+static std::vector<MixedTypedExample> examples_float16_to_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -169,8 +182,11 @@
 },
 }, // End of an example
 };
+return examples_float16_to_quant8;
+};
 
-std::vector<MixedTypedExample> examples_float32_to_float16 = {
+std::vector<MixedTypedExample>& get_examples_float32_to_float16() {
+static std::vector<MixedTypedExample> examples_float32_to_float16 = {
 // Begin of an example
 {
 .operands = {
@@ -203,8 +219,11 @@
 },
 }, // End of an example
 };
+return examples_float32_to_float16;
+};
 
-std::vector<MixedTypedExample> examples_float32_to_float16_relaxed = {
+std::vector<MixedTypedExample>& get_examples_float32_to_float16_relaxed() {
+static std::vector<MixedTypedExample> examples_float32_to_float16_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -237,8 +256,11 @@
 },
 }, // End of an example
 };
+return examples_float32_to_float16_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_float32_to_float32 = {
+std::vector<MixedTypedExample>& get_examples_float32_to_float32() {
+static std::vector<MixedTypedExample> examples_float32_to_float32 = {
 // Begin of an example
 {
 .operands = {
@@ -271,8 +293,11 @@
 },
 }, // End of an example
 };
+return examples_float32_to_float32;
+};
 
-std::vector<MixedTypedExample> examples_float32_to_float32_relaxed = {
+std::vector<MixedTypedExample>& get_examples_float32_to_float32_relaxed() {
+static std::vector<MixedTypedExample> examples_float32_to_float32_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -305,8 +330,11 @@
 },
 }, // End of an example
 };
+return examples_float32_to_float32_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_float32_to_int32 = {
+std::vector<MixedTypedExample>& get_examples_float32_to_int32() {
+static std::vector<MixedTypedExample> examples_float32_to_int32 = {
 // Begin of an example
 {
 .operands = {
@@ -339,8 +367,11 @@
 },
 }, // End of an example
 };
+return examples_float32_to_int32;
+};
 
-std::vector<MixedTypedExample> examples_float32_to_int32_relaxed = {
+std::vector<MixedTypedExample>& get_examples_float32_to_int32_relaxed() {
+static std::vector<MixedTypedExample> examples_float32_to_int32_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -373,8 +404,11 @@
 },
 }, // End of an example
 };
+return examples_float32_to_int32_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_float32_to_quant8 = {
+std::vector<MixedTypedExample>& get_examples_float32_to_quant8() {
+static std::vector<MixedTypedExample> examples_float32_to_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -407,8 +441,11 @@
 },
 }, // End of an example
 };
+return examples_float32_to_quant8;
+};
 
-std::vector<MixedTypedExample> examples_float32_to_quant8_relaxed = {
+std::vector<MixedTypedExample>& get_examples_float32_to_quant8_relaxed() {
+static std::vector<MixedTypedExample> examples_float32_to_quant8_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -441,8 +478,11 @@
 },
 }, // End of an example
 };
+return examples_float32_to_quant8_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_int32_to_float16 = {
+std::vector<MixedTypedExample>& get_examples_int32_to_float16() {
+static std::vector<MixedTypedExample> examples_int32_to_float16 = {
 // Begin of an example
 {
 .operands = {
@@ -475,8 +515,11 @@
 },
 }, // End of an example
 };
+return examples_int32_to_float16;
+};
 
-std::vector<MixedTypedExample> examples_int32_to_float32 = {
+std::vector<MixedTypedExample>& get_examples_int32_to_float32() {
+static std::vector<MixedTypedExample> examples_int32_to_float32 = {
 // Begin of an example
 {
 .operands = {
@@ -509,8 +552,11 @@
 },
 }, // End of an example
 };
+return examples_int32_to_float32;
+};
 
-std::vector<MixedTypedExample> examples_int32_to_float32_relaxed = {
+std::vector<MixedTypedExample>& get_examples_int32_to_float32_relaxed() {
+static std::vector<MixedTypedExample> examples_int32_to_float32_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -543,8 +589,11 @@
 },
 }, // End of an example
 };
+return examples_int32_to_float32_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_int32_to_int32 = {
+std::vector<MixedTypedExample>& get_examples_int32_to_int32() {
+static std::vector<MixedTypedExample> examples_int32_to_int32 = {
 // Begin of an example
 {
 .operands = {
@@ -577,8 +626,11 @@
 },
 }, // End of an example
 };
+return examples_int32_to_int32;
+};
 
-std::vector<MixedTypedExample> examples_int32_to_quant8 = {
+std::vector<MixedTypedExample>& get_examples_int32_to_quant8() {
+static std::vector<MixedTypedExample> examples_int32_to_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -611,8 +663,11 @@
 },
 }, // End of an example
 };
+return examples_int32_to_quant8;
+};
 
-std::vector<MixedTypedExample> examples_quant8_to_float16 = {
+std::vector<MixedTypedExample>& get_examples_quant8_to_float16() {
+static std::vector<MixedTypedExample> examples_quant8_to_float16 = {
 // Begin of an example
 {
 .operands = {
@@ -645,8 +700,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_to_float16;
+};
 
-std::vector<MixedTypedExample> examples_quant8_to_float32 = {
+std::vector<MixedTypedExample>& get_examples_quant8_to_float32() {
+static std::vector<MixedTypedExample> examples_quant8_to_float32 = {
 // Begin of an example
 {
 .operands = {
@@ -679,8 +737,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_to_float32;
+};
 
-std::vector<MixedTypedExample> examples_quant8_to_float32_relaxed = {
+std::vector<MixedTypedExample>& get_examples_quant8_to_float32_relaxed() {
+static std::vector<MixedTypedExample> examples_quant8_to_float32_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -713,8 +774,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_to_float32_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_quant8_to_int32 = {
+std::vector<MixedTypedExample>& get_examples_quant8_to_int32() {
+static std::vector<MixedTypedExample> examples_quant8_to_int32 = {
 // Begin of an example
 {
 .operands = {
@@ -747,8 +811,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_to_int32;
+};
 
-std::vector<MixedTypedExample> examples_quant8_to_quant8 = {
+std::vector<MixedTypedExample>& get_examples_quant8_to_quant8() {
+static std::vector<MixedTypedExample> examples_quant8_to_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -781,4 +848,6 @@
 },
 }, // End of an example
 };
+return examples_quant8_to_quant8;
+};
 
diff --git a/nn/runtime/test/generated/examples/channel_shuffle.example.cpp b/nn/runtime/test/generated/examples/channel_shuffle.example.cpp
index d6d4d4e..3d31872 100644
--- a/nn/runtime/test/generated/examples/channel_shuffle.example.cpp
+++ b/nn/runtime/test/generated/examples/channel_shuffle.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: channel_shuffle.mod.py). Do not edit
-std::vector<MixedTypedExample> examples_dim4_axis0 = {
+std::vector<MixedTypedExample>& get_examples_dim4_axis0() {
+static std::vector<MixedTypedExample> examples_dim4_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples_dim4_axis0;
+};
 
-std::vector<MixedTypedExample> examples_dim4_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_dim4_axis0_neg() {
+static std::vector<MixedTypedExample> examples_dim4_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,11 @@
 },
 }, // End of an example
 };
+return examples_dim4_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_dim4_axis1 = {
+std::vector<MixedTypedExample>& get_examples_dim4_axis1() {
+static std::vector<MixedTypedExample> examples_dim4_axis1 = {
 // Begin of an example
 {
 .operands = {
@@ -101,8 +108,11 @@
 },
 }, // End of an example
 };
+return examples_dim4_axis1;
+};
 
-std::vector<MixedTypedExample> examples_dim4_axis1_neg = {
+std::vector<MixedTypedExample>& get_examples_dim4_axis1_neg() {
+static std::vector<MixedTypedExample> examples_dim4_axis1_neg = {
 // Begin of an example
 {
 .operands = {
@@ -135,8 +145,11 @@
 },
 }, // End of an example
 };
+return examples_dim4_axis1_neg;
+};
 
-std::vector<MixedTypedExample> examples_dim4_axis2 = {
+std::vector<MixedTypedExample>& get_examples_dim4_axis2() {
+static std::vector<MixedTypedExample> examples_dim4_axis2 = {
 // Begin of an example
 {
 .operands = {
@@ -169,8 +182,11 @@
 },
 }, // End of an example
 };
+return examples_dim4_axis2;
+};
 
-std::vector<MixedTypedExample> examples_dim4_axis2_neg = {
+std::vector<MixedTypedExample>& get_examples_dim4_axis2_neg() {
+static std::vector<MixedTypedExample> examples_dim4_axis2_neg = {
 // Begin of an example
 {
 .operands = {
@@ -203,8 +219,11 @@
 },
 }, // End of an example
 };
+return examples_dim4_axis2_neg;
+};
 
-std::vector<MixedTypedExample> examples_dim4_axis3 = {
+std::vector<MixedTypedExample>& get_examples_dim4_axis3() {
+static std::vector<MixedTypedExample> examples_dim4_axis3 = {
 // Begin of an example
 {
 .operands = {
@@ -237,8 +256,11 @@
 },
 }, // End of an example
 };
+return examples_dim4_axis3;
+};
 
-std::vector<MixedTypedExample> examples_dim4_axis3_neg = {
+std::vector<MixedTypedExample>& get_examples_dim4_axis3_neg() {
+static std::vector<MixedTypedExample> examples_dim4_axis3_neg = {
 // Begin of an example
 {
 .operands = {
@@ -271,8 +293,11 @@
 },
 }, // End of an example
 };
+return examples_dim4_axis3_neg;
+};
 
-std::vector<MixedTypedExample> examples_dim3_axis0 = {
+std::vector<MixedTypedExample>& get_examples_dim3_axis0() {
+static std::vector<MixedTypedExample> examples_dim3_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -305,8 +330,11 @@
 },
 }, // End of an example
 };
+return examples_dim3_axis0;
+};
 
-std::vector<MixedTypedExample> examples_dim3_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_dim3_axis0_neg() {
+static std::vector<MixedTypedExample> examples_dim3_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -339,8 +367,11 @@
 },
 }, // End of an example
 };
+return examples_dim3_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_dim3_axis1 = {
+std::vector<MixedTypedExample>& get_examples_dim3_axis1() {
+static std::vector<MixedTypedExample> examples_dim3_axis1 = {
 // Begin of an example
 {
 .operands = {
@@ -373,8 +404,11 @@
 },
 }, // End of an example
 };
+return examples_dim3_axis1;
+};
 
-std::vector<MixedTypedExample> examples_dim3_axis1_neg = {
+std::vector<MixedTypedExample>& get_examples_dim3_axis1_neg() {
+static std::vector<MixedTypedExample> examples_dim3_axis1_neg = {
 // Begin of an example
 {
 .operands = {
@@ -407,8 +441,11 @@
 },
 }, // End of an example
 };
+return examples_dim3_axis1_neg;
+};
 
-std::vector<MixedTypedExample> examples_dim3_axis2 = {
+std::vector<MixedTypedExample>& get_examples_dim3_axis2() {
+static std::vector<MixedTypedExample> examples_dim3_axis2 = {
 // Begin of an example
 {
 .operands = {
@@ -441,8 +478,11 @@
 },
 }, // End of an example
 };
+return examples_dim3_axis2;
+};
 
-std::vector<MixedTypedExample> examples_dim3_axis2_neg = {
+std::vector<MixedTypedExample>& get_examples_dim3_axis2_neg() {
+static std::vector<MixedTypedExample> examples_dim3_axis2_neg = {
 // Begin of an example
 {
 .operands = {
@@ -475,8 +515,11 @@
 },
 }, // End of an example
 };
+return examples_dim3_axis2_neg;
+};
 
-std::vector<MixedTypedExample> examples_dim2_axis0 = {
+std::vector<MixedTypedExample>& get_examples_dim2_axis0() {
+static std::vector<MixedTypedExample> examples_dim2_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -509,8 +552,11 @@
 },
 }, // End of an example
 };
+return examples_dim2_axis0;
+};
 
-std::vector<MixedTypedExample> examples_dim2_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_dim2_axis0_neg() {
+static std::vector<MixedTypedExample> examples_dim2_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -543,8 +589,11 @@
 },
 }, // End of an example
 };
+return examples_dim2_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_dim2_axis1 = {
+std::vector<MixedTypedExample>& get_examples_dim2_axis1() {
+static std::vector<MixedTypedExample> examples_dim2_axis1 = {
 // Begin of an example
 {
 .operands = {
@@ -577,8 +626,11 @@
 },
 }, // End of an example
 };
+return examples_dim2_axis1;
+};
 
-std::vector<MixedTypedExample> examples_dim2_axis1_neg = {
+std::vector<MixedTypedExample>& get_examples_dim2_axis1_neg() {
+static std::vector<MixedTypedExample> examples_dim2_axis1_neg = {
 // Begin of an example
 {
 .operands = {
@@ -611,8 +663,11 @@
 },
 }, // End of an example
 };
+return examples_dim2_axis1_neg;
+};
 
-std::vector<MixedTypedExample> examples_dim1_axis0 = {
+std::vector<MixedTypedExample>& get_examples_dim1_axis0() {
+static std::vector<MixedTypedExample> examples_dim1_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -645,8 +700,11 @@
 },
 }, // End of an example
 };
+return examples_dim1_axis0;
+};
 
-std::vector<MixedTypedExample> examples_dim1_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_dim1_axis0_neg() {
+static std::vector<MixedTypedExample> examples_dim1_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -679,8 +737,11 @@
 },
 }, // End of an example
 };
+return examples_dim1_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_dim4_axis0 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_dim4_axis0() {
+static std::vector<MixedTypedExample> examples_relaxed_dim4_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -713,8 +774,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_dim4_axis0;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_dim4_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_relaxed_dim4_axis0_neg() {
+static std::vector<MixedTypedExample> examples_relaxed_dim4_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -747,8 +811,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_dim4_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_dim4_axis1 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_dim4_axis1() {
+static std::vector<MixedTypedExample> examples_relaxed_dim4_axis1 = {
 // Begin of an example
 {
 .operands = {
@@ -781,8 +848,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_dim4_axis1;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_dim4_axis1_neg = {
+std::vector<MixedTypedExample>& get_examples_relaxed_dim4_axis1_neg() {
+static std::vector<MixedTypedExample> examples_relaxed_dim4_axis1_neg = {
 // Begin of an example
 {
 .operands = {
@@ -815,8 +885,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_dim4_axis1_neg;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_dim4_axis2 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_dim4_axis2() {
+static std::vector<MixedTypedExample> examples_relaxed_dim4_axis2 = {
 // Begin of an example
 {
 .operands = {
@@ -849,8 +922,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_dim4_axis2;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_dim4_axis2_neg = {
+std::vector<MixedTypedExample>& get_examples_relaxed_dim4_axis2_neg() {
+static std::vector<MixedTypedExample> examples_relaxed_dim4_axis2_neg = {
 // Begin of an example
 {
 .operands = {
@@ -883,8 +959,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_dim4_axis2_neg;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_dim4_axis3 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_dim4_axis3() {
+static std::vector<MixedTypedExample> examples_relaxed_dim4_axis3 = {
 // Begin of an example
 {
 .operands = {
@@ -917,8 +996,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_dim4_axis3;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_dim4_axis3_neg = {
+std::vector<MixedTypedExample>& get_examples_relaxed_dim4_axis3_neg() {
+static std::vector<MixedTypedExample> examples_relaxed_dim4_axis3_neg = {
 // Begin of an example
 {
 .operands = {
@@ -951,8 +1033,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_dim4_axis3_neg;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_dim3_axis0 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_dim3_axis0() {
+static std::vector<MixedTypedExample> examples_relaxed_dim3_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -985,8 +1070,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_dim3_axis0;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_dim3_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_relaxed_dim3_axis0_neg() {
+static std::vector<MixedTypedExample> examples_relaxed_dim3_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1019,8 +1107,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_dim3_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_dim3_axis1 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_dim3_axis1() {
+static std::vector<MixedTypedExample> examples_relaxed_dim3_axis1 = {
 // Begin of an example
 {
 .operands = {
@@ -1053,8 +1144,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_dim3_axis1;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_dim3_axis1_neg = {
+std::vector<MixedTypedExample>& get_examples_relaxed_dim3_axis1_neg() {
+static std::vector<MixedTypedExample> examples_relaxed_dim3_axis1_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1087,8 +1181,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_dim3_axis1_neg;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_dim3_axis2 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_dim3_axis2() {
+static std::vector<MixedTypedExample> examples_relaxed_dim3_axis2 = {
 // Begin of an example
 {
 .operands = {
@@ -1121,8 +1218,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_dim3_axis2;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_dim3_axis2_neg = {
+std::vector<MixedTypedExample>& get_examples_relaxed_dim3_axis2_neg() {
+static std::vector<MixedTypedExample> examples_relaxed_dim3_axis2_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1155,8 +1255,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_dim3_axis2_neg;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_dim2_axis0 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_dim2_axis0() {
+static std::vector<MixedTypedExample> examples_relaxed_dim2_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -1189,8 +1292,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_dim2_axis0;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_dim2_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_relaxed_dim2_axis0_neg() {
+static std::vector<MixedTypedExample> examples_relaxed_dim2_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1223,8 +1329,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_dim2_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_dim2_axis1 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_dim2_axis1() {
+static std::vector<MixedTypedExample> examples_relaxed_dim2_axis1 = {
 // Begin of an example
 {
 .operands = {
@@ -1257,8 +1366,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_dim2_axis1;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_dim2_axis1_neg = {
+std::vector<MixedTypedExample>& get_examples_relaxed_dim2_axis1_neg() {
+static std::vector<MixedTypedExample> examples_relaxed_dim2_axis1_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1291,8 +1403,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_dim2_axis1_neg;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_dim1_axis0 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_dim1_axis0() {
+static std::vector<MixedTypedExample> examples_relaxed_dim1_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -1325,8 +1440,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_dim1_axis0;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_dim1_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_relaxed_dim1_axis0_neg() {
+static std::vector<MixedTypedExample> examples_relaxed_dim1_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1359,8 +1477,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_dim1_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_quant8_dim4_axis0 = {
+std::vector<MixedTypedExample>& get_examples_quant8_dim4_axis0() {
+static std::vector<MixedTypedExample> examples_quant8_dim4_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -1393,8 +1514,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_dim4_axis0;
+};
 
-std::vector<MixedTypedExample> examples_quant8_dim4_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_quant8_dim4_axis0_neg() {
+static std::vector<MixedTypedExample> examples_quant8_dim4_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1427,8 +1551,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_dim4_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_quant8_dim4_axis1 = {
+std::vector<MixedTypedExample>& get_examples_quant8_dim4_axis1() {
+static std::vector<MixedTypedExample> examples_quant8_dim4_axis1 = {
 // Begin of an example
 {
 .operands = {
@@ -1461,8 +1588,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_dim4_axis1;
+};
 
-std::vector<MixedTypedExample> examples_quant8_dim4_axis1_neg = {
+std::vector<MixedTypedExample>& get_examples_quant8_dim4_axis1_neg() {
+static std::vector<MixedTypedExample> examples_quant8_dim4_axis1_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1495,8 +1625,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_dim4_axis1_neg;
+};
 
-std::vector<MixedTypedExample> examples_quant8_dim4_axis2 = {
+std::vector<MixedTypedExample>& get_examples_quant8_dim4_axis2() {
+static std::vector<MixedTypedExample> examples_quant8_dim4_axis2 = {
 // Begin of an example
 {
 .operands = {
@@ -1529,8 +1662,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_dim4_axis2;
+};
 
-std::vector<MixedTypedExample> examples_quant8_dim4_axis2_neg = {
+std::vector<MixedTypedExample>& get_examples_quant8_dim4_axis2_neg() {
+static std::vector<MixedTypedExample> examples_quant8_dim4_axis2_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1563,8 +1699,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_dim4_axis2_neg;
+};
 
-std::vector<MixedTypedExample> examples_quant8_dim4_axis3 = {
+std::vector<MixedTypedExample>& get_examples_quant8_dim4_axis3() {
+static std::vector<MixedTypedExample> examples_quant8_dim4_axis3 = {
 // Begin of an example
 {
 .operands = {
@@ -1597,8 +1736,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_dim4_axis3;
+};
 
-std::vector<MixedTypedExample> examples_quant8_dim4_axis3_neg = {
+std::vector<MixedTypedExample>& get_examples_quant8_dim4_axis3_neg() {
+static std::vector<MixedTypedExample> examples_quant8_dim4_axis3_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1631,8 +1773,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_dim4_axis3_neg;
+};
 
-std::vector<MixedTypedExample> examples_quant8_dim3_axis0 = {
+std::vector<MixedTypedExample>& get_examples_quant8_dim3_axis0() {
+static std::vector<MixedTypedExample> examples_quant8_dim3_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -1665,8 +1810,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_dim3_axis0;
+};
 
-std::vector<MixedTypedExample> examples_quant8_dim3_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_quant8_dim3_axis0_neg() {
+static std::vector<MixedTypedExample> examples_quant8_dim3_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1699,8 +1847,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_dim3_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_quant8_dim3_axis1 = {
+std::vector<MixedTypedExample>& get_examples_quant8_dim3_axis1() {
+static std::vector<MixedTypedExample> examples_quant8_dim3_axis1 = {
 // Begin of an example
 {
 .operands = {
@@ -1733,8 +1884,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_dim3_axis1;
+};
 
-std::vector<MixedTypedExample> examples_quant8_dim3_axis1_neg = {
+std::vector<MixedTypedExample>& get_examples_quant8_dim3_axis1_neg() {
+static std::vector<MixedTypedExample> examples_quant8_dim3_axis1_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1767,8 +1921,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_dim3_axis1_neg;
+};
 
-std::vector<MixedTypedExample> examples_quant8_dim3_axis2 = {
+std::vector<MixedTypedExample>& get_examples_quant8_dim3_axis2() {
+static std::vector<MixedTypedExample> examples_quant8_dim3_axis2 = {
 // Begin of an example
 {
 .operands = {
@@ -1801,8 +1958,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_dim3_axis2;
+};
 
-std::vector<MixedTypedExample> examples_quant8_dim3_axis2_neg = {
+std::vector<MixedTypedExample>& get_examples_quant8_dim3_axis2_neg() {
+static std::vector<MixedTypedExample> examples_quant8_dim3_axis2_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1835,8 +1995,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_dim3_axis2_neg;
+};
 
-std::vector<MixedTypedExample> examples_quant8_dim2_axis0 = {
+std::vector<MixedTypedExample>& get_examples_quant8_dim2_axis0() {
+static std::vector<MixedTypedExample> examples_quant8_dim2_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -1869,8 +2032,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_dim2_axis0;
+};
 
-std::vector<MixedTypedExample> examples_quant8_dim2_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_quant8_dim2_axis0_neg() {
+static std::vector<MixedTypedExample> examples_quant8_dim2_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1903,8 +2069,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_dim2_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_quant8_dim2_axis1 = {
+std::vector<MixedTypedExample>& get_examples_quant8_dim2_axis1() {
+static std::vector<MixedTypedExample> examples_quant8_dim2_axis1 = {
 // Begin of an example
 {
 .operands = {
@@ -1937,8 +2106,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_dim2_axis1;
+};
 
-std::vector<MixedTypedExample> examples_quant8_dim2_axis1_neg = {
+std::vector<MixedTypedExample>& get_examples_quant8_dim2_axis1_neg() {
+static std::vector<MixedTypedExample> examples_quant8_dim2_axis1_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1971,8 +2143,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_dim2_axis1_neg;
+};
 
-std::vector<MixedTypedExample> examples_quant8_dim1_axis0 = {
+std::vector<MixedTypedExample>& get_examples_quant8_dim1_axis0() {
+static std::vector<MixedTypedExample> examples_quant8_dim1_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -2005,8 +2180,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_dim1_axis0;
+};
 
-std::vector<MixedTypedExample> examples_quant8_dim1_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_quant8_dim1_axis0_neg() {
+static std::vector<MixedTypedExample> examples_quant8_dim1_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -2039,4 +2217,6 @@
 },
 }, // End of an example
 };
+return examples_quant8_dim1_axis0_neg;
+};
 
diff --git a/nn/runtime/test/generated/examples/concat_float16_1.example.cpp b/nn/runtime/test/generated/examples/concat_float16_1.example.cpp
index eab9fae..447e6fa 100644
--- a/nn/runtime/test/generated/examples/concat_float16_1.example.cpp
+++ b/nn/runtime/test/generated/examples/concat_float16_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: concat_float16_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -12,7 +13,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}}, {1, {7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f}}},
@@ -25,7 +26,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f}}},
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/concat_float16_2.example.cpp b/nn/runtime/test/generated/examples/concat_float16_2.example.cpp
index 212f075..9458a28 100644
--- a/nn/runtime/test/generated/examples/concat_float16_2.example.cpp
+++ b/nn/runtime/test/generated/examples/concat_float16_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: concat_float16_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -12,7 +13,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, 31.0f, 32.0f, 33.0f, 34.0f, 35.0f, 36.0f, 37.0f, 38.0f, 39.0f, 40.0f, 41.0f, 42.0f, 43.0f, 44.0f, 45.0f, 46.0f, 47.0f, 48.0f, 49.0f, 50.0f, 51.0f, 52.0f, 53.0f, 54.0f, 55.0f, 56.0f, 57.0f, 58.0f, 59.0f, 60.0f, 61.0f, 62.0f, 63.0f, 64.0f, 65.0f, 66.0f, 67.0f, 68.0f, 69.0f, 70.0f, 71.0f, 72.0f, 73.0f, 74.0f, 75.0f, 76.0f, 77.0f, 78.0f, 79.0f, 80.0f, 81.0f, 82.0f, 83.0f, 84.0f, 85.0f, 86.0f, 87.0f, 88.0f, 89.0f, 90.0f, 91.0f, 92.0f, 93.0f, 94.0f, 95.0f, 96.0f, 97.0f, 98.0f, 99.0f, 100.0f, 101.0f, 102.0f, 103.0f, 104.0f, 105.0f, 106.0f, 107.0f, 108.0f, 109.0f, 110.0f, 111.0f, 112.0f, 113.0f, 114.0f, 115.0f, 116.0f, 117.0f, 118.0f, 119.0f, 120.0f, 121.0f, 122.0f, 123.0f, 124.0f, 125.0f, 126.0f, 127.0f, 128.0f, 129.0f, 130.0f, 131.0f, 132.0f, 133.0f, 134.0f, 135.0f, 136.0f, 137.0f, 138.0f, 139.0f, 140.0f, 141.0f, 142.0f, 143.0f, 144.0f, 145.0f, 146.0f, 147.0f, 148.0f, 149.0f, 150.0f, 151.0f, 152.0f, 153.0f, 154.0f, 155.0f, 156.0f, 157.0f, 158.0f, 159.0f, 160.0f, 161.0f, 162.0f, 163.0f, 164.0f, 165.0f, 166.0f, 167.0f, 168.0f, 169.0f, 170.0f, 171.0f, 172.0f, 173.0f, 174.0f, 175.0f, 176.0f, 177.0f, 178.0f, 179.0f, 180.0f, 181.0f, 182.0f, 183.0f, 184.0f, 185.0f, 186.0f, 187.0f, 188.0f, 189.0f, 190.0f, 191.0f, 192.0f, 193.0f, 194.0f, 195.0f, 196.0f, 197.0f, 198.0f, 199.0f, 200.0f, 201.0f, 202.0f, 203.0f, 204.0f, 205.0f, 206.0f, 207.0f, 208.0f, 209.0f, 210.0f, 211.0f, 212.0f, 213.0f, 214.0f, 215.0f, 216.0f, 217.0f, 218.0f, 219.0f, 220.0f, 221.0f, 222.0f, 223.0f, 224.0f, 225.0f, 226.0f, 227.0f, 228.0f, 229.0f, 230.0f, 231.0f, 232.0f, 233.0f, 234.0f, 235.0f, 236.0f, 237.0f, 238.0f, 239.0f, 240.0f, 241.0f, 242.0f, 243.0f, 244.0f, 245.0f, 246.0f, 247.0f, 248.0f, 249.0f, 250.0f, 251.0f, 252.0f, 253.0f, 254.0f, 255.0f, 256.0f, 257.0f, 258.0f, 259.0f, 260.0f, 261.0f, 
262.0f, 263.0f, 264.0f, 265.0f, 266.0f, 267.0f, 268.0f, 269.0f, 270.0f, 271.0f, 272.0f, 273.0f, 274.0f, 275.0f, 276.0f, 277.0f, 278.0f, 279.0f, 280.0f, 281.0f, 282.0f, 283.0f, 284.0f, 285.0f, 286.0f, 287.0f, 288.0f, 289.0f, 290.0f, 291.0f, 292.0f, 293.0f, 294.0f, 295.0f, 296.0f, 297.0f, 298.0f, 299.0f, 300.0f, 301.0f, 302.0f, 303.0f, 304.0f, 305.0f, 306.0f, 307.0f, 308.0f, 309.0f, 310.0f, 311.0f, 312.0f, 313.0f, 314.0f, 315.0f, 316.0f, 317.0f, 318.0f, 319.0f, 320.0f, 321.0f, 322.0f, 323.0f, 324.0f, 325.0f, 326.0f, 327.0f, 328.0f, 329.0f, 330.0f, 331.0f, 332.0f, 333.0f, 334.0f, 335.0f, 336.0f, 337.0f, 338.0f, 339.0f, 340.0f, 341.0f, 342.0f, 343.0f, 344.0f, 345.0f, 346.0f, 347.0f, 348.0f, 349.0f, 350.0f, 351.0f, 352.0f, 353.0f, 354.0f, 355.0f, 356.0f, 357.0f, 358.0f, 359.0f, 360.0f, 361.0f, 362.0f, 363.0f, 364.0f, 365.0f, 366.0f, 367.0f, 368.0f, 369.0f, 370.0f, 371.0f, 372.0f, 373.0f, 374.0f, 375.0f, 376.0f, 377.0f, 378.0f, 379.0f, 380.0f, 381.0f, 382.0f, 383.0f, 384.0f, 385.0f, 386.0f, 387.0f, 388.0f, 389.0f, 390.0f, 391.0f, 392.0f, 393.0f, 394.0f, 395.0f, 396.0f, 397.0f, 398.0f, 399.0f, 400.0f, 401.0f, 402.0f, 403.0f, 404.0f, 405.0f, 406.0f, 407.0f, 408.0f, 409.0f, 410.0f, 411.0f, 412.0f, 413.0f, 414.0f, 415.0f, 416.0f, 417.0f, 418.0f, 419.0f, 420.0f, 421.0f, 422.0f, 423.0f, 424.0f, 425.0f, 426.0f, 427.0f, 428.0f, 429.0f, 430.0f, 431.0f, 432.0f, 433.0f, 434.0f, 435.0f, 436.0f, 437.0f, 438.0f, 439.0f, 440.0f, 441.0f, 442.0f, 443.0f, 444.0f, 445.0f, 446.0f, 447.0f, 448.0f, 449.0f, 450.0f, 451.0f, 452.0f, 453.0f, 454.0f, 455.0f, 456.0f, 457.0f, 458.0f, 459.0f, 460.0f, 461.0f, 462.0f, 463.0f, 464.0f, 465.0f, 466.0f, 467.0f, 468.0f, 469.0f, 470.0f, 471.0f, 472.0f, 473.0f, 474.0f, 475.0f, 476.0f, 477.0f, 478.0f, 479.0f, 480.0f, 481.0f, 482.0f, 483.0f, 484.0f, 485.0f, 486.0f, 487.0f, 488.0f, 489.0f, 490.0f, 491.0f, 492.0f, 493.0f, 494.0f, 495.0f, 496.0f, 497.0f, 498.0f, 499.0f, 500.0f, 501.0f, 502.0f, 503.0f, 504.0f, 505.0f, 506.0f, 507.0f, 508.0f, 509.0f, 510.0f, 511.0f, 
512.0f, 513.0f, 514.0f, 515.0f, 516.0f, 517.0f, 518.0f, 519.0f, 520.0f, 521.0f, 522.0f, 523.0f, 524.0f, 525.0f, 526.0f, 527.0f, 528.0f, 529.0f, 530.0f, 531.0f, 532.0f, 533.0f, 534.0f, 535.0f, 536.0f, 537.0f, 538.0f, 539.0f, 540.0f, 541.0f, 542.0f, 543.0f, 544.0f, 545.0f, 546.0f, 547.0f, 548.0f, 549.0f, 550.0f, 551.0f, 552.0f, 553.0f, 554.0f, 555.0f, 556.0f, 557.0f, 558.0f, 559.0f, 560.0f, 561.0f, 562.0f, 563.0f, 564.0f, 565.0f, 566.0f, 567.0f, 568.0f, 569.0f, 570.0f, 571.0f, 572.0f, 573.0f, 574.0f, 575.0f, 576.0f, 577.0f, 578.0f, 579.0f, 580.0f, 581.0f, 582.0f, 583.0f, 584.0f, 585.0f, 586.0f, 587.0f, 588.0f, 589.0f, 590.0f, 591.0f, 592.0f, 593.0f, 594.0f, 595.0f, 596.0f, 597.0f, 598.0f, 599.0f, 600.0f, 601.0f, 602.0f, 603.0f, 604.0f, 605.0f, 606.0f, 607.0f, 608.0f, 609.0f, 610.0f, 611.0f, 612.0f, 613.0f, 614.0f, 615.0f, 616.0f, 617.0f, 618.0f, 619.0f, 620.0f, 621.0f, 622.0f, 623.0f, 624.0f, 625.0f, 626.0f, 627.0f, 628.0f, 629.0f, 630.0f, 631.0f, 632.0f, 633.0f, 634.0f, 635.0f, 636.0f, 637.0f, 638.0f, 639.0f, 640.0f, 641.0f, 642.0f, 643.0f, 644.0f, 645.0f, 646.0f, 647.0f, 648.0f, 649.0f, 650.0f, 651.0f, 652.0f, 653.0f, 654.0f, 655.0f, 656.0f, 657.0f, 658.0f, 659.0f, 660.0f, 661.0f, 662.0f, 663.0f, 664.0f, 665.0f, 666.0f, 667.0f, 668.0f, 669.0f, 670.0f, 671.0f, 672.0f, 673.0f, 674.0f, 675.0f, 676.0f, 677.0f, 678.0f, 679.0f, 680.0f, 681.0f, 682.0f, 683.0f, 684.0f, 685.0f, 686.0f, 687.0f, 688.0f, 689.0f, 690.0f, 691.0f, 692.0f, 693.0f, 694.0f, 695.0f, 696.0f, 697.0f, 698.0f, 699.0f, 700.0f, 701.0f, 702.0f, 703.0f, 704.0f, 705.0f, 706.0f, 707.0f, 708.0f, 709.0f, 710.0f, 711.0f, 712.0f, 713.0f, 714.0f, 715.0f, 716.0f, 717.0f, 718.0f, 719.0f, 720.0f, 721.0f, 722.0f, 723.0f, 724.0f, 725.0f, 726.0f, 727.0f, 728.0f, 729.0f, 730.0f, 731.0f, 732.0f, 733.0f, 734.0f, 735.0f, 736.0f, 737.0f, 738.0f, 739.0f, 740.0f, 741.0f, 742.0f, 743.0f, 744.0f, 745.0f, 746.0f, 747.0f, 748.0f, 749.0f, 750.0f, 751.0f, 752.0f, 753.0f, 754.0f, 755.0f, 756.0f, 757.0f, 758.0f, 759.0f, 760.0f, 761.0f, 
762.0f, 763.0f, 764.0f, 765.0f, 766.0f, 767.0f, 768.0f, 769.0f, 770.0f, 771.0f, 772.0f, 773.0f, 774.0f, 775.0f, 776.0f, 777.0f, 778.0f, 779.0f, 780.0f, 781.0f, 782.0f, 783.0f, 784.0f, 785.0f, 786.0f, 787.0f, 788.0f, 789.0f, 790.0f, 791.0f, 792.0f, 793.0f, 794.0f, 795.0f, 796.0f, 797.0f, 798.0f, 799.0f, 800.0f, 801.0f, 802.0f, 803.0f, 804.0f, 805.0f, 806.0f, 807.0f, 808.0f, 809.0f, 810.0f, 811.0f, 812.0f, 813.0f, 814.0f, 815.0f, 816.0f, 817.0f, 818.0f, 819.0f, 820.0f, 821.0f, 822.0f, 823.0f, 824.0f, 825.0f, 826.0f, 827.0f, 828.0f, 829.0f, 830.0f, 831.0f, 832.0f, 833.0f, 834.0f, 835.0f, 836.0f, 837.0f, 838.0f, 839.0f, 840.0f, 841.0f, 842.0f, 843.0f, 844.0f, 845.0f, 846.0f, 847.0f, 848.0f, 849.0f, 850.0f, 851.0f, 852.0f, 853.0f, 854.0f, 855.0f, 856.0f, 857.0f, 858.0f, 859.0f, 860.0f, 861.0f, 862.0f, 863.0f, 864.0f, 865.0f, 866.0f, 867.0f, 868.0f, 869.0f, 870.0f, 871.0f, 872.0f, 873.0f, 874.0f, 875.0f, 876.0f, 877.0f, 878.0f, 879.0f, 880.0f, 881.0f, 882.0f, 883.0f, 884.0f, 885.0f, 886.0f, 887.0f, 888.0f, 889.0f, 890.0f, 891.0f, 892.0f, 893.0f, 894.0f, 895.0f, 896.0f, 897.0f, 898.0f, 899.0f, 900.0f, 901.0f, 902.0f, 903.0f, 904.0f, 905.0f, 906.0f, 907.0f, 908.0f, 909.0f, 910.0f, 911.0f, 912.0f, 913.0f, 914.0f, 915.0f, 916.0f, 917.0f, 918.0f, 919.0f, 920.0f, 921.0f, 922.0f, 923.0f, 924.0f, 925.0f, 926.0f, 927.0f, 928.0f, 929.0f, 930.0f, 931.0f, 932.0f, 933.0f, 934.0f, 935.0f, 936.0f, 937.0f, 938.0f, 939.0f, 940.0f, 941.0f, 942.0f, 943.0f, 944.0f, 945.0f, 946.0f, 947.0f, 948.0f, 949.0f, 950.0f, 951.0f, 952.0f, 953.0f, 954.0f, 955.0f, 956.0f, 957.0f, 958.0f, 959.0f, 960.0f, 961.0f, 962.0f, 963.0f, 964.0f, 965.0f, 966.0f, 967.0f, 968.0f, 969.0f, 970.0f, 971.0f, 972.0f, 973.0f, 974.0f, 975.0f, 976.0f, 977.0f, 978.0f, 979.0f, 980.0f, 981.0f, 982.0f, 983.0f, 984.0f, 985.0f, 986.0f, 987.0f, 988.0f, 989.0f, 990.0f, 991.0f, 992.0f, 993.0f, 994.0f, 995.0f, 996.0f, 997.0f, 998.0f, 999.0f, 1000.0f, 1001.0f, 1002.0f, 1003.0f, 1004.0f, 1005.0f, 1006.0f, 1007.0f, 1008.0f, 1009.0f, 
1010.0f, 1011.0f, 1012.0f, 1013.0f, 1014.0f, 1015.0f, 1016.0f, 1017.0f, 1018.0f, 1019.0f, 1020.0f, 1021.0f, 1022.0f, 1023.0f, 1024.0f, 1025.0f, 1026.0f, 1027.0f, 1028.0f, 1029.0f, 1030.0f, 1031.0f, 1032.0f, 1033.0f, 1034.0f, 1035.0f, 1036.0f, 1037.0f, 1038.0f, 1039.0f, 1040.0f, 1041.0f, 1042.0f, 1043.0f, 1044.0f, 1045.0f, 1046.0f, 1047.0f, 1048.0f, 1049.0f, 1050.0f, 1051.0f, 1052.0f, 1053.0f, 1054.0f, 1055.0f, 1056.0f, 1057.0f, 1058.0f, 1059.0f, 1060.0f, 1061.0f, 1062.0f, 1063.0f, 1064.0f, 1065.0f, 1066.0f, 1067.0f, 1068.0f, 1069.0f, 1070.0f, 1071.0f, 1072.0f, 1073.0f, 1074.0f, 1075.0f, 1076.0f, 1077.0f, 1078.0f, 1079.0f, 1080.0f, 1081.0f, 1082.0f, 1083.0f, 1084.0f, 1085.0f, 1086.0f, 1087.0f, 1088.0f, 1089.0f, 1090.0f, 1091.0f, 1092.0f, 1093.0f, 1094.0f, 1095.0f, 1096.0f, 1097.0f, 1098.0f, 1099.0f, 1100.0f, 1101.0f, 1102.0f, 1103.0f, 1104.0f, 1105.0f, 1106.0f, 1107.0f, 1108.0f, 1109.0f, 1110.0f, 1111.0f, 1112.0f, 1113.0f, 1114.0f, 1115.0f, 1116.0f, 1117.0f, 1118.0f, 1119.0f, 1120.0f, 1121.0f, 1122.0f, 1123.0f, 1124.0f, 1125.0f, 1126.0f, 1127.0f, 1128.0f, 1129.0f, 1130.0f, 1131.0f, 1132.0f, 1133.0f, 1134.0f, 1135.0f, 1136.0f, 1137.0f, 1138.0f, 1139.0f, 1140.0f, 1141.0f, 1142.0f, 1143.0f, 1144.0f, 1145.0f, 1146.0f, 1147.0f, 1148.0f, 1149.0f, 1150.0f, 1151.0f, 1152.0f, 1153.0f, 1154.0f, 1155.0f, 1156.0f, 1157.0f, 1158.0f, 1159.0f, 1160.0f, 1161.0f, 1162.0f, 1163.0f, 1164.0f, 1165.0f, 1166.0f, 1167.0f, 1168.0f, 1169.0f, 1170.0f, 1171.0f, 1172.0f, 1173.0f, 1174.0f, 1175.0f, 1176.0f, 1177.0f, 1178.0f, 1179.0f, 1180.0f, 1181.0f, 1182.0f, 1183.0f, 1184.0f, 1185.0f, 1186.0f, 1187.0f, 1188.0f, 1189.0f, 1190.0f, 1191.0f, 1192.0f, 1193.0f, 1194.0f, 1195.0f, 1196.0f, 1197.0f, 1198.0f, 1199.0f, 1200.0f, 1201.0f, 1202.0f, 1203.0f, 1204.0f, 1205.0f, 1206.0f, 1207.0f, 1208.0f, 1209.0f, 1210.0f, 1211.0f, 1212.0f, 1213.0f, 1214.0f, 1215.0f, 1216.0f, 1217.0f, 1218.0f, 1219.0f, 1220.0f, 1221.0f, 1222.0f, 1223.0f, 1224.0f, 1225.0f, 1226.0f, 1227.0f, 1228.0f, 1229.0f, 1230.0f, 1231.0f, 
1232.0f, 1233.0f, 1234.0f, 1235.0f, 1236.0f, 1237.0f, 1238.0f, 1239.0f, 1240.0f, 1241.0f, 1242.0f, 1243.0f, 1244.0f, 1245.0f, 1246.0f, 1247.0f, 1248.0f, 1249.0f, 1250.0f, 1251.0f, 1252.0f, 1253.0f, 1254.0f, 1255.0f, 1256.0f, 1257.0f, 1258.0f, 1259.0f, 1260.0f, 1261.0f, 1262.0f, 1263.0f, 1264.0f, 1265.0f, 1266.0f, 1267.0f, 1268.0f, 1269.0f, 1270.0f, 1271.0f, 1272.0f, 1273.0f, 1274.0f, 1275.0f, 1276.0f, 1277.0f, 1278.0f, 1279.0f, 1280.0f, 1281.0f, 1282.0f, 1283.0f, 1284.0f, 1285.0f, 1286.0f, 1287.0f, 1288.0f, 1289.0f, 1290.0f, 1291.0f, 1292.0f, 1293.0f, 1294.0f, 1295.0f, 1296.0f, 1297.0f, 1298.0f, 1299.0f, 1300.0f, 1301.0f, 1302.0f, 1303.0f, 1304.0f, 1305.0f, 1306.0f, 1307.0f, 1308.0f, 1309.0f, 1310.0f, 1311.0f, 1312.0f, 1313.0f, 1314.0f, 1315.0f, 1316.0f, 1317.0f, 1318.0f, 1319.0f, 1320.0f, 1321.0f, 1322.0f, 1323.0f, 1324.0f, 1325.0f, 1326.0f, 1327.0f, 1328.0f, 1329.0f, 1330.0f, 1331.0f, 1332.0f, 1333.0f, 1334.0f, 1335.0f, 1336.0f, 1337.0f, 1338.0f, 1339.0f, 1340.0f, 1341.0f, 1342.0f, 1343.0f, 1344.0f, 1345.0f, 1346.0f, 1347.0f, 1348.0f, 1349.0f, 1350.0f, 1351.0f, 1352.0f, 1353.0f, 1354.0f, 1355.0f, 1356.0f, 1357.0f, 1358.0f, 1359.0f, 1360.0f, 1361.0f, 1362.0f, 1363.0f, 1364.0f, 1365.0f, 1366.0f, 1367.0f, 1368.0f, 1369.0f, 1370.0f, 1371.0f, 1372.0f, 1373.0f, 1374.0f, 1375.0f, 1376.0f, 1377.0f, 1378.0f, 1379.0f, 1380.0f, 1381.0f, 1382.0f, 1383.0f, 1384.0f, 1385.0f, 1386.0f, 1387.0f, 1388.0f, 1389.0f, 1390.0f, 1391.0f, 1392.0f, 1393.0f, 1394.0f, 1395.0f, 1396.0f, 1397.0f, 1398.0f, 1399.0f, 1400.0f, 1401.0f, 1402.0f, 1403.0f, 1404.0f, 1405.0f, 1406.0f, 1407.0f, 1408.0f, 1409.0f, 1410.0f, 1411.0f, 1412.0f, 1413.0f, 1414.0f, 1415.0f, 1416.0f, 1417.0f, 1418.0f, 1419.0f, 1420.0f, 1421.0f, 1422.0f, 1423.0f, 1424.0f, 1425.0f, 1426.0f, 1427.0f, 1428.0f, 1429.0f, 1430.0f, 1431.0f, 1432.0f, 1433.0f, 1434.0f, 1435.0f, 1436.0f, 1437.0f, 1438.0f, 1439.0f, 1440.0f, 1441.0f, 1442.0f, 1443.0f, 1444.0f, 1445.0f, 1446.0f, 1447.0f, 1448.0f, 1449.0f, 1450.0f, 1451.0f, 1452.0f, 1453.0f, 
1454.0f, 1455.0f, 1456.0f, 1457.0f, 1458.0f, 1459.0f, 1460.0f, 1461.0f, 1462.0f, 1463.0f, 1464.0f, 1465.0f, 1466.0f, 1467.0f, 1468.0f, 1469.0f, 1470.0f, 1471.0f, 1472.0f, 1473.0f, 1474.0f, 1475.0f, 1476.0f, 1477.0f, 1478.0f, 1479.0f, 1480.0f, 1481.0f, 1482.0f, 1483.0f, 1484.0f, 1485.0f, 1486.0f, 1487.0f, 1488.0f, 1489.0f, 1490.0f, 1491.0f, 1492.0f, 1493.0f, 1494.0f, 1495.0f, 1496.0f, 1497.0f, 1498.0f, 1499.0f, 1500.0f, 1501.0f, 1502.0f, 1503.0f, 1504.0f, 1505.0f, 1506.0f, 1507.0f, 1508.0f, 1509.0f, 1510.0f, 1511.0f, 1512.0f, 1513.0f, 1514.0f, 1515.0f, 1516.0f, 1517.0f, 1518.0f, 1519.0f, 1520.0f, 1521.0f, 1522.0f, 1523.0f, 1524.0f, 1525.0f, 1526.0f, 1527.0f, 1528.0f, 1529.0f, 1530.0f, 1531.0f, 1532.0f, 1533.0f, 1534.0f, 1535.0f, 1536.0f, 1537.0f, 1538.0f, 1539.0f, 1540.0f, 1541.0f, 1542.0f, 1543.0f, 1544.0f, 1545.0f, 1546.0f, 1547.0f, 1548.0f, 1549.0f, 1550.0f, 1551.0f, 1552.0f, 1553.0f, 1554.0f, 1555.0f, 1556.0f, 1557.0f, 1558.0f, 1559.0f, 1560.0f, 1561.0f, 1562.0f, 1563.0f, 1564.0f, 1565.0f, 1566.0f, 1567.0f, 1568.0f, 1569.0f, 1570.0f, 1571.0f, 1572.0f, 1573.0f, 1574.0f, 1575.0f, 1576.0f, 1577.0f, 1578.0f, 1579.0f, 1580.0f, 1581.0f, 1582.0f, 1583.0f, 1584.0f, 1585.0f, 1586.0f, 1587.0f, 1588.0f, 1589.0f, 1590.0f, 1591.0f, 1592.0f, 1593.0f, 1594.0f, 1595.0f, 1596.0f, 1597.0f, 1598.0f, 1599.0f, 1600.0f, 1601.0f, 1602.0f, 1603.0f, 1604.0f, 1605.0f, 1606.0f, 1607.0f, 1608.0f, 1609.0f, 1610.0f, 1611.0f, 1612.0f, 1613.0f, 1614.0f, 1615.0f, 1616.0f, 1617.0f, 1618.0f, 1619.0f, 1620.0f, 1621.0f, 1622.0f, 1623.0f, 1624.0f, 1625.0f, 1626.0f, 1627.0f, 1628.0f, 1629.0f, 1630.0f, 1631.0f, 1632.0f, 1633.0f, 1634.0f, 1635.0f, 1636.0f, 1637.0f, 1638.0f, 1639.0f, 1640.0f, 1641.0f, 1642.0f, 1643.0f, 1644.0f, 1645.0f, 1646.0f, 1647.0f, 1648.0f, 1649.0f, 1650.0f, 1651.0f, 1652.0f, 1653.0f, 1654.0f, 1655.0f, 1656.0f, 1657.0f, 1658.0f, 1659.0f, 1660.0f, 1661.0f, 1662.0f, 1663.0f, 1664.0f, 1665.0f, 1666.0f, 1667.0f, 1668.0f, 1669.0f, 1670.0f, 1671.0f, 1672.0f, 1673.0f, 1674.0f, 1675.0f, 
1676.0f, 1677.0f, 1678.0f, 1679.0f, 1680.0f, 1681.0f, 1682.0f, 1683.0f, 1684.0f, 1685.0f, 1686.0f, 1687.0f, 1688.0f, 1689.0f, 1690.0f, 1691.0f, 1692.0f, 1693.0f, 1694.0f, 1695.0f, 1696.0f, 1697.0f, 1698.0f, 1699.0f, 1700.0f, 1701.0f, 1702.0f, 1703.0f, 1704.0f, 1705.0f, 1706.0f, 1707.0f, 1708.0f, 1709.0f, 1710.0f, 1711.0f, 1712.0f, 1713.0f, 1714.0f, 1715.0f, 1716.0f, 1717.0f, 1718.0f, 1719.0f, 1720.0f, 1721.0f, 1722.0f, 1723.0f, 1724.0f, 1725.0f, 1726.0f, 1727.0f, 1728.0f, 1729.0f, 1730.0f, 1731.0f, 1732.0f, 1733.0f, 1734.0f, 1735.0f, 1736.0f, 1737.0f, 1738.0f, 1739.0f, 1740.0f, 1741.0f, 1742.0f, 1743.0f, 1744.0f, 1745.0f, 1746.0f, 1747.0f, 1748.0f, 1749.0f, 1750.0f, 1751.0f, 1752.0f, 1753.0f, 1754.0f, 1755.0f, 1756.0f, 1757.0f, 1758.0f, 1759.0f, 1760.0f, 1761.0f, 1762.0f, 1763.0f, 1764.0f, 1765.0f, 1766.0f, 1767.0f, 1768.0f, 1769.0f, 1770.0f, 1771.0f, 1772.0f, 1773.0f, 1774.0f, 1775.0f, 1776.0f, 1777.0f, 1778.0f, 1779.0f, 1780.0f, 1781.0f, 1782.0f, 1783.0f, 1784.0f, 1785.0f, 1786.0f, 1787.0f, 1788.0f, 1789.0f, 1790.0f, 1791.0f, 1792.0f, 1793.0f, 1794.0f, 1795.0f, 1796.0f, 1797.0f, 1798.0f, 1799.0f, 1800.0f, 1801.0f, 1802.0f, 1803.0f, 1804.0f, 1805.0f, 1806.0f, 1807.0f, 1808.0f, 1809.0f, 1810.0f, 1811.0f, 1812.0f, 1813.0f, 1814.0f, 1815.0f, 1816.0f, 1817.0f, 1818.0f, 1819.0f, 1820.0f, 1821.0f, 1822.0f, 1823.0f, 1824.0f, 1825.0f, 1826.0f, 1827.0f, 1828.0f, 1829.0f, 1830.0f, 1831.0f, 1832.0f, 1833.0f, 1834.0f, 1835.0f, 1836.0f, 1837.0f, 1838.0f, 1839.0f, 1840.0f, 1841.0f, 1842.0f, 1843.0f, 1844.0f, 1845.0f, 1846.0f, 1847.0f, 1848.0f, 1849.0f, 1850.0f, 1851.0f, 1852.0f, 1853.0f, 1854.0f, 1855.0f, 1856.0f, 1857.0f, 1858.0f, 1859.0f, 1860.0f, 1861.0f, 1862.0f, 1863.0f, 1864.0f, 1865.0f, 1866.0f, 1867.0f, 1868.0f, 1869.0f, 1870.0f, 1871.0f, 1872.0f, 1873.0f, 1874.0f, 1875.0f, 1876.0f, 1877.0f, 1878.0f, 1879.0f, 1880.0f, 1881.0f, 1882.0f, 1883.0f, 1884.0f, 1885.0f, 1886.0f, 1887.0f, 1888.0f, 1889.0f, 1890.0f, 1891.0f, 1892.0f, 1893.0f, 1894.0f, 1895.0f, 1896.0f, 1897.0f, 
1898.0f, 1899.0f, 1900.0f, 1901.0f, 1902.0f, 1903.0f, 1904.0f, 1905.0f, 1906.0f, 1907.0f, 1908.0f, 1909.0f, 1910.0f, 1911.0f, 1912.0f, 1913.0f, 1914.0f, 1915.0f, 1916.0f, 1917.0f, 1918.0f, 1919.0f, 1920.0f, 1921.0f, 1922.0f, 1923.0f, 1924.0f, 1925.0f, 1926.0f, 1927.0f, 1928.0f, 1929.0f, 1930.0f, 1931.0f, 1932.0f, 1933.0f, 1934.0f, 1935.0f, 1936.0f, 1937.0f, 1938.0f, 1939.0f, 1940.0f, 1941.0f, 1942.0f, 1943.0f, 1944.0f, 1945.0f, 1946.0f, 1947.0f, 1948.0f, 1949.0f, 1950.0f, 1951.0f, 1952.0f, 1953.0f, 1954.0f, 1955.0f, 1956.0f, 1957.0f, 1958.0f, 1959.0f, 1960.0f, 1961.0f, 1962.0f, 1963.0f, 1964.0f, 1965.0f, 1966.0f, 1967.0f, 1968.0f, 1969.0f, 1970.0f, 1971.0f, 1972.0f, 1973.0f, 1974.0f, 1975.0f, 1976.0f, 1977.0f, 1978.0f, 1979.0f, 1980.0f, 1981.0f, 1982.0f, 1983.0f, 1984.0f, 1985.0f, 1986.0f, 1987.0f, 1988.0f, 1989.0f, 1990.0f, 1991.0f, 1992.0f, 1993.0f, 1994.0f, 1995.0f, 1996.0f, 1997.0f, 1998.0f, 1999.0f, 2000.0f, 2001.0f, 2002.0f, 2003.0f, 2004.0f, 2005.0f, 2006.0f, 2007.0f, 2008.0f, 2009.0f, 2010.0f, 2011.0f, 2012.0f, 2013.0f, 2014.0f, 2015.0f, 2016.0f, 2017.0f, 2018.0f, 2019.0f, 2020.0f, 2021.0f, 2022.0f, 2023.0f, 2024.0f, 2025.0f, 2026.0f, 2027.0f, 2028.0f, 2029.0f, 2030.0f, 2031.0f, 2032.0f, 2033.0f, 2034.0f, 2035.0f, 2036.0f, 2037.0f, 2038.0f, 2039.0f, 2040.0f, 2041.0f, 2042.0f, 2043.0f, 2044.0f, 2045.0f, 2046.0f, 2047.0f, 2048.0f, 2049.0f, 2050.0f, 2051.0f, 2052.0f, 2053.0f, 2054.0f, 2055.0f, 2056.0f, 2057.0f, 2058.0f, 2059.0f, 2060.0f, 2061.0f, 2062.0f, 2063.0f, 2064.0f, 2065.0f, 2066.0f, 2067.0f, 2068.0f, 2069.0f, 2070.0f, 2071.0f, 2072.0f, 2073.0f, 2074.0f, 2075.0f, 2076.0f, 2077.0f, 2078.0f, 2079.0f, 2080.0f, 2081.0f, 2082.0f, 2083.0f, 2084.0f, 2085.0f, 2086.0f, 2087.0f, 2088.0f, 2089.0f, 2090.0f, 2091.0f, 2092.0f, 2093.0f, 2094.0f, 2095.0f, 2096.0f, 2097.0f, 2098.0f, 2099.0f, 2100.0f, 2101.0f, 2102.0f, 2103.0f, 2104.0f, 2105.0f, 2106.0f, 2107.0f, 2108.0f, 2109.0f, 2110.0f, 2111.0f, 2112.0f, 2113.0f, 2114.0f, 2115.0f, 2116.0f, 2117.0f, 2118.0f, 2119.0f, 
2120.0f, 2121.0f, 2122.0f, 2123.0f, 2124.0f, 2125.0f, 2126.0f, 2127.0f, 2128.0f, 2129.0f, 2130.0f, 2131.0f, 2132.0f, 2133.0f, 2134.0f, 2135.0f, 2136.0f, 2137.0f, 2138.0f, 2139.0f, 2140.0f, 2141.0f, 2142.0f, 2143.0f, 2144.0f, 2145.0f, 2146.0f, 2147.0f, 2148.0f, 2149.0f, 2150.0f, 2151.0f, 2152.0f, 2153.0f, 2154.0f, 2155.0f, 2156.0f, 2157.0f, 2158.0f, 2159.0f, 2160.0f, 2161.0f, 2162.0f, 2163.0f, 2164.0f, 2165.0f, 2166.0f, 2167.0f, 2168.0f, 2169.0f, 2170.0f, 2171.0f, 2172.0f, 2173.0f, 2174.0f, 2175.0f, 2176.0f, 2177.0f, 2178.0f, 2179.0f, 2180.0f, 2181.0f, 2182.0f, 2183.0f, 2184.0f, 2185.0f, 2186.0f, 2187.0f, 2188.0f, 2189.0f, 2190.0f, 2191.0f, 2192.0f, 2193.0f, 2194.0f, 2195.0f, 2196.0f, 2197.0f, 2198.0f, 2199.0f, 2200.0f, 2201.0f, 2202.0f, 2203.0f, 2204.0f, 2205.0f, 2206.0f, 2207.0f, 2208.0f, 2209.0f, 2210.0f, 2211.0f, 2212.0f, 2213.0f, 2214.0f, 2215.0f, 2216.0f, 2217.0f, 2218.0f, 2219.0f, 2220.0f, 2221.0f, 2222.0f, 2223.0f, 2224.0f, 2225.0f, 2226.0f, 2227.0f, 2228.0f, 2229.0f, 2230.0f, 2231.0f, 2232.0f, 2233.0f, 2234.0f, 2235.0f, 2236.0f, 2237.0f, 2238.0f, 2239.0f, 2240.0f, 2241.0f, 2242.0f, 2243.0f, 2244.0f, 2245.0f, 2246.0f, 2247.0f, 2248.0f, 2249.0f, 2250.0f, 2251.0f, 2252.0f, 2253.0f, 2254.0f, 2255.0f, 2256.0f, 2257.0f, 2258.0f, 2259.0f, 2260.0f, 2261.0f, 2262.0f, 2263.0f, 2264.0f, 2265.0f, 2266.0f, 2267.0f, 2268.0f, 2269.0f, 2270.0f, 2271.0f, 2272.0f, 2273.0f, 2274.0f, 2275.0f, 2276.0f, 2277.0f, 2278.0f, 2279.0f, 2280.0f, 2281.0f, 2282.0f, 2283.0f, 2284.0f, 2285.0f, 2286.0f, 2287.0f, 2288.0f, 2289.0f, 2290.0f, 2291.0f, 2292.0f, 2293.0f, 2294.0f, 2295.0f, 2296.0f, 2297.0f, 2298.0f, 2299.0f, 2300.0f, 2301.0f, 2302.0f, 2303.0f, 2304.0f, 2305.0f, 2306.0f, 2307.0f, 2308.0f, 2309.0f, 2310.0f, 2311.0f, 2312.0f, 2313.0f, 2314.0f, 2315.0f, 2316.0f, 2317.0f, 2318.0f, 2319.0f, 2320.0f, 2321.0f, 2322.0f, 2323.0f, 2324.0f, 2325.0f, 2326.0f, 2327.0f, 2328.0f, 2329.0f, 2330.0f, 2331.0f, 2332.0f, 2333.0f, 2334.0f, 2335.0f, 2336.0f, 2337.0f, 2338.0f, 2339.0f, 2340.0f, 2341.0f, 
2342.0f, 2343.0f, 2344.0f, 2345.0f, 2346.0f, 2347.0f, 2348.0f, 2349.0f, 2350.0f, 2351.0f, 2352.0f, 2353.0f, 2354.0f, 2355.0f, 2356.0f, 2357.0f, 2358.0f, 2359.0f, 2360.0f, 2361.0f, 2362.0f, 2363.0f, 2364.0f, 2365.0f, 2366.0f, 2367.0f, 2368.0f, 2369.0f, 2370.0f, 2371.0f, 2372.0f, 2373.0f, 2374.0f, 2375.0f, 2376.0f, 2377.0f, 2378.0f, 2379.0f, 2380.0f, 2381.0f, 2382.0f, 2383.0f, 2384.0f, 2385.0f, 2386.0f, 2387.0f, 2388.0f, 2389.0f, 2390.0f, 2391.0f, 2392.0f, 2393.0f, 2394.0f, 2395.0f, 2396.0f, 2397.0f, 2398.0f, 2399.0f, 2400.0f, 2401.0f, 2402.0f, 2403.0f, 2404.0f, 2405.0f, 2406.0f, 2407.0f, 2408.0f, 2409.0f, 2410.0f, 2411.0f, 2412.0f, 2413.0f, 2414.0f, 2415.0f, 2416.0f, 2417.0f, 2418.0f, 2419.0f, 2420.0f, 2421.0f, 2422.0f, 2423.0f, 2424.0f, 2425.0f, 2426.0f, 2427.0f, 2428.0f, 2429.0f, 2430.0f, 2431.0f, 2432.0f, 2433.0f, 2434.0f, 2435.0f, 2436.0f, 2437.0f, 2438.0f, 2439.0f, 2440.0f, 2441.0f, 2442.0f, 2443.0f, 2444.0f, 2445.0f, 2446.0f, 2447.0f, 2448.0f, 2449.0f, 2450.0f, 2451.0f, 2452.0f, 2453.0f, 2454.0f, 2455.0f, 2456.0f, 2457.0f, 2458.0f, 2459.0f, 2460.0f, 2461.0f, 2462.0f, 2463.0f, 2464.0f, 2465.0f, 2466.0f, 2467.0f, 2468.0f, 2469.0f, 2470.0f, 2471.0f, 2472.0f, 2473.0f, 2474.0f, 2475.0f, 2476.0f, 2477.0f, 2478.0f, 2479.0f, 2480.0f, 2481.0f, 2482.0f, 2483.0f, 2484.0f, 2485.0f, 2486.0f, 2487.0f, 2488.0f, 2489.0f, 2490.0f, 2491.0f, 2492.0f, 2493.0f, 2494.0f, 2495.0f, 2496.0f, 2497.0f, 2498.0f, 2499.0f, 2500.0f, 2501.0f, 2502.0f, 2503.0f, 2504.0f, 2505.0f, 2506.0f, 2507.0f, 2508.0f, 2509.0f, 2510.0f, 2511.0f, 2512.0f, 2513.0f, 2514.0f, 2515.0f, 2516.0f, 2517.0f, 2518.0f, 2519.0f, 2520.0f, 2521.0f, 2522.0f, 2523.0f, 2524.0f, 2525.0f, 2526.0f, 2527.0f, 2528.0f, 2529.0f, 2530.0f, 2531.0f, 2532.0f, 2533.0f, 2534.0f, 2535.0f, 2536.0f, 2537.0f, 2538.0f, 2539.0f, 2540.0f, 2541.0f, 2542.0f, 2543.0f, 2544.0f, 2545.0f, 2546.0f, 2547.0f, 2548.0f, 2549.0f, 2550.0f, 2551.0f, 2552.0f, 2553.0f, 2554.0f, 2555.0f, 2556.0f, 2557.0f, 2558.0f, 2559.0f, 2560.0f, 2561.0f, 2562.0f, 2563.0f, 
2564.0f, 2565.0f, 2566.0f, 2567.0f, 2568.0f, 2569.0f, 2570.0f, 2571.0f, 2572.0f, 2573.0f, 2574.0f, 2575.0f, 2576.0f, 2577.0f, 2578.0f, 2579.0f, 2580.0f, 2581.0f, 2582.0f, 2583.0f, 2584.0f, 2585.0f, 2586.0f, 2587.0f, 2588.0f, 2589.0f, 2590.0f, 2591.0f, 2592.0f, 2593.0f, 2594.0f, 2595.0f, 2596.0f, 2597.0f, 2598.0f, 2599.0f, 2600.0f, 2601.0f, 2602.0f, 2603.0f, 2604.0f, 2605.0f, 2606.0f, 2607.0f, 2608.0f, 2609.0f, 2610.0f, 2611.0f, 2612.0f, 2613.0f, 2614.0f, 2615.0f, 2616.0f, 2617.0f, 2618.0f, 2619.0f, 2620.0f, 2621.0f, 2622.0f, 2623.0f, 2624.0f, 2625.0f, 2626.0f, 2627.0f, 2628.0f, 2629.0f, 2630.0f, 2631.0f, 2632.0f, 2633.0f, 2634.0f, 2635.0f, 2636.0f, 2637.0f, 2638.0f, 2639.0f, 2640.0f, 2641.0f, 2642.0f, 2643.0f, 2644.0f, 2645.0f, 2646.0f, 2647.0f, 2648.0f, 2649.0f, 2650.0f, 2651.0f, 2652.0f, 2653.0f, 2654.0f, 2655.0f, 2656.0f, 2657.0f, 2658.0f, 2659.0f, 2660.0f, 2661.0f, 2662.0f, 2663.0f, 2664.0f, 2665.0f, 2666.0f, 2667.0f, 2668.0f, 2669.0f, 2670.0f, 2671.0f, 2672.0f, 2673.0f, 2674.0f, 2675.0f, 2676.0f, 2677.0f, 2678.0f, 2679.0f, 2680.0f, 2681.0f, 2682.0f, 2683.0f, 2684.0f, 2685.0f, 2686.0f, 2687.0f, 2688.0f, 2689.0f, 2690.0f, 2691.0f, 2692.0f, 2693.0f, 2694.0f, 2695.0f, 2696.0f, 2697.0f, 2698.0f, 2699.0f, 2700.0f, 2701.0f, 2702.0f, 2703.0f, 2704.0f, 2705.0f, 2706.0f, 2707.0f, 2708.0f, 2709.0f, 2710.0f, 2711.0f, 2712.0f, 2713.0f, 2714.0f, 2715.0f, 2716.0f, 2717.0f, 2718.0f, 2719.0f, 2720.0f, 2721.0f, 2722.0f, 2723.0f, 2724.0f, 2725.0f, 2726.0f, 2727.0f, 2728.0f, 2729.0f, 2730.0f, 2731.0f, 2732.0f, 2733.0f, 2734.0f, 2735.0f, 2736.0f, 2737.0f, 2738.0f, 2739.0f, 2740.0f, 2741.0f, 2742.0f, 2743.0f, 2744.0f, 2745.0f, 2746.0f, 2747.0f, 2748.0f, 2749.0f, 2750.0f, 2751.0f, 2752.0f, 2753.0f, 2754.0f, 2755.0f, 2756.0f, 2757.0f, 2758.0f, 2759.0f, 2760.0f, 2761.0f, 2762.0f, 2763.0f, 2764.0f, 2765.0f, 2766.0f, 2767.0f, 2768.0f, 2769.0f, 2770.0f, 2771.0f, 2772.0f, 2773.0f, 2774.0f, 2775.0f, 2776.0f, 2777.0f, 2778.0f, 2779.0f, 2780.0f, 2781.0f, 2782.0f, 2783.0f, 2784.0f, 2785.0f, 
2786.0f, 2787.0f, 2788.0f, 2789.0f, 2790.0f, 2791.0f, 2792.0f, 2793.0f, 2794.0f, 2795.0f, 2796.0f, 2797.0f, 2798.0f, 2799.0f, 2800.0f, 2801.0f, 2802.0f, 2803.0f, 2804.0f, 2805.0f, 2806.0f, 2807.0f, 2808.0f, 2809.0f, 2810.0f, 2811.0f, 2812.0f, 2813.0f, 2814.0f, 2815.0f, 2816.0f, 2817.0f, 2818.0f, 2819.0f, 2820.0f, 2821.0f, 2822.0f, 2823.0f, 2824.0f, 2825.0f, 2826.0f, 2827.0f, 2828.0f, 2829.0f, 2830.0f, 2831.0f, 2832.0f, 2833.0f, 2834.0f, 2835.0f, 2836.0f, 2837.0f, 2838.0f, 2839.0f, 2840.0f, 2841.0f, 2842.0f, 2843.0f, 2844.0f, 2845.0f, 2846.0f, 2847.0f, 2848.0f, 2849.0f, 2850.0f, 2851.0f, 2852.0f, 2853.0f, 2854.0f, 2855.0f, 2856.0f, 2857.0f, 2858.0f, 2859.0f, 2860.0f, 2861.0f, 2862.0f, 2863.0f, 2864.0f, 2865.0f, 2866.0f, 2867.0f, 2868.0f, 2869.0f, 2870.0f, 2871.0f, 2872.0f, 2873.0f, 2874.0f, 2875.0f, 2876.0f, 2877.0f, 2878.0f, 2879.0f, 2880.0f, 2881.0f, 2882.0f, 2883.0f, 2884.0f, 2885.0f, 2886.0f, 2887.0f, 2888.0f, 2889.0f, 2890.0f, 2891.0f, 2892.0f, 2893.0f, 2894.0f, 2895.0f, 2896.0f, 2897.0f, 2898.0f, 2899.0f, 2900.0f, 2901.0f, 2902.0f, 2903.0f, 2904.0f, 2905.0f, 2906.0f, 2907.0f, 2908.0f, 2909.0f, 2910.0f, 2911.0f, 2912.0f, 2913.0f, 2914.0f, 2915.0f, 2916.0f, 2917.0f, 2918.0f, 2919.0f, 2920.0f, 2921.0f, 2922.0f, 2923.0f, 2924.0f, 2925.0f, 2926.0f, 2927.0f, 2928.0f, 2929.0f, 2930.0f, 2931.0f, 2932.0f, 2933.0f, 2934.0f, 2935.0f, 2936.0f, 2937.0f, 2938.0f, 2939.0f, 2940.0f, 2941.0f, 2942.0f, 2943.0f, 2944.0f, 2945.0f, 2946.0f, 2947.0f, 2948.0f, 2949.0f, 2950.0f, 2951.0f, 2952.0f, 2953.0f, 2954.0f, 2955.0f, 2956.0f, 2957.0f, 2958.0f, 2959.0f, 2960.0f, 2961.0f, 2962.0f, 2963.0f, 2964.0f, 2965.0f, 2966.0f, 2967.0f, 2968.0f, 2969.0f, 2970.0f, 2971.0f, 2972.0f, 2973.0f, 2974.0f, 2975.0f, 2976.0f, 2977.0f, 2978.0f, 2979.0f, 2980.0f, 2981.0f, 2982.0f, 2983.0f, 2984.0f, 2985.0f, 2986.0f, 2987.0f, 2988.0f, 2989.0f, 2990.0f, 2991.0f, 2992.0f, 2993.0f, 2994.0f, 2995.0f, 2996.0f, 2997.0f, 2998.0f, 2999.0f, 3000.0f, 3001.0f, 3002.0f, 3003.0f, 3004.0f, 3005.0f, 3006.0f, 3007.0f, 
3008.0f, 3009.0f, 3010.0f, 3011.0f, 3012.0f, 3013.0f, 3014.0f, 3015.0f, 3016.0f, 3017.0f, 3018.0f, 3019.0f, 3020.0f, 3021.0f, 3022.0f, 3023.0f, 3024.0f, 3025.0f, 3026.0f, 3027.0f, 3028.0f, 3029.0f, 3030.0f, 3031.0f, 3032.0f, 3033.0f, 3034.0f, 3035.0f, 3036.0f, 3037.0f, 3038.0f, 3039.0f, 3040.0f, 3041.0f, 3042.0f, 3043.0f, 3044.0f, 3045.0f, 3046.0f, 3047.0f, 3048.0f, 3049.0f, 3050.0f, 3051.0f, 3052.0f, 3053.0f, 3054.0f, 3055.0f, 3056.0f, 3057.0f, 3058.0f, 3059.0f, 3060.0f, 3061.0f, 3062.0f, 3063.0f, 3064.0f, 3065.0f, 3066.0f, 3067.0f, 3068.0f, 3069.0f, 3070.0f, 3071.0f, 3072.0f, 3073.0f, 3074.0f, 3075.0f, 3076.0f, 3077.0f, 3078.0f, 3079.0f, 3080.0f, 3081.0f, 3082.0f, 3083.0f, 3084.0f, 3085.0f, 3086.0f, 3087.0f, 3088.0f, 3089.0f, 3090.0f, 3091.0f, 3092.0f, 3093.0f, 3094.0f, 3095.0f, 3096.0f, 3097.0f, 3098.0f, 3099.0f, 3100.0f, 3101.0f, 3102.0f, 3103.0f, 3104.0f, 3105.0f, 3106.0f, 3107.0f, 3108.0f, 3109.0f, 3110.0f, 3111.0f, 3112.0f, 3113.0f, 3114.0f, 3115.0f, 3116.0f, 3117.0f, 3118.0f, 3119.0f, 3120.0f, 3121.0f, 3122.0f, 3123.0f, 3124.0f, 3125.0f, 3126.0f, 3127.0f, 3128.0f, 3129.0f, 3130.0f, 3131.0f, 3132.0f, 3133.0f, 3134.0f, 3135.0f, 3136.0f, 3137.0f, 3138.0f, 3139.0f, 3140.0f, 3141.0f, 3142.0f, 3143.0f, 3144.0f, 3145.0f, 3146.0f, 3147.0f, 3148.0f, 3149.0f, 3150.0f, 3151.0f, 3152.0f, 3153.0f, 3154.0f, 3155.0f, 3156.0f, 3157.0f, 3158.0f, 3159.0f, 3160.0f, 3161.0f, 3162.0f, 3163.0f, 3164.0f, 3165.0f, 3166.0f, 3167.0f, 3168.0f, 3169.0f, 3170.0f, 3171.0f, 3172.0f, 3173.0f, 3174.0f, 3175.0f, 3176.0f, 3177.0f, 3178.0f, 3179.0f, 3180.0f, 3181.0f, 3182.0f, 3183.0f, 3184.0f, 3185.0f, 3186.0f, 3187.0f, 3188.0f, 3189.0f, 3190.0f, 3191.0f, 3192.0f, 3193.0f, 3194.0f, 3195.0f, 3196.0f, 3197.0f, 3198.0f, 3199.0f, 3200.0f, 3201.0f, 3202.0f, 3203.0f, 3204.0f, 3205.0f, 3206.0f, 3207.0f, 3208.0f, 3209.0f, 3210.0f, 3211.0f, 3212.0f, 3213.0f, 3214.0f, 3215.0f, 3216.0f, 3217.0f, 3218.0f, 3219.0f, 3220.0f, 3221.0f, 3222.0f, 3223.0f, 3224.0f, 3225.0f, 3226.0f, 3227.0f, 3228.0f, 3229.0f, 
3230.0f, 3231.0f, 3232.0f, 3233.0f, 3234.0f, 3235.0f, 3236.0f, 3237.0f, 3238.0f, 3239.0f, 3240.0f, 3241.0f, 3242.0f, 3243.0f, 3244.0f, 3245.0f, 3246.0f, 3247.0f, 3248.0f, 3249.0f, 3250.0f, 3251.0f, 3252.0f, 3253.0f, 3254.0f, 3255.0f, 3256.0f, 3257.0f, 3258.0f, 3259.0f, 3260.0f, 3261.0f, 3262.0f, 3263.0f, 3264.0f, 3265.0f, 3266.0f, 3267.0f, 3268.0f, 3269.0f, 3270.0f, 3271.0f, 3272.0f, 3273.0f, 3274.0f, 3275.0f, 3276.0f, 3277.0f, 3278.0f, 3279.0f, 3280.0f, 3281.0f, 3282.0f, 3283.0f, 3284.0f, 3285.0f, 3286.0f, 3287.0f, 3288.0f, 3289.0f, 3290.0f, 3291.0f, 3292.0f, 3293.0f, 3294.0f, 3295.0f, 3296.0f, 3297.0f, 3298.0f, 3299.0f, 3300.0f, 3301.0f, 3302.0f, 3303.0f, 3304.0f, 3305.0f, 3306.0f, 3307.0f, 3308.0f, 3309.0f, 3310.0f, 3311.0f, 3312.0f, 3313.0f, 3314.0f, 3315.0f, 3316.0f, 3317.0f, 3318.0f, 3319.0f, 3320.0f, 3321.0f, 3322.0f, 3323.0f, 3324.0f, 3325.0f, 3326.0f, 3327.0f, 3328.0f, 3329.0f, 3330.0f, 3331.0f, 3332.0f, 3333.0f, 3334.0f, 3335.0f, 3336.0f, 3337.0f, 3338.0f, 3339.0f, 3340.0f, 3341.0f, 3342.0f, 3343.0f, 3344.0f, 3345.0f, 3346.0f, 3347.0f, 3348.0f, 3349.0f, 3350.0f, 3351.0f, 3352.0f, 3353.0f, 3354.0f, 3355.0f, 3356.0f, 3357.0f, 3358.0f, 3359.0f, 3360.0f, 3361.0f, 3362.0f, 3363.0f, 3364.0f, 3365.0f, 3366.0f, 3367.0f, 3368.0f, 3369.0f, 3370.0f, 3371.0f, 3372.0f, 3373.0f, 3374.0f, 3375.0f, 3376.0f, 3377.0f, 3378.0f, 3379.0f, 3380.0f, 3381.0f, 3382.0f, 3383.0f, 3384.0f, 3385.0f, 3386.0f, 3387.0f, 3388.0f, 3389.0f, 3390.0f, 3391.0f, 3392.0f, 3393.0f, 3394.0f, 3395.0f, 3396.0f, 3397.0f, 3398.0f, 3399.0f, 3400.0f, 3401.0f, 3402.0f, 3403.0f, 3404.0f, 3405.0f, 3406.0f, 3407.0f, 3408.0f, 3409.0f, 3410.0f, 3411.0f, 3412.0f, 3413.0f, 3414.0f, 3415.0f, 3416.0f, 3417.0f, 3418.0f, 3419.0f, 3420.0f, 3421.0f, 3422.0f, 3423.0f, 3424.0f, 3425.0f, 3426.0f, 3427.0f, 3428.0f, 3429.0f, 3430.0f, 3431.0f, 3432.0f, 3433.0f, 3434.0f, 3435.0f, 3436.0f, 3437.0f, 3438.0f, 3439.0f, 3440.0f, 3441.0f, 3442.0f, 3443.0f, 3444.0f, 3445.0f, 3446.0f, 3447.0f, 3448.0f, 3449.0f, 3450.0f, 3451.0f, 
3452.0f, 3453.0f, 3454.0f, 3455.0f, 3456.0f, 3457.0f, 3458.0f, 3459.0f, 3460.0f, 3461.0f, 3462.0f, 3463.0f, 3464.0f, 3465.0f, 3466.0f, 3467.0f, 3468.0f, 3469.0f, 3470.0f, 3471.0f, 3472.0f, 3473.0f, 3474.0f, 3475.0f, 3476.0f, 3477.0f, 3478.0f, 3479.0f, 3480.0f, 3481.0f, 3482.0f, 3483.0f, 3484.0f, 3485.0f, 3486.0f, 3487.0f, 3488.0f, 3489.0f, 3490.0f, 3491.0f, 3492.0f, 3493.0f, 3494.0f, 3495.0f, 3496.0f, 3497.0f, 3498.0f, 3499.0f, 3500.0f, 3501.0f, 3502.0f, 3503.0f, 3504.0f, 3505.0f, 3506.0f, 3507.0f, 3508.0f, 3509.0f, 3510.0f, 3511.0f, 3512.0f, 3513.0f, 3514.0f, 3515.0f, 3516.0f, 3517.0f, 3518.0f, 3519.0f, 3520.0f, 3521.0f, 3522.0f, 3523.0f, 3524.0f, 3525.0f, 3526.0f, 3527.0f, 3528.0f, 3529.0f, 3530.0f, 3531.0f, 3532.0f, 3533.0f, 3534.0f, 3535.0f, 3536.0f, 3537.0f, 3538.0f, 3539.0f, 3540.0f, 3541.0f, 3542.0f, 3543.0f, 3544.0f, 3545.0f, 3546.0f, 3547.0f, 3548.0f, 3549.0f, 3550.0f, 3551.0f, 3552.0f, 3553.0f, 3554.0f, 3555.0f, 3556.0f, 3557.0f, 3558.0f, 3559.0f, 3560.0f, 3561.0f, 3562.0f, 3563.0f, 3564.0f, 3565.0f, 3566.0f, 3567.0f, 3568.0f, 3569.0f, 3570.0f, 3571.0f, 3572.0f, 3573.0f, 3574.0f, 3575.0f, 3576.0f, 3577.0f, 3578.0f, 3579.0f, 3580.0f, 3581.0f, 3582.0f, 3583.0f, 3584.0f, 3585.0f, 3586.0f, 3587.0f, 3588.0f, 3589.0f, 3590.0f, 3591.0f, 3592.0f, 3593.0f, 3594.0f, 3595.0f, 3596.0f, 3597.0f, 3598.0f, 3599.0f, 3600.0f, 3601.0f, 3602.0f, 3603.0f, 3604.0f, 3605.0f, 3606.0f, 3607.0f, 3608.0f, 3609.0f, 3610.0f, 3611.0f, 3612.0f, 3613.0f, 3614.0f, 3615.0f, 3616.0f, 3617.0f, 3618.0f, 3619.0f, 3620.0f, 3621.0f, 3622.0f, 3623.0f, 3624.0f, 3625.0f, 3626.0f, 3627.0f, 3628.0f, 3629.0f, 3630.0f, 3631.0f, 3632.0f, 3633.0f, 3634.0f, 3635.0f, 3636.0f, 3637.0f, 3638.0f, 3639.0f, 3640.0f, 3641.0f, 3642.0f, 3643.0f, 3644.0f, 3645.0f, 3646.0f, 3647.0f, 3648.0f, 3649.0f, 3650.0f, 3651.0f, 3652.0f, 3653.0f, 3654.0f, 3655.0f, 3656.0f, 3657.0f, 3658.0f, 3659.0f, 3660.0f, 3661.0f, 3662.0f, 3663.0f, 3664.0f, 3665.0f, 3666.0f, 3667.0f, 3668.0f, 3669.0f, 3670.0f, 3671.0f, 3672.0f, 3673.0f, 
3674.0f, 3675.0f, 3676.0f, 3677.0f, 3678.0f, 3679.0f, 3680.0f, 3681.0f, 3682.0f, 3683.0f, 3684.0f, 3685.0f, 3686.0f, 3687.0f, 3688.0f, 3689.0f, 3690.0f, 3691.0f, 3692.0f, 3693.0f, 3694.0f, 3695.0f, 3696.0f, 3697.0f, 3698.0f, 3699.0f, 3700.0f, 3701.0f, 3702.0f, 3703.0f, 3704.0f, 3705.0f, 3706.0f, 3707.0f, 3708.0f, 3709.0f, 3710.0f, 3711.0f, 3712.0f, 3713.0f, 3714.0f, 3715.0f, 3716.0f, 3717.0f, 3718.0f, 3719.0f, 3720.0f, 3721.0f, 3722.0f, 3723.0f, 3724.0f, 3725.0f, 3726.0f, 3727.0f, 3728.0f, 3729.0f, 3730.0f, 3731.0f, 3732.0f, 3733.0f, 3734.0f, 3735.0f, 3736.0f, 3737.0f, 3738.0f, 3739.0f, 3740.0f, 3741.0f, 3742.0f, 3743.0f, 3744.0f, 3745.0f, 3746.0f, 3747.0f, 3748.0f, 3749.0f, 3750.0f, 3751.0f, 3752.0f, 3753.0f, 3754.0f, 3755.0f, 3756.0f, 3757.0f, 3758.0f, 3759.0f, 3760.0f, 3761.0f, 3762.0f, 3763.0f, 3764.0f, 3765.0f, 3766.0f, 3767.0f, 3768.0f, 3769.0f, 3770.0f, 3771.0f, 3772.0f, 3773.0f, 3774.0f, 3775.0f, 3776.0f, 3777.0f, 3778.0f, 3779.0f, 3780.0f, 3781.0f, 3782.0f, 3783.0f, 3784.0f, 3785.0f, 3786.0f, 3787.0f, 3788.0f, 3789.0f, 3790.0f, 3791.0f, 3792.0f, 3793.0f, 3794.0f, 3795.0f, 3796.0f, 3797.0f, 3798.0f, 3799.0f, 3800.0f, 3801.0f, 3802.0f, 3803.0f, 3804.0f, 3805.0f, 3806.0f, 3807.0f, 3808.0f, 3809.0f, 3810.0f, 3811.0f, 3812.0f, 3813.0f, 3814.0f, 3815.0f, 3816.0f, 3817.0f, 3818.0f, 3819.0f, 3820.0f, 3821.0f, 3822.0f, 3823.0f, 3824.0f, 3825.0f, 3826.0f, 3827.0f, 3828.0f, 3829.0f, 3830.0f, 3831.0f, 3832.0f, 3833.0f, 3834.0f, 3835.0f, 3836.0f, 3837.0f, 3838.0f, 3839.0f, 3840.0f, 3841.0f, 3842.0f, 3843.0f, 3844.0f, 3845.0f, 3846.0f, 3847.0f, 3848.0f, 3849.0f, 3850.0f, 3851.0f, 3852.0f, 3853.0f, 3854.0f, 3855.0f, 3856.0f, 3857.0f, 3858.0f, 3859.0f, 3860.0f, 3861.0f, 3862.0f, 3863.0f, 3864.0f, 3865.0f, 3866.0f, 3867.0f, 3868.0f, 3869.0f, 3870.0f, 3871.0f, 3872.0f, 3873.0f, 3874.0f, 3875.0f, 3876.0f, 3877.0f, 3878.0f, 3879.0f, 3880.0f, 3881.0f, 3882.0f, 3883.0f, 3884.0f, 3885.0f, 3886.0f, 3887.0f, 3888.0f, 3889.0f, 3890.0f, 3891.0f, 3892.0f, 3893.0f, 3894.0f, 3895.0f, 
3896.0f, 3897.0f, 3898.0f, 3899.0f, 3900.0f, 3901.0f, 3902.0f, 3903.0f, 3904.0f, 3905.0f, 3906.0f, 3907.0f, 3908.0f, 3909.0f, 3910.0f, 3911.0f, 3912.0f, 3913.0f, 3914.0f, 3915.0f, 3916.0f, 3917.0f, 3918.0f, 3919.0f, 3920.0f, 3921.0f, 3922.0f, 3923.0f, 3924.0f, 3925.0f, 3926.0f, 3927.0f, 3928.0f, 3929.0f, 3930.0f, 3931.0f, 3932.0f, 3933.0f, 3934.0f, 3935.0f, 3936.0f, 3937.0f, 3938.0f, 3939.0f, 3940.0f, 3941.0f, 3942.0f, 3943.0f, 3944.0f, 3945.0f, 3946.0f, 3947.0f, 3948.0f, 3949.0f, 3950.0f, 3951.0f, 3952.0f, 3953.0f, 3954.0f, 3955.0f, 3956.0f, 3957.0f, 3958.0f, 3959.0f, 3960.0f, 3961.0f, 3962.0f, 3963.0f, 3964.0f, 3965.0f, 3966.0f, 3967.0f, 3968.0f, 3969.0f, 3970.0f, 3971.0f, 3972.0f, 3973.0f, 3974.0f, 3975.0f, 3976.0f, 3977.0f, 3978.0f, 3979.0f, 3980.0f, 3981.0f, 3982.0f, 3983.0f, 3984.0f, 3985.0f, 3986.0f, 3987.0f, 3988.0f, 3989.0f, 3990.0f, 3991.0f, 3992.0f, 3993.0f, 3994.0f, 3995.0f, 3996.0f, 3997.0f, 3998.0f, 3999.0f, 4000.0f, 4001.0f, 4002.0f, 4003.0f, 4004.0f, 4005.0f, 4006.0f, 4007.0f, 4008.0f, 4009.0f, 4010.0f, 4011.0f, 4012.0f, 4013.0f, 4014.0f, 4015.0f, 4016.0f, 4017.0f, 4018.0f, 4019.0f, 4020.0f, 4021.0f, 4022.0f, 4023.0f, 4024.0f, 4025.0f, 4026.0f, 4027.0f, 4028.0f, 4029.0f, 4030.0f, 4031.0f, 4032.0f, 4033.0f, 4034.0f, 4035.0f, 4036.0f, 4037.0f, 4038.0f, 4039.0f, 4040.0f, 4041.0f, 4042.0f, 4043.0f, 4044.0f, 4045.0f, 4046.0f, 4047.0f, 4048.0f, 4049.0f, 4050.0f, 4051.0f, 4052.0f, 4053.0f, 4054.0f, 4055.0f, 4056.0f, 4057.0f, 4058.0f, 4059.0f, 4060.0f, 4061.0f, 4062.0f, 4063.0f, 4064.0f, 4065.0f, 4066.0f, 4067.0f, 4068.0f, 4069.0f, 4070.0f, 4071.0f, 4072.0f, 4073.0f, 4074.0f, 4075.0f, 4076.0f, 4077.0f, 4078.0f, 4079.0f, 4080.0f, 4081.0f, 4082.0f, 4083.0f, 4084.0f, 4085.0f, 4086.0f, 4087.0f, 4088.0f, 4089.0f, 4090.0f, 4091.0f, 4092.0f, 4093.0f, 4094.0f, 4095.0f, 4096.0f, 4097.0f, 4098.0f, 4099.0f, 4100.0f, 4101.0f, 4102.0f, 4103.0f, 4104.0f, 4105.0f, 4106.0f, 4107.0f, 4108.0f, 4109.0f, 4110.0f, 4111.0f, 4112.0f, 4113.0f, 4114.0f, 4115.0f, 4116.0f, 4117.0f, 
4118.0f, 4119.0f, 4120.0f, 4121.0f, 4122.0f, 4123.0f, 4124.0f, 4125.0f, 4126.0f, 4127.0f, 4128.0f, 4129.0f, 4130.0f, 4131.0f, 4132.0f, 4133.0f, 4134.0f, 4135.0f, 4136.0f, 4137.0f, 4138.0f, 4139.0f, 4140.0f, 4141.0f, 4142.0f, 4143.0f, 4144.0f, 4145.0f, 4146.0f, 4147.0f, 4148.0f, 4149.0f, 4150.0f, 4151.0f, 4152.0f, 4153.0f, 4154.0f, 4155.0f, 4156.0f, 4157.0f, 4158.0f, 4159.0f, 4160.0f, 4161.0f, 4162.0f, 4163.0f, 4164.0f, 4165.0f, 4166.0f, 4167.0f, 4168.0f, 4169.0f, 4170.0f, 4171.0f, 4172.0f, 4173.0f, 4174.0f, 4175.0f, 4176.0f, 4177.0f, 4178.0f, 4179.0f, 4180.0f, 4181.0f, 4182.0f, 4183.0f, 4184.0f, 4185.0f, 4186.0f, 4187.0f, 4188.0f, 4189.0f, 4190.0f, 4191.0f, 4192.0f, 4193.0f, 4194.0f, 4195.0f, 4196.0f, 4197.0f, 4198.0f, 4199.0f, 4200.0f, 4201.0f, 4202.0f, 4203.0f, 4204.0f, 4205.0f, 4206.0f, 4207.0f, 4208.0f, 4209.0f, 4210.0f, 4211.0f, 4212.0f, 4213.0f, 4214.0f, 4215.0f, 4216.0f, 4217.0f, 4218.0f, 4219.0f, 4220.0f, 4221.0f, 4222.0f, 4223.0f, 4224.0f, 4225.0f, 4226.0f, 4227.0f, 4228.0f, 4229.0f, 4230.0f, 4231.0f, 4232.0f, 4233.0f, 4234.0f, 4235.0f, 4236.0f, 4237.0f, 4238.0f, 4239.0f, 4240.0f, 4241.0f, 4242.0f, 4243.0f, 4244.0f, 4245.0f, 4246.0f, 4247.0f, 4248.0f, 4249.0f, 4250.0f, 4251.0f, 4252.0f, 4253.0f, 4254.0f, 4255.0f, 4256.0f, 4257.0f, 4258.0f, 4259.0f, 4260.0f, 4261.0f, 4262.0f, 4263.0f, 4264.0f, 4265.0f, 4266.0f, 4267.0f, 4268.0f, 4269.0f, 4270.0f, 4271.0f, 4272.0f, 4273.0f, 4274.0f, 4275.0f, 4276.0f, 4277.0f, 4278.0f, 4279.0f, 4280.0f, 4281.0f, 4282.0f, 4283.0f, 4284.0f, 4285.0f, 4286.0f, 4287.0f, 4288.0f, 4289.0f, 4290.0f, 4291.0f, 4292.0f, 4293.0f, 4294.0f, 4295.0f, 4296.0f, 4297.0f, 4298.0f, 4299.0f, 4300.0f, 4301.0f, 4302.0f, 4303.0f, 4304.0f, 4305.0f, 4306.0f, 4307.0f, 4308.0f, 4309.0f, 4310.0f, 4311.0f, 4312.0f, 4313.0f, 4314.0f, 4315.0f, 4316.0f, 4317.0f, 4318.0f, 4319.0f, 4320.0f, 4321.0f, 4322.0f, 4323.0f, 4324.0f, 4325.0f, 4326.0f, 4327.0f, 4328.0f, 4329.0f, 4330.0f, 4331.0f, 4332.0f, 4333.0f, 4334.0f, 4335.0f, 4336.0f, 4337.0f, 4338.0f, 4339.0f, 
4340.0f, 4341.0f, 4342.0f, 4343.0f, 4344.0f, 4345.0f, 4346.0f, 4347.0f, 4348.0f, 4349.0f, 4350.0f, 4351.0f, 4352.0f, 4353.0f, 4354.0f, 4355.0f, 4356.0f, 4357.0f, 4358.0f, 4359.0f, 4360.0f, 4361.0f, 4362.0f, 4363.0f, 4364.0f, 4365.0f, 4366.0f, 4367.0f, 4368.0f, 4369.0f, 4370.0f, 4371.0f, 4372.0f, 4373.0f, 4374.0f, 4375.0f, 4376.0f, 4377.0f, 4378.0f, 4379.0f, 4380.0f, 4381.0f, 4382.0f, 4383.0f, 4384.0f, 4385.0f, 4386.0f, 4387.0f, 4388.0f, 4389.0f, 4390.0f, 4391.0f, 4392.0f, 4393.0f, 4394.0f, 4395.0f, 4396.0f, 4397.0f, 4398.0f, 4399.0f, 4400.0f, 4401.0f, 4402.0f, 4403.0f, 4404.0f, 4405.0f, 4406.0f, 4407.0f, 4408.0f, 4409.0f, 4410.0f, 4411.0f, 4412.0f, 4413.0f, 4414.0f, 4415.0f, 4416.0f, 4417.0f, 4418.0f, 4419.0f, 4420.0f, 4421.0f, 4422.0f, 4423.0f, 4424.0f, 4425.0f, 4426.0f, 4427.0f, 4428.0f, 4429.0f, 4430.0f, 4431.0f, 4432.0f, 4433.0f, 4434.0f, 4435.0f, 4436.0f, 4437.0f, 4438.0f, 4439.0f, 4440.0f, 4441.0f, 4442.0f, 4443.0f, 4444.0f, 4445.0f, 4446.0f, 4447.0f, 4448.0f, 4449.0f, 4450.0f, 4451.0f, 4452.0f, 4453.0f, 4454.0f, 4455.0f, 4456.0f, 4457.0f, 4458.0f, 4459.0f, 4460.0f, 4461.0f, 4462.0f, 4463.0f, 4464.0f, 4465.0f, 4466.0f, 4467.0f, 4468.0f, 4469.0f, 4470.0f, 4471.0f, 4472.0f, 4473.0f, 4474.0f, 4475.0f, 4476.0f, 4477.0f, 4478.0f, 4479.0f, 4480.0f, 4481.0f, 4482.0f, 4483.0f, 4484.0f, 4485.0f, 4486.0f, 4487.0f, 4488.0f, 4489.0f, 4490.0f, 4491.0f, 4492.0f, 4493.0f, 4494.0f, 4495.0f, 4496.0f, 4497.0f, 4498.0f, 4499.0f, 4500.0f, 4501.0f, 4502.0f, 4503.0f, 4504.0f, 4505.0f, 4506.0f, 4507.0f, 4508.0f, 4509.0f, 4510.0f, 4511.0f, 4512.0f, 4513.0f, 4514.0f, 4515.0f, 4516.0f, 4517.0f, 4518.0f, 4519.0f, 4520.0f, 4521.0f, 4522.0f, 4523.0f, 4524.0f, 4525.0f, 4526.0f, 4527.0f, 4528.0f, 4529.0f, 4530.0f, 4531.0f, 4532.0f, 4533.0f, 4534.0f, 4535.0f, 4536.0f, 4537.0f, 4538.0f, 4539.0f, 4540.0f, 4541.0f, 4542.0f, 4543.0f, 4544.0f, 4545.0f, 4546.0f, 4547.0f, 4548.0f, 4549.0f, 4550.0f, 4551.0f, 4552.0f, 4553.0f, 4554.0f, 4555.0f, 4556.0f, 4557.0f, 4558.0f, 4559.0f, 4560.0f, 4561.0f, 
4562.0f, 4563.0f, 4564.0f, 4565.0f, 4566.0f, 4567.0f, 4568.0f, 4569.0f, 4570.0f, 4571.0f, 4572.0f, 4573.0f, 4574.0f, 4575.0f, 4576.0f, 4577.0f, 4578.0f, 4579.0f, 4580.0f, 4581.0f, 4582.0f, 4583.0f, 4584.0f, 4585.0f, 4586.0f, 4587.0f, 4588.0f, 4589.0f, 4590.0f, 4591.0f, 4592.0f, 4593.0f, 4594.0f, 4595.0f, 4596.0f, 4597.0f, 4598.0f, 4599.0f, 4600.0f, 4601.0f, 4602.0f, 4603.0f, 4604.0f, 4605.0f, 4606.0f, 4607.0f, 4608.0f, 4609.0f, 4610.0f, 4611.0f, 4612.0f, 4613.0f, 4614.0f, 4615.0f, 4616.0f, 4617.0f, 4618.0f, 4619.0f, 4620.0f, 4621.0f, 4622.0f, 4623.0f, 4624.0f, 4625.0f, 4626.0f, 4627.0f, 4628.0f, 4629.0f, 4630.0f, 4631.0f, 4632.0f, 4633.0f, 4634.0f, 4635.0f, 4636.0f, 4637.0f, 4638.0f, 4639.0f, 4640.0f, 4641.0f, 4642.0f, 4643.0f, 4644.0f, 4645.0f, 4646.0f, 4647.0f, 4648.0f, 4649.0f, 4650.0f, 4651.0f, 4652.0f, 4653.0f, 4654.0f, 4655.0f, 4656.0f, 4657.0f, 4658.0f, 4659.0f, 4660.0f, 4661.0f, 4662.0f, 4663.0f, 4664.0f, 4665.0f, 4666.0f, 4667.0f, 4668.0f, 4669.0f, 4670.0f, 4671.0f, 4672.0f, 4673.0f, 4674.0f, 4675.0f, 4676.0f, 4677.0f, 4678.0f, 4679.0f, 4680.0f, 4681.0f, 4682.0f, 4683.0f, 4684.0f, 4685.0f, 4686.0f, 4687.0f, 4688.0f, 4689.0f, 4690.0f, 4691.0f, 4692.0f, 4693.0f, 4694.0f, 4695.0f, 4696.0f, 4697.0f, 4698.0f, 4699.0f, 4700.0f, 4701.0f, 4702.0f, 4703.0f, 4704.0f, 4705.0f, 4706.0f, 4707.0f, 4708.0f, 4709.0f, 4710.0f, 4711.0f, 4712.0f, 4713.0f, 4714.0f, 4715.0f, 4716.0f, 4717.0f, 4718.0f, 4719.0f, 4720.0f, 4721.0f, 4722.0f, 4723.0f, 4724.0f, 4725.0f, 4726.0f, 4727.0f, 4728.0f, 4729.0f, 4730.0f, 4731.0f, 4732.0f, 4733.0f, 4734.0f, 4735.0f, 4736.0f, 4737.0f, 4738.0f, 4739.0f, 4740.0f, 4741.0f, 4742.0f, 4743.0f, 4744.0f, 4745.0f, 4746.0f, 4747.0f, 4748.0f, 4749.0f, 4750.0f, 4751.0f, 4752.0f, 4753.0f, 4754.0f, 4755.0f, 4756.0f, 4757.0f, 4758.0f, 4759.0f, 4760.0f, 4761.0f, 4762.0f, 4763.0f, 4764.0f, 4765.0f, 4766.0f, 4767.0f, 4768.0f, 4769.0f, 4770.0f, 4771.0f, 4772.0f, 4773.0f, 4774.0f, 4775.0f, 4776.0f, 4777.0f, 4778.0f, 4779.0f, 4780.0f, 4781.0f, 4782.0f, 4783.0f, 
4784.0f, 4785.0f, 4786.0f, 4787.0f, 4788.0f, 4789.0f, 4790.0f, 4791.0f, 4792.0f, 4793.0f, 4794.0f, 4795.0f, 4796.0f, 4797.0f, 4798.0f, 4799.0f, 4800.0f, 4801.0f, 4802.0f, 4803.0f, 4804.0f, 4805.0f, 4806.0f, 4807.0f, 4808.0f, 4809.0f, 4810.0f, 4811.0f, 4812.0f, 4813.0f, 4814.0f, 4815.0f, 4816.0f, 4817.0f, 4818.0f, 4819.0f, 4820.0f, 4821.0f, 4822.0f, 4823.0f, 4824.0f, 4825.0f, 4826.0f, 4827.0f, 4828.0f, 4829.0f, 4830.0f, 4831.0f, 4832.0f, 4833.0f, 4834.0f, 4835.0f, 4836.0f, 4837.0f, 4838.0f, 4839.0f, 4840.0f, 4841.0f, 4842.0f, 4843.0f, 4844.0f, 4845.0f, 4846.0f, 4847.0f, 4848.0f, 4849.0f, 4850.0f, 4851.0f, 4852.0f, 4853.0f, 4854.0f, 4855.0f, 4856.0f, 4857.0f, 4858.0f, 4859.0f, 4860.0f, 4861.0f, 4862.0f, 4863.0f, 4864.0f, 4865.0f, 4866.0f, 4867.0f, 4868.0f, 4869.0f, 4870.0f, 4871.0f, 4872.0f, 4873.0f, 4874.0f, 4875.0f, 4876.0f, 4877.0f, 4878.0f, 4879.0f, 4880.0f, 4881.0f, 4882.0f, 4883.0f, 4884.0f, 4885.0f, 4886.0f, 4887.0f, 4888.0f, 4889.0f, 4890.0f, 4891.0f, 4892.0f, 4893.0f, 4894.0f, 4895.0f, 4896.0f, 4897.0f, 4898.0f, 4899.0f, 4900.0f, 4901.0f, 4902.0f, 4903.0f, 4904.0f, 4905.0f, 4906.0f, 4907.0f, 4908.0f, 4909.0f, 4910.0f, 4911.0f, 4912.0f, 4913.0f, 4914.0f, 4915.0f, 4916.0f, 4917.0f, 4918.0f, 4919.0f, 4920.0f, 4921.0f, 4922.0f, 4923.0f, 4924.0f, 4925.0f, 4926.0f, 4927.0f, 4928.0f, 4929.0f, 4930.0f, 4931.0f, 4932.0f, 4933.0f, 4934.0f, 4935.0f, 4936.0f, 4937.0f, 4938.0f, 4939.0f, 4940.0f, 4941.0f, 4942.0f, 4943.0f, 4944.0f, 4945.0f, 4946.0f, 4947.0f, 4948.0f, 4949.0f, 4950.0f, 4951.0f, 4952.0f, 4953.0f, 4954.0f, 4955.0f, 4956.0f, 4957.0f, 4958.0f, 4959.0f, 4960.0f, 4961.0f, 4962.0f, 4963.0f, 4964.0f, 4965.0f, 4966.0f, 4967.0f, 4968.0f, 4969.0f, 4970.0f, 4971.0f, 4972.0f, 4973.0f, 4974.0f, 4975.0f, 4976.0f, 4977.0f, 4978.0f, 4979.0f, 4980.0f, 4981.0f, 4982.0f, 4983.0f, 4984.0f, 4985.0f, 4986.0f, 4987.0f, 4988.0f, 4989.0f, 4990.0f, 4991.0f, 4992.0f, 4993.0f, 4994.0f, 4995.0f, 4996.0f, 4997.0f, 4998.0f, 4999.0f, 5000.0f, 5001.0f, 5002.0f, 5003.0f, 5004.0f, 5005.0f, 
5006.0f, 5007.0f, 5008.0f, 5009.0f, 5010.0f, 5011.0f, 5012.0f, 5013.0f, 5014.0f, 5015.0f, 5016.0f, 5017.0f, 5018.0f, 5019.0f, 5020.0f, 5021.0f, 5022.0f, 5023.0f, 5024.0f, 5025.0f, 5026.0f, 5027.0f, 5028.0f, 5029.0f, 5030.0f, 5031.0f, 5032.0f, 5033.0f, 5034.0f, 5035.0f, 5036.0f, 5037.0f, 5038.0f, 5039.0f, 5040.0f, 5041.0f, 5042.0f, 5043.0f, 5044.0f, 5045.0f, 5046.0f, 5047.0f, 5048.0f, 5049.0f, 5050.0f, 5051.0f, 5052.0f, 5053.0f, 5054.0f, 5055.0f, 5056.0f, 5057.0f, 5058.0f, 5059.0f, 5060.0f, 5061.0f, 5062.0f, 5063.0f, 5064.0f, 5065.0f, 5066.0f, 5067.0f, 5068.0f, 5069.0f, 5070.0f, 5071.0f, 5072.0f, 5073.0f, 5074.0f, 5075.0f, 5076.0f, 5077.0f, 5078.0f, 5079.0f, 5080.0f, 5081.0f, 5082.0f, 5083.0f, 5084.0f, 5085.0f, 5086.0f, 5087.0f, 5088.0f, 5089.0f, 5090.0f, 5091.0f, 5092.0f, 5093.0f, 5094.0f, 5095.0f, 5096.0f, 5097.0f, 5098.0f, 5099.0f, 5100.0f, 5101.0f, 5102.0f, 5103.0f, 5104.0f, 5105.0f, 5106.0f, 5107.0f, 5108.0f, 5109.0f, 5110.0f, 5111.0f, 5112.0f, 5113.0f, 5114.0f, 5115.0f, 5116.0f, 5117.0f, 5118.0f, 5119.0f, 5120.0f, 5121.0f, 5122.0f, 5123.0f, 5124.0f, 5125.0f, 5126.0f, 5127.0f, 5128.0f, 5129.0f, 5130.0f, 5131.0f, 5132.0f, 5133.0f, 5134.0f, 5135.0f, 5136.0f, 5137.0f, 5138.0f, 5139.0f, 5140.0f, 5141.0f, 5142.0f, 5143.0f, 5144.0f, 5145.0f, 5146.0f, 5147.0f, 5148.0f, 5149.0f, 5150.0f, 5151.0f, 5152.0f, 5153.0f, 5154.0f, 5155.0f, 5156.0f, 5157.0f, 5158.0f, 5159.0f, 5160.0f, 5161.0f, 5162.0f, 5163.0f, 5164.0f, 5165.0f, 5166.0f, 5167.0f, 5168.0f, 5169.0f, 5170.0f, 5171.0f, 5172.0f, 5173.0f, 5174.0f, 5175.0f, 5176.0f, 5177.0f, 5178.0f, 5179.0f, 5180.0f, 5181.0f, 5182.0f, 5183.0f, 5184.0f, 5185.0f, 5186.0f, 5187.0f, 5188.0f, 5189.0f, 5190.0f, 5191.0f, 5192.0f, 5193.0f, 5194.0f, 5195.0f, 5196.0f, 5197.0f, 5198.0f, 5199.0f, 5200.0f, 5201.0f, 5202.0f, 5203.0f, 5204.0f, 5205.0f, 5206.0f, 5207.0f, 5208.0f, 5209.0f, 5210.0f, 5211.0f, 5212.0f, 5213.0f, 5214.0f, 5215.0f, 5216.0f, 5217.0f, 5218.0f, 5219.0f, 5220.0f, 5221.0f, 5222.0f, 5223.0f, 5224.0f, 5225.0f, 5226.0f, 5227.0f, 
5228.0f, 5229.0f, 5230.0f, 5231.0f, 5232.0f, 5233.0f, 5234.0f, 5235.0f, 5236.0f, 5237.0f, 5238.0f, 5239.0f, 5240.0f, 5241.0f, 5242.0f, 5243.0f, 5244.0f, 5245.0f, 5246.0f, 5247.0f, 5248.0f, 5249.0f, 5250.0f, 5251.0f, 5252.0f, 5253.0f, 5254.0f, 5255.0f, 5256.0f, 5257.0f, 5258.0f, 5259.0f, 5260.0f, 5261.0f, 5262.0f, 5263.0f, 5264.0f, 5265.0f, 5266.0f, 5267.0f, 5268.0f, 5269.0f, 5270.0f, 5271.0f, 5272.0f, 5273.0f, 5274.0f, 5275.0f, 5276.0f, 5277.0f, 5278.0f, 5279.0f, 5280.0f, 5281.0f, 5282.0f, 5283.0f, 5284.0f, 5285.0f, 5286.0f, 5287.0f, 5288.0f, 5289.0f, 5290.0f, 5291.0f, 5292.0f, 5293.0f, 5294.0f, 5295.0f, 5296.0f, 5297.0f, 5298.0f, 5299.0f, 5300.0f, 5301.0f, 5302.0f, 5303.0f, 5304.0f, 5305.0f, 5306.0f, 5307.0f, 5308.0f, 5309.0f, 5310.0f, 5311.0f, 5312.0f, 5313.0f, 5314.0f, 5315.0f, 5316.0f, 5317.0f, 5318.0f, 5319.0f, 5320.0f, 5321.0f, 5322.0f, 5323.0f, 5324.0f, 5325.0f, 5326.0f, 5327.0f, 5328.0f, 5329.0f, 5330.0f, 5331.0f, 5332.0f, 5333.0f, 5334.0f, 5335.0f, 5336.0f, 5337.0f, 5338.0f, 5339.0f, 5340.0f, 5341.0f, 5342.0f, 5343.0f, 5344.0f, 5345.0f, 5346.0f, 5347.0f, 5348.0f, 5349.0f, 5350.0f, 5351.0f, 5352.0f, 5353.0f, 5354.0f, 5355.0f, 5356.0f, 5357.0f, 5358.0f, 5359.0f, 5360.0f, 5361.0f, 5362.0f, 5363.0f, 5364.0f, 5365.0f, 5366.0f, 5367.0f, 5368.0f, 5369.0f, 5370.0f, 5371.0f, 5372.0f, 5373.0f, 5374.0f, 5375.0f, 5376.0f, 5377.0f, 5378.0f, 5379.0f, 5380.0f, 5381.0f, 5382.0f, 5383.0f, 5384.0f, 5385.0f, 5386.0f, 5387.0f, 5388.0f, 5389.0f, 5390.0f, 5391.0f, 5392.0f, 5393.0f, 5394.0f, 5395.0f, 5396.0f, 5397.0f, 5398.0f, 5399.0f, 5400.0f, 5401.0f, 5402.0f, 5403.0f, 5404.0f, 5405.0f, 5406.0f, 5407.0f, 5408.0f, 5409.0f, 5410.0f, 5411.0f, 5412.0f, 5413.0f, 5414.0f, 5415.0f, 5416.0f, 5417.0f, 5418.0f, 5419.0f, 5420.0f, 5421.0f, 5422.0f, 5423.0f, 5424.0f, 5425.0f, 5426.0f, 5427.0f, 5428.0f, 5429.0f, 5430.0f, 5431.0f, 5432.0f, 5433.0f, 5434.0f, 5435.0f, 5436.0f, 5437.0f, 5438.0f, 5439.0f, 5440.0f, 5441.0f, 5442.0f, 5443.0f, 5444.0f, 5445.0f, 5446.0f, 5447.0f, 5448.0f, 5449.0f, 
5450.0f, 5451.0f, 5452.0f, 5453.0f, 5454.0f, 5455.0f, 5456.0f, 5457.0f, 5458.0f, 5459.0f, 5460.0f, 5461.0f, 5462.0f, 5463.0f, 5464.0f, 5465.0f, 5466.0f, 5467.0f, 5468.0f, 5469.0f, 5470.0f, 5471.0f, 5472.0f, 5473.0f, 5474.0f, 5475.0f, 5476.0f, 5477.0f, 5478.0f, 5479.0f, 5480.0f, 5481.0f, 5482.0f, 5483.0f, 5484.0f, 5485.0f, 5486.0f, 5487.0f, 5488.0f, 5489.0f, 5490.0f, 5491.0f, 5492.0f, 5493.0f, 5494.0f, 5495.0f, 5496.0f, 5497.0f, 5498.0f, 5499.0f, 5500.0f, 5501.0f, 5502.0f, 5503.0f, 5504.0f, 5505.0f, 5506.0f, 5507.0f, 5508.0f, 5509.0f, 5510.0f, 5511.0f, 5512.0f, 5513.0f, 5514.0f, 5515.0f, 5516.0f, 5517.0f, 5518.0f, 5519.0f, 5520.0f, 5521.0f, 5522.0f, 5523.0f, 5524.0f, 5525.0f, 5526.0f, 5527.0f, 5528.0f, 5529.0f, 5530.0f, 5531.0f, 5532.0f, 5533.0f, 5534.0f, 5535.0f, 5536.0f, 5537.0f, 5538.0f, 5539.0f, 5540.0f, 5541.0f, 5542.0f, 5543.0f, 5544.0f, 5545.0f, 5546.0f, 5547.0f, 5548.0f, 5549.0f, 5550.0f, 5551.0f, 5552.0f, 5553.0f, 5554.0f, 5555.0f, 5556.0f, 5557.0f, 5558.0f, 5559.0f, 5560.0f, 5561.0f, 5562.0f, 5563.0f, 5564.0f, 5565.0f, 5566.0f, 5567.0f, 5568.0f, 5569.0f, 5570.0f, 5571.0f, 5572.0f, 5573.0f, 5574.0f, 5575.0f, 5576.0f, 5577.0f, 5578.0f, 5579.0f, 5580.0f, 5581.0f, 5582.0f, 5583.0f, 5584.0f, 5585.0f, 5586.0f, 5587.0f, 5588.0f, 5589.0f, 5590.0f, 5591.0f, 5592.0f, 5593.0f, 5594.0f, 5595.0f, 5596.0f, 5597.0f, 5598.0f, 5599.0f, 5600.0f, 5601.0f, 5602.0f, 5603.0f, 5604.0f, 5605.0f, 5606.0f, 5607.0f, 5608.0f, 5609.0f, 5610.0f, 5611.0f, 5612.0f, 5613.0f, 5614.0f, 5615.0f, 5616.0f, 5617.0f, 5618.0f, 5619.0f, 5620.0f, 5621.0f, 5622.0f, 5623.0f, 5624.0f, 5625.0f, 5626.0f, 5627.0f, 5628.0f, 5629.0f, 5630.0f, 5631.0f, 5632.0f, 5633.0f, 5634.0f, 5635.0f, 5636.0f, 5637.0f, 5638.0f, 5639.0f, 5640.0f, 5641.0f, 5642.0f, 5643.0f, 5644.0f, 5645.0f, 5646.0f, 5647.0f, 5648.0f, 5649.0f, 5650.0f, 5651.0f, 5652.0f, 5653.0f, 5654.0f, 5655.0f, 5656.0f, 5657.0f, 5658.0f, 5659.0f, 5660.0f, 5661.0f, 5662.0f, 5663.0f, 5664.0f, 5665.0f, 5666.0f, 5667.0f, 5668.0f, 5669.0f, 5670.0f, 5671.0f, 
5672.0f, 5673.0f, 5674.0f, 5675.0f, 5676.0f, 5677.0f, 5678.0f, 5679.0f, 5680.0f, 5681.0f, 5682.0f, 5683.0f, 5684.0f, 5685.0f, 5686.0f, 5687.0f, 5688.0f, 5689.0f, 5690.0f, 5691.0f, 5692.0f, 5693.0f, 5694.0f, 5695.0f, 5696.0f, 5697.0f, 5698.0f, 5699.0f, 5700.0f, 5701.0f, 5702.0f, 5703.0f, 5704.0f, 5705.0f, 5706.0f, 5707.0f, 5708.0f, 5709.0f, 5710.0f, 5711.0f, 5712.0f, 5713.0f, 5714.0f, 5715.0f, 5716.0f, 5717.0f, 5718.0f, 5719.0f, 5720.0f, 5721.0f, 5722.0f, 5723.0f, 5724.0f, 5725.0f, 5726.0f, 5727.0f, 5728.0f, 5729.0f, 5730.0f, 5731.0f, 5732.0f, 5733.0f, 5734.0f, 5735.0f, 5736.0f, 5737.0f, 5738.0f, 5739.0f, 5740.0f, 5741.0f, 5742.0f, 5743.0f, 5744.0f, 5745.0f, 5746.0f, 5747.0f, 5748.0f, 5749.0f, 5750.0f, 5751.0f, 5752.0f, 5753.0f, 5754.0f, 5755.0f, 5756.0f, 5757.0f, 5758.0f, 5759.0f, 5760.0f, 5761.0f, 5762.0f, 5763.0f, 5764.0f, 5765.0f, 5766.0f, 5767.0f, 5768.0f, 5769.0f, 5770.0f, 5771.0f, 5772.0f, 5773.0f, 5774.0f, 5775.0f, 5776.0f, 5777.0f, 5778.0f, 5779.0f, 5780.0f, 5781.0f, 5782.0f, 5783.0f, 5784.0f, 5785.0f, 5786.0f, 5787.0f, 5788.0f, 5789.0f, 5790.0f, 5791.0f, 5792.0f, 5793.0f, 5794.0f, 5795.0f, 5796.0f, 5797.0f, 5798.0f, 5799.0f, 5800.0f, 5801.0f, 5802.0f, 5803.0f, 5804.0f, 5805.0f, 5806.0f, 5807.0f, 5808.0f, 5809.0f, 5810.0f, 5811.0f, 5812.0f, 5813.0f, 5814.0f, 5815.0f, 5816.0f, 5817.0f, 5818.0f, 5819.0f, 5820.0f, 5821.0f, 5822.0f, 5823.0f, 5824.0f, 5825.0f, 5826.0f, 5827.0f, 5828.0f, 5829.0f, 5830.0f, 5831.0f, 5832.0f, 5833.0f, 5834.0f, 5835.0f, 5836.0f, 5837.0f, 5838.0f, 5839.0f, 5840.0f, 5841.0f, 5842.0f, 5843.0f, 5844.0f, 5845.0f, 5846.0f, 5847.0f, 5848.0f, 5849.0f, 5850.0f, 5851.0f, 5852.0f, 5853.0f, 5854.0f, 5855.0f, 5856.0f, 5857.0f, 5858.0f, 5859.0f, 5860.0f, 5861.0f, 5862.0f, 5863.0f, 5864.0f, 5865.0f, 5866.0f, 5867.0f, 5868.0f, 5869.0f, 5870.0f, 5871.0f, 5872.0f, 5873.0f, 5874.0f, 5875.0f, 5876.0f, 5877.0f, 5878.0f, 5879.0f, 5880.0f, 5881.0f, 5882.0f, 5883.0f, 5884.0f, 5885.0f, 5886.0f, 5887.0f, 5888.0f, 5889.0f, 5890.0f, 5891.0f, 5892.0f, 5893.0f, 
5894.0f, 5895.0f, 5896.0f, 5897.0f, 5898.0f, 5899.0f, 5900.0f, 5901.0f, 5902.0f, 5903.0f, 5904.0f, 5905.0f, 5906.0f, 5907.0f, 5908.0f, 5909.0f, 5910.0f, 5911.0f, 5912.0f, 5913.0f, 5914.0f, 5915.0f, 5916.0f, 5917.0f, 5918.0f, 5919.0f, 5920.0f, 5921.0f, 5922.0f, 5923.0f, 5924.0f, 5925.0f, 5926.0f, 5927.0f, 5928.0f, 5929.0f, 5930.0f, 5931.0f, 5932.0f, 5933.0f, 5934.0f, 5935.0f, 5936.0f, 5937.0f, 5938.0f, 5939.0f, 5940.0f, 5941.0f, 5942.0f, 5943.0f, 5944.0f, 5945.0f, 5946.0f, 5947.0f, 5948.0f, 5949.0f, 5950.0f, 5951.0f, 5952.0f, 5953.0f, 5954.0f, 5955.0f, 5956.0f, 5957.0f, 5958.0f, 5959.0f, 5960.0f, 5961.0f, 5962.0f, 5963.0f, 5964.0f, 5965.0f, 5966.0f, 5967.0f, 5968.0f, 5969.0f, 5970.0f, 5971.0f, 5972.0f, 5973.0f, 5974.0f, 5975.0f, 5976.0f, 5977.0f, 5978.0f, 5979.0f, 5980.0f, 5981.0f, 5982.0f, 5983.0f, 5984.0f, 5985.0f, 5986.0f, 5987.0f, 5988.0f, 5989.0f, 5990.0f, 5991.0f, 5992.0f, 5993.0f, 5994.0f, 5995.0f, 5996.0f, 5997.0f, 5998.0f, 5999.0f, 6000.0f, 6001.0f, 6002.0f, 6003.0f, 6004.0f, 6005.0f, 6006.0f, 6007.0f, 6008.0f, 6009.0f, 6010.0f, 6011.0f, 6012.0f, 6013.0f, 6014.0f, 6015.0f, 6016.0f, 6017.0f, 6018.0f, 6019.0f, 6020.0f, 6021.0f, 6022.0f, 6023.0f, 6024.0f, 6025.0f, 6026.0f, 6027.0f, 6028.0f, 6029.0f, 6030.0f, 6031.0f, 6032.0f, 6033.0f, 6034.0f, 6035.0f, 6036.0f, 6037.0f, 6038.0f, 6039.0f, 6040.0f, 6041.0f, 6042.0f, 6043.0f, 6044.0f, 6045.0f, 6046.0f, 6047.0f, 6048.0f, 6049.0f, 6050.0f, 6051.0f, 6052.0f, 6053.0f, 6054.0f, 6055.0f, 6056.0f, 6057.0f, 6058.0f, 6059.0f, 6060.0f, 6061.0f, 6062.0f, 6063.0f, 6064.0f, 6065.0f, 6066.0f, 6067.0f, 6068.0f, 6069.0f, 6070.0f, 6071.0f, 6072.0f, 6073.0f, 6074.0f, 6075.0f, 6076.0f, 6077.0f, 6078.0f, 6079.0f, 6080.0f, 6081.0f, 6082.0f, 6083.0f, 6084.0f, 6085.0f, 6086.0f, 6087.0f, 6088.0f, 6089.0f, 6090.0f, 6091.0f, 6092.0f, 6093.0f, 6094.0f, 6095.0f, 6096.0f, 6097.0f, 6098.0f, 6099.0f, 6100.0f, 6101.0f, 6102.0f, 6103.0f, 6104.0f, 6105.0f, 6106.0f, 6107.0f, 6108.0f, 6109.0f, 6110.0f, 6111.0f, 6112.0f, 6113.0f, 6114.0f, 6115.0f, 
6116.0f, 6117.0f, 6118.0f, 6119.0f, 6120.0f, 6121.0f, 6122.0f, 6123.0f, 6124.0f, 6125.0f, 6126.0f, 6127.0f, 6128.0f, 6129.0f, 6130.0f, 6131.0f, 6132.0f, 6133.0f, 6134.0f, 6135.0f, 6136.0f, 6137.0f, 6138.0f, 6139.0f, 6140.0f, 6141.0f, 6142.0f, 6143.0f, 6144.0f, 6145.0f, 6146.0f, 6147.0f, 6148.0f, 6149.0f, 6150.0f, 6151.0f, 6152.0f, 6153.0f, 6154.0f, 6155.0f, 6156.0f, 6157.0f, 6158.0f, 6159.0f, 6160.0f, 6161.0f, 6162.0f, 6163.0f, 6164.0f, 6165.0f, 6166.0f, 6167.0f, 6168.0f, 6169.0f, 6170.0f, 6171.0f, 6172.0f, 6173.0f, 6174.0f, 6175.0f, 6176.0f, 6177.0f, 6178.0f, 6179.0f, 6180.0f, 6181.0f, 6182.0f, 6183.0f, 6184.0f, 6185.0f, 6186.0f, 6187.0f, 6188.0f, 6189.0f, 6190.0f, 6191.0f, 6192.0f, 6193.0f, 6194.0f, 6195.0f, 6196.0f, 6197.0f, 6198.0f, 6199.0f, 6200.0f, 6201.0f, 6202.0f, 6203.0f, 6204.0f, 6205.0f, 6206.0f, 6207.0f, 6208.0f, 6209.0f, 6210.0f, 6211.0f, 6212.0f, 6213.0f, 6214.0f, 6215.0f, 6216.0f, 6217.0f, 6218.0f, 6219.0f, 6220.0f, 6221.0f, 6222.0f, 6223.0f, 6224.0f, 6225.0f, 6226.0f, 6227.0f, 6228.0f, 6229.0f, 6230.0f, 6231.0f, 6232.0f, 6233.0f, 6234.0f, 6235.0f, 6236.0f, 6237.0f, 6238.0f, 6239.0f, 6240.0f, 6241.0f, 6242.0f, 6243.0f, 6244.0f, 6245.0f, 6246.0f, 6247.0f, 6248.0f, 6249.0f, 6250.0f, 6251.0f, 6252.0f, 6253.0f, 6254.0f, 6255.0f, 6256.0f, 6257.0f, 6258.0f, 6259.0f, 6260.0f, 6261.0f, 6262.0f, 6263.0f, 6264.0f, 6265.0f, 6266.0f, 6267.0f, 6268.0f, 6269.0f, 6270.0f, 6271.0f, 6272.0f, 6273.0f, 6274.0f, 6275.0f, 6276.0f, 6277.0f, 6278.0f, 6279.0f, 6280.0f, 6281.0f, 6282.0f, 6283.0f, 6284.0f, 6285.0f, 6286.0f, 6287.0f, 6288.0f, 6289.0f, 6290.0f, 6291.0f, 6292.0f, 6293.0f, 6294.0f, 6295.0f, 6296.0f, 6297.0f, 6298.0f, 6299.0f, 6300.0f, 6301.0f, 6302.0f, 6303.0f, 6304.0f, 6305.0f, 6306.0f, 6307.0f, 6308.0f, 6309.0f, 6310.0f, 6311.0f, 6312.0f, 6313.0f, 6314.0f, 6315.0f, 6316.0f, 6317.0f, 6318.0f, 6319.0f, 6320.0f, 6321.0f, 6322.0f, 6323.0f, 6324.0f, 6325.0f, 6326.0f, 6327.0f, 6328.0f, 6329.0f, 6330.0f, 6331.0f, 6332.0f, 6333.0f, 6334.0f, 6335.0f, 6336.0f, 6337.0f, 
6338.0f, 6339.0f, 6340.0f, 6341.0f, 6342.0f, 6343.0f, 6344.0f, 6345.0f, 6346.0f, 6347.0f, 6348.0f, 6349.0f, 6350.0f, 6351.0f, 6352.0f, 6353.0f, 6354.0f, 6355.0f, 6356.0f, 6357.0f, 6358.0f, 6359.0f, 6360.0f, 6361.0f, 6362.0f, 6363.0f, 6364.0f, 6365.0f, 6366.0f, 6367.0f, 6368.0f, 6369.0f, 6370.0f, 6371.0f, 6372.0f, 6373.0f, 6374.0f, 6375.0f, 6376.0f, 6377.0f, 6378.0f, 6379.0f, 6380.0f, 6381.0f, 6382.0f, 6383.0f, 6384.0f, 6385.0f, 6386.0f, 6387.0f, 6388.0f, 6389.0f, 6390.0f, 6391.0f, 6392.0f, 6393.0f, 6394.0f, 6395.0f, 6396.0f, 6397.0f, 6398.0f, 6399.0f, 6400.0f, 6401.0f, 6402.0f, 6403.0f, 6404.0f, 6405.0f, 6406.0f, 6407.0f, 6408.0f, 6409.0f, 6410.0f, 6411.0f, 6412.0f, 6413.0f, 6414.0f, 6415.0f, 6416.0f, 6417.0f, 6418.0f, 6419.0f, 6420.0f, 6421.0f, 6422.0f, 6423.0f, 6424.0f, 6425.0f, 6426.0f, 6427.0f, 6428.0f, 6429.0f, 6430.0f, 6431.0f, 6432.0f, 6433.0f, 6434.0f, 6435.0f, 6436.0f, 6437.0f, 6438.0f, 6439.0f, 6440.0f, 6441.0f, 6442.0f, 6443.0f, 6444.0f, 6445.0f, 6446.0f, 6447.0f, 6448.0f, 6449.0f, 6450.0f, 6451.0f, 6452.0f, 6453.0f, 6454.0f, 6455.0f, 6456.0f, 6457.0f, 6458.0f, 6459.0f, 6460.0f, 6461.0f, 6462.0f, 6463.0f, 6464.0f, 6465.0f, 6466.0f, 6467.0f, 6468.0f, 6469.0f, 6470.0f, 6471.0f, 6472.0f, 6473.0f, 6474.0f, 6475.0f, 6476.0f, 6477.0f, 6478.0f, 6479.0f, 6480.0f, 6481.0f, 6482.0f, 6483.0f, 6484.0f, 6485.0f, 6486.0f, 6487.0f, 6488.0f, 6489.0f, 6490.0f, 6491.0f, 6492.0f, 6493.0f, 6494.0f, 6495.0f, 6496.0f, 6497.0f, 6498.0f, 6499.0f, 6500.0f, 6501.0f, 6502.0f, 6503.0f, 6504.0f, 6505.0f, 6506.0f, 6507.0f, 6508.0f, 6509.0f, 6510.0f, 6511.0f, 6512.0f, 6513.0f, 6514.0f, 6515.0f, 6516.0f, 6517.0f, 6518.0f, 6519.0f, 6520.0f, 6521.0f, 6522.0f, 6523.0f, 6524.0f, 6525.0f, 6526.0f, 6527.0f, 6528.0f, 6529.0f, 6530.0f, 6531.0f, 6532.0f, 6533.0f, 6534.0f, 6535.0f, 6536.0f, 6537.0f, 6538.0f, 6539.0f, 6540.0f, 6541.0f, 6542.0f, 6543.0f, 6544.0f, 6545.0f, 6546.0f, 6547.0f, 6548.0f, 6549.0f, 6550.0f, 6551.0f, 6552.0f, 6553.0f, 6554.0f, 6555.0f, 6556.0f, 6557.0f, 6558.0f, 6559.0f, 
6560.0f, 6561.0f, 6562.0f, 6563.0f, 6564.0f, 6565.0f, 6566.0f, 6567.0f, 6568.0f, 6569.0f, 6570.0f, 6571.0f, 6572.0f, 6573.0f, 6574.0f, 6575.0f, 6576.0f, 6577.0f, 6578.0f, 6579.0f, 6580.0f, 6581.0f, 6582.0f, 6583.0f, 6584.0f, 6585.0f, 6586.0f, 6587.0f, 6588.0f, 6589.0f, 6590.0f, 6591.0f, 6592.0f, 6593.0f, 6594.0f, 6595.0f, 6596.0f, 6597.0f, 6598.0f, 6599.0f, 6600.0f, 6601.0f, 6602.0f, 6603.0f, 6604.0f, 6605.0f, 6606.0f, 6607.0f, 6608.0f, 6609.0f, 6610.0f, 6611.0f, 6612.0f, 6613.0f, 6614.0f, 6615.0f, 6616.0f, 6617.0f, 6618.0f, 6619.0f, 6620.0f, 6621.0f, 6622.0f, 6623.0f, 6624.0f, 6625.0f, 6626.0f, 6627.0f, 6628.0f, 6629.0f, 6630.0f, 6631.0f, 6632.0f, 6633.0f, 6634.0f, 6635.0f, 6636.0f, 6637.0f, 6638.0f, 6639.0f, 6640.0f, 6641.0f, 6642.0f, 6643.0f, 6644.0f, 6645.0f, 6646.0f, 6647.0f, 6648.0f, 6649.0f, 6650.0f, 6651.0f, 6652.0f, 6653.0f, 6654.0f, 6655.0f, 6656.0f, 6657.0f, 6658.0f, 6659.0f, 6660.0f, 6661.0f, 6662.0f, 6663.0f, 6664.0f, 6665.0f, 6666.0f, 6667.0f, 6668.0f, 6669.0f, 6670.0f, 6671.0f, 6672.0f, 6673.0f, 6674.0f, 6675.0f, 6676.0f, 6677.0f, 6678.0f, 6679.0f, 6680.0f, 6681.0f, 6682.0f, 6683.0f, 6684.0f, 6685.0f, 6686.0f, 6687.0f, 6688.0f, 6689.0f, 6690.0f, 6691.0f, 6692.0f, 6693.0f, 6694.0f, 6695.0f, 6696.0f, 6697.0f, 6698.0f, 6699.0f, 6700.0f, 6701.0f, 6702.0f, 6703.0f, 6704.0f, 6705.0f, 6706.0f, 6707.0f, 6708.0f, 6709.0f, 6710.0f, 6711.0f, 6712.0f, 6713.0f, 6714.0f, 6715.0f, 6716.0f, 6717.0f, 6718.0f, 6719.0f, 6720.0f, 6721.0f, 6722.0f, 6723.0f, 6724.0f, 6725.0f, 6726.0f, 6727.0f, 6728.0f, 6729.0f, 6730.0f, 6731.0f, 6732.0f, 6733.0f, 6734.0f, 6735.0f, 6736.0f, 6737.0f, 6738.0f, 6739.0f, 6740.0f, 6741.0f, 6742.0f, 6743.0f, 6744.0f, 6745.0f, 6746.0f, 6747.0f, 6748.0f, 6749.0f, 6750.0f, 6751.0f, 6752.0f, 6753.0f, 6754.0f, 6755.0f, 6756.0f, 6757.0f, 6758.0f, 6759.0f, 6760.0f, 6761.0f, 6762.0f, 6763.0f, 6764.0f, 6765.0f, 6766.0f, 6767.0f, 6768.0f, 6769.0f, 6770.0f, 6771.0f, 6772.0f, 6773.0f, 6774.0f, 6775.0f, 6776.0f, 6777.0f, 6778.0f, 6779.0f, 6780.0f, 6781.0f, 
6782.0f, 6783.0f, 6784.0f, 6785.0f, 6786.0f, 6787.0f, 6788.0f, 6789.0f, 6790.0f, 6791.0f, 6792.0f, 6793.0f, 6794.0f, 6795.0f, 6796.0f, 6797.0f, 6798.0f, 6799.0f, 6800.0f, 6801.0f, 6802.0f, 6803.0f, 6804.0f, 6805.0f, 6806.0f, 6807.0f, 6808.0f, 6809.0f, 6810.0f, 6811.0f, 6812.0f, 6813.0f, 6814.0f, 6815.0f, 6816.0f, 6817.0f, 6818.0f, 6819.0f, 6820.0f, 6821.0f, 6822.0f, 6823.0f, 6824.0f, 6825.0f, 6826.0f, 6827.0f, 6828.0f, 6829.0f, 6830.0f, 6831.0f, 6832.0f, 6833.0f, 6834.0f, 6835.0f, 6836.0f, 6837.0f, 6838.0f, 6839.0f, 6840.0f, 6841.0f, 6842.0f, 6843.0f, 6844.0f, 6845.0f, 6846.0f, 6847.0f, 6848.0f, 6849.0f, 6850.0f, 6851.0f, 6852.0f, 6853.0f, 6854.0f, 6855.0f, 6856.0f, 6857.0f, 6858.0f, 6859.0f, 6860.0f, 6861.0f, 6862.0f, 6863.0f, 6864.0f, 6865.0f, 6866.0f, 6867.0f, 6868.0f, 6869.0f, 6870.0f, 6871.0f, 6872.0f, 6873.0f, 6874.0f, 6875.0f, 6876.0f, 6877.0f, 6878.0f, 6879.0f, 6880.0f, 6881.0f, 6882.0f, 6883.0f, 6884.0f, 6885.0f, 6886.0f, 6887.0f, 6888.0f, 6889.0f, 6890.0f, 6891.0f, 6892.0f, 6893.0f, 6894.0f, 6895.0f, 6896.0f, 6897.0f, 6898.0f, 6899.0f, 6900.0f, 6901.0f, 6902.0f, 6903.0f, 6904.0f, 6905.0f, 6906.0f, 6907.0f, 6908.0f, 6909.0f, 6910.0f, 6911.0f, 6912.0f, 6913.0f, 6914.0f, 6915.0f, 6916.0f, 6917.0f, 6918.0f, 6919.0f, 6920.0f, 6921.0f, 6922.0f, 6923.0f, 6924.0f, 6925.0f, 6926.0f, 6927.0f, 6928.0f, 6929.0f, 6930.0f, 6931.0f, 6932.0f, 6933.0f, 6934.0f, 6935.0f, 6936.0f, 6937.0f, 6938.0f, 6939.0f, 6940.0f, 6941.0f, 6942.0f, 6943.0f, 6944.0f, 6945.0f, 6946.0f, 6947.0f, 6948.0f, 6949.0f, 6950.0f, 6951.0f, 6952.0f, 6953.0f, 6954.0f, 6955.0f, 6956.0f, 6957.0f, 6958.0f, 6959.0f, 6960.0f, 6961.0f, 6962.0f, 6963.0f, 6964.0f, 6965.0f, 6966.0f, 6967.0f, 6968.0f, 6969.0f, 6970.0f, 6971.0f, 6972.0f, 6973.0f, 6974.0f, 6975.0f, 6976.0f, 6977.0f, 6978.0f, 6979.0f, 6980.0f, 6981.0f, 6982.0f, 6983.0f, 6984.0f, 6985.0f, 6986.0f, 6987.0f, 6988.0f, 6989.0f, 6990.0f, 6991.0f, 6992.0f, 6993.0f, 6994.0f, 6995.0f, 6996.0f, 6997.0f, 6998.0f, 6999.0f, 7000.0f, 7001.0f, 7002.0f, 7003.0f, 
7004.0f, 7005.0f, 7006.0f, 7007.0f, 7008.0f, 7009.0f, 7010.0f, 7011.0f, 7012.0f, 7013.0f, 7014.0f, 7015.0f, 7016.0f, 7017.0f, 7018.0f, 7019.0f, 7020.0f, 7021.0f, 7022.0f, 7023.0f, 7024.0f, 7025.0f, 7026.0f, 7027.0f, 7028.0f, 7029.0f, 7030.0f, 7031.0f, 7032.0f, 7033.0f, 7034.0f, 7035.0f, 7036.0f, 7037.0f, 7038.0f, 7039.0f, 7040.0f, 7041.0f, 7042.0f, 7043.0f, 7044.0f, 7045.0f, 7046.0f, 7047.0f, 7048.0f, 7049.0f, 7050.0f, 7051.0f, 7052.0f, 7053.0f, 7054.0f, 7055.0f, 7056.0f, 7057.0f, 7058.0f, 7059.0f, 7060.0f, 7061.0f, 7062.0f, 7063.0f, 7064.0f, 7065.0f, 7066.0f, 7067.0f, 7068.0f, 7069.0f, 7070.0f, 7071.0f, 7072.0f, 7073.0f, 7074.0f, 7075.0f, 7076.0f, 7077.0f, 7078.0f, 7079.0f, 7080.0f, 7081.0f, 7082.0f, 7083.0f, 7084.0f, 7085.0f, 7086.0f, 7087.0f, 7088.0f, 7089.0f, 7090.0f, 7091.0f, 7092.0f, 7093.0f, 7094.0f, 7095.0f, 7096.0f, 7097.0f, 7098.0f, 7099.0f, 7100.0f, 7101.0f, 7102.0f, 7103.0f, 7104.0f, 7105.0f, 7106.0f, 7107.0f, 7108.0f, 7109.0f, 7110.0f, 7111.0f, 7112.0f, 7113.0f, 7114.0f, 7115.0f, 7116.0f, 7117.0f, 7118.0f, 7119.0f, 7120.0f, 7121.0f, 7122.0f, 7123.0f, 7124.0f, 7125.0f, 7126.0f, 7127.0f, 7128.0f, 7129.0f, 7130.0f, 7131.0f, 7132.0f, 7133.0f, 7134.0f, 7135.0f, 7136.0f, 7137.0f, 7138.0f, 7139.0f, 7140.0f, 7141.0f, 7142.0f, 7143.0f, 7144.0f, 7145.0f, 7146.0f, 7147.0f, 7148.0f, 7149.0f, 7150.0f, 7151.0f, 7152.0f, 7153.0f, 7154.0f, 7155.0f, 7156.0f, 7157.0f, 7158.0f, 7159.0f, 7160.0f, 7161.0f, 7162.0f, 7163.0f, 7164.0f, 7165.0f, 7166.0f, 7167.0f, 7168.0f, 7169.0f, 7170.0f, 7171.0f, 7172.0f, 7173.0f, 7174.0f, 7175.0f, 7176.0f, 7177.0f, 7178.0f, 7179.0f, 7180.0f, 7181.0f, 7182.0f, 7183.0f, 7184.0f, 7185.0f, 7186.0f, 7187.0f, 7188.0f, 7189.0f, 7190.0f, 7191.0f, 7192.0f, 7193.0f, 7194.0f, 7195.0f, 7196.0f, 7197.0f, 7198.0f, 7199.0f, 7200.0f, 7201.0f, 7202.0f, 7203.0f, 7204.0f, 7205.0f, 7206.0f, 7207.0f, 7208.0f, 7209.0f, 7210.0f, 7211.0f, 7212.0f, 7213.0f, 7214.0f, 7215.0f, 7216.0f, 7217.0f, 7218.0f, 7219.0f, 7220.0f, 7221.0f, 7222.0f, 7223.0f, 7224.0f, 7225.0f, 
7226.0f, 7227.0f, 7228.0f, 7229.0f, 7230.0f, 7231.0f, 7232.0f, 7233.0f, 7234.0f, 7235.0f, 7236.0f, 7237.0f, 7238.0f, 7239.0f, 7240.0f, 7241.0f, 7242.0f, 7243.0f, 7244.0f, 7245.0f, 7246.0f, 7247.0f, 7248.0f, 7249.0f, 7250.0f, 7251.0f, 7252.0f, 7253.0f, 7254.0f, 7255.0f, 7256.0f, 7257.0f, 7258.0f, 7259.0f, 7260.0f, 7261.0f, 7262.0f, 7263.0f, 7264.0f, 7265.0f, 7266.0f, 7267.0f, 7268.0f, 7269.0f, 7270.0f, 7271.0f, 7272.0f, 7273.0f, 7274.0f, 7275.0f, 7276.0f, 7277.0f, 7278.0f, 7279.0f, 7280.0f, 7281.0f, 7282.0f, 7283.0f, 7284.0f, 7285.0f, 7286.0f, 7287.0f, 7288.0f, 7289.0f, 7290.0f, 7291.0f, 7292.0f, 7293.0f, 7294.0f, 7295.0f, 7296.0f, 7297.0f, 7298.0f, 7299.0f, 7300.0f, 7301.0f, 7302.0f, 7303.0f, 7304.0f, 7305.0f, 7306.0f, 7307.0f, 7308.0f, 7309.0f, 7310.0f, 7311.0f, 7312.0f, 7313.0f, 7314.0f, 7315.0f, 7316.0f, 7317.0f, 7318.0f, 7319.0f, 7320.0f, 7321.0f, 7322.0f, 7323.0f, 7324.0f, 7325.0f, 7326.0f, 7327.0f, 7328.0f, 7329.0f, 7330.0f, 7331.0f, 7332.0f, 7333.0f, 7334.0f, 7335.0f, 7336.0f, 7337.0f, 7338.0f, 7339.0f, 7340.0f, 7341.0f, 7342.0f, 7343.0f, 7344.0f, 7345.0f, 7346.0f, 7347.0f, 7348.0f, 7349.0f, 7350.0f, 7351.0f, 7352.0f, 7353.0f, 7354.0f, 7355.0f, 7356.0f, 7357.0f, 7358.0f, 7359.0f, 7360.0f, 7361.0f, 7362.0f, 7363.0f, 7364.0f, 7365.0f, 7366.0f, 7367.0f, 7368.0f, 7369.0f, 7370.0f, 7371.0f, 7372.0f, 7373.0f, 7374.0f, 7375.0f, 7376.0f, 7377.0f, 7378.0f, 7379.0f, 7380.0f, 7381.0f, 7382.0f, 7383.0f, 7384.0f, 7385.0f, 7386.0f, 7387.0f, 7388.0f, 7389.0f, 7390.0f, 7391.0f, 7392.0f, 7393.0f, 7394.0f, 7395.0f, 7396.0f, 7397.0f, 7398.0f, 7399.0f, 7400.0f, 7401.0f, 7402.0f, 7403.0f, 7404.0f, 7405.0f, 7406.0f, 7407.0f, 7408.0f, 7409.0f, 7410.0f, 7411.0f, 7412.0f, 7413.0f, 7414.0f, 7415.0f, 7416.0f, 7417.0f, 7418.0f, 7419.0f, 7420.0f, 7421.0f, 7422.0f, 7423.0f, 7424.0f, 7425.0f, 7426.0f, 7427.0f, 7428.0f, 7429.0f, 7430.0f, 7431.0f, 7432.0f, 7433.0f, 7434.0f, 7435.0f, 7436.0f, 7437.0f, 7438.0f, 7439.0f, 7440.0f, 7441.0f, 7442.0f, 7443.0f, 7444.0f, 7445.0f, 7446.0f, 7447.0f, 
7448.0f, 7449.0f, 7450.0f, 7451.0f, 7452.0f, 7453.0f, 7454.0f, 7455.0f, 7456.0f, 7457.0f, 7458.0f, 7459.0f, 7460.0f, 7461.0f, 7462.0f, 7463.0f, 7464.0f, 7465.0f, 7466.0f, 7467.0f, 7468.0f, 7469.0f, 7470.0f, 7471.0f, 7472.0f, 7473.0f, 7474.0f, 7475.0f, 7476.0f, 7477.0f, 7478.0f, 7479.0f, 7480.0f, 7481.0f, 7482.0f, 7483.0f, 7484.0f, 7485.0f, 7486.0f, 7487.0f, 7488.0f, 7489.0f, 7490.0f, 7491.0f, 7492.0f, 7493.0f, 7494.0f, 7495.0f, 7496.0f, 7497.0f, 7498.0f, 7499.0f, 7500.0f, 7501.0f, 7502.0f, 7503.0f, 7504.0f, 7505.0f, 7506.0f, 7507.0f, 7508.0f, 7509.0f, 7510.0f, 7511.0f, 7512.0f, 7513.0f, 7514.0f, 7515.0f, 7516.0f, 7517.0f, 7518.0f, 7519.0f, 7520.0f, 7521.0f, 7522.0f, 7523.0f, 7524.0f, 7525.0f, 7526.0f, 7527.0f, 7528.0f, 7529.0f, 7530.0f, 7531.0f, 7532.0f, 7533.0f, 7534.0f, 7535.0f, 7536.0f, 7537.0f, 7538.0f, 7539.0f, 7540.0f, 7541.0f, 7542.0f, 7543.0f, 7544.0f, 7545.0f, 7546.0f, 7547.0f, 7548.0f, 7549.0f, 7550.0f, 7551.0f, 7552.0f, 7553.0f, 7554.0f, 7555.0f, 7556.0f, 7557.0f, 7558.0f, 7559.0f, 7560.0f, 7561.0f, 7562.0f, 7563.0f, 7564.0f, 7565.0f, 7566.0f, 7567.0f, 7568.0f, 7569.0f, 7570.0f, 7571.0f, 7572.0f, 7573.0f, 7574.0f, 7575.0f, 7576.0f, 7577.0f, 7578.0f, 7579.0f, 7580.0f, 7581.0f, 7582.0f, 7583.0f, 7584.0f, 7585.0f, 7586.0f, 7587.0f, 7588.0f, 7589.0f, 7590.0f, 7591.0f, 7592.0f, 7593.0f, 7594.0f, 7595.0f, 7596.0f, 7597.0f, 7598.0f, 7599.0f, 7600.0f, 7601.0f, 7602.0f, 7603.0f, 7604.0f, 7605.0f, 7606.0f, 7607.0f, 7608.0f, 7609.0f, 7610.0f, 7611.0f, 7612.0f, 7613.0f, 7614.0f, 7615.0f, 7616.0f, 7617.0f, 7618.0f, 7619.0f, 7620.0f, 7621.0f, 7622.0f, 7623.0f, 7624.0f, 7625.0f, 7626.0f, 7627.0f, 7628.0f, 7629.0f, 7630.0f, 7631.0f, 7632.0f, 7633.0f, 7634.0f, 7635.0f, 7636.0f, 7637.0f, 7638.0f, 7639.0f, 7640.0f, 7641.0f, 7642.0f, 7643.0f, 7644.0f, 7645.0f, 7646.0f, 7647.0f, 7648.0f, 7649.0f, 7650.0f, 7651.0f, 7652.0f, 7653.0f, 7654.0f, 7655.0f, 7656.0f, 7657.0f, 7658.0f, 7659.0f, 7660.0f, 7661.0f, 7662.0f, 7663.0f, 7664.0f, 7665.0f, 7666.0f, 7667.0f, 7668.0f, 7669.0f, 
7670.0f, 7671.0f, 7672.0f, 7673.0f, 7674.0f, 7675.0f, 7676.0f, 7677.0f, 7678.0f, 7679.0f, 7680.0f, 7681.0f, 7682.0f, 7683.0f, 7684.0f, 7685.0f, 7686.0f, 7687.0f, 7688.0f, 7689.0f, 7690.0f, 7691.0f, 7692.0f, 7693.0f, 7694.0f, 7695.0f, 7696.0f, 7697.0f, 7698.0f, 7699.0f, 7700.0f, 7701.0f, 7702.0f, 7703.0f, 7704.0f, 7705.0f, 7706.0f, 7707.0f, 7708.0f, 7709.0f, 7710.0f, 7711.0f, 7712.0f, 7713.0f, 7714.0f, 7715.0f, 7716.0f, 7717.0f, 7718.0f, 7719.0f, 7720.0f, 7721.0f, 7722.0f, 7723.0f, 7724.0f, 7725.0f, 7726.0f, 7727.0f, 7728.0f, 7729.0f, 7730.0f, 7731.0f, 7732.0f, 7733.0f, 7734.0f, 7735.0f, 7736.0f, 7737.0f, 7738.0f, 7739.0f, 7740.0f, 7741.0f, 7742.0f, 7743.0f, 7744.0f, 7745.0f, 7746.0f, 7747.0f, 7748.0f, 7749.0f, 7750.0f, 7751.0f, 7752.0f, 7753.0f, 7754.0f, 7755.0f, 7756.0f, 7757.0f, 7758.0f, 7759.0f, 7760.0f, 7761.0f, 7762.0f, 7763.0f, 7764.0f, 7765.0f, 7766.0f, 7767.0f, 7768.0f, 7769.0f, 7770.0f, 7771.0f, 7772.0f, 7773.0f, 7774.0f, 7775.0f, 7776.0f, 7777.0f, 7778.0f, 7779.0f, 7780.0f, 7781.0f, 7782.0f, 7783.0f, 7784.0f, 7785.0f, 7786.0f, 7787.0f, 7788.0f, 7789.0f, 7790.0f, 7791.0f, 7792.0f, 7793.0f, 7794.0f, 7795.0f, 7796.0f, 7797.0f, 7798.0f, 7799.0f, 7800.0f, 7801.0f, 7802.0f, 7803.0f, 7804.0f, 7805.0f, 7806.0f, 7807.0f, 7808.0f, 7809.0f, 7810.0f, 7811.0f, 7812.0f, 7813.0f, 7814.0f, 7815.0f, 7816.0f, 7817.0f, 7818.0f, 7819.0f, 7820.0f, 7821.0f, 7822.0f, 7823.0f, 7824.0f, 7825.0f, 7826.0f, 7827.0f, 7828.0f, 7829.0f, 7830.0f, 7831.0f, 7832.0f, 7833.0f, 7834.0f, 7835.0f, 7836.0f, 7837.0f, 7838.0f, 7839.0f, 7840.0f, 7841.0f, 7842.0f, 7843.0f, 7844.0f, 7845.0f, 7846.0f, 7847.0f, 7848.0f, 7849.0f, 7850.0f, 7851.0f, 7852.0f, 7853.0f, 7854.0f, 7855.0f, 7856.0f, 7857.0f, 7858.0f, 7859.0f, 7860.0f, 7861.0f, 7862.0f, 7863.0f, 7864.0f, 7865.0f, 7866.0f, 7867.0f, 7868.0f, 7869.0f, 7870.0f, 7871.0f, 7872.0f, 7873.0f, 7874.0f, 7875.0f, 7876.0f, 7877.0f, 7878.0f, 7879.0f, 7880.0f, 7881.0f, 7882.0f, 7883.0f, 7884.0f, 7885.0f, 7886.0f, 7887.0f, 7888.0f, 7889.0f, 7890.0f, 7891.0f, 
7892.0f, 7893.0f, 7894.0f, 7895.0f, 7896.0f, 7897.0f, 7898.0f, 7899.0f, 7900.0f, 7901.0f, 7902.0f, 7903.0f, 7904.0f, 7905.0f, 7906.0f, 7907.0f, 7908.0f, 7909.0f, 7910.0f, 7911.0f, 7912.0f, 7913.0f, 7914.0f, 7915.0f, 7916.0f, 7917.0f, 7918.0f, 7919.0f, 7920.0f, 7921.0f, 7922.0f, 7923.0f, 7924.0f, 7925.0f, 7926.0f, 7927.0f, 7928.0f, 7929.0f, 7930.0f, 7931.0f, 7932.0f, 7933.0f, 7934.0f, 7935.0f, 7936.0f, 7937.0f, 7938.0f, 7939.0f, 7940.0f, 7941.0f, 7942.0f, 7943.0f, 7944.0f, 7945.0f, 7946.0f, 7947.0f, 7948.0f, 7949.0f, 7950.0f, 7951.0f, 7952.0f, 7953.0f, 7954.0f, 7955.0f, 7956.0f, 7957.0f, 7958.0f, 7959.0f, 7960.0f, 7961.0f, 7962.0f, 7963.0f, 7964.0f, 7965.0f, 7966.0f, 7967.0f, 7968.0f, 7969.0f, 7970.0f, 7971.0f, 7972.0f, 7973.0f, 7974.0f, 7975.0f, 7976.0f, 7977.0f, 7978.0f, 7979.0f, 7980.0f, 7981.0f, 7982.0f, 7983.0f, 7984.0f, 7985.0f, 7986.0f, 7987.0f, 7988.0f, 7989.0f, 7990.0f, 7991.0f, 7992.0f, 7993.0f, 7994.0f, 7995.0f, 7996.0f, 7997.0f, 7998.0f, 7999.0f, 8000.0f, 8001.0f, 8002.0f, 8003.0f, 8004.0f, 8005.0f, 8006.0f, 8007.0f, 8008.0f, 8009.0f, 8010.0f, 8011.0f, 8012.0f, 8013.0f, 8014.0f, 8015.0f, 8016.0f, 8017.0f, 8018.0f, 8019.0f, 8020.0f, 8021.0f, 8022.0f, 8023.0f, 8024.0f, 8025.0f, 8026.0f, 8027.0f, 8028.0f, 8029.0f, 8030.0f, 8031.0f, 8032.0f, 8033.0f, 8034.0f, 8035.0f, 8036.0f, 8037.0f, 8038.0f, 8039.0f, 8040.0f, 8041.0f, 8042.0f, 8043.0f, 8044.0f, 8045.0f, 8046.0f, 8047.0f, 8048.0f, 8049.0f, 8050.0f, 8051.0f, 8052.0f, 8053.0f, 8054.0f, 8055.0f, 8056.0f, 8057.0f, 8058.0f, 8059.0f, 8060.0f, 8061.0f, 8062.0f, 8063.0f, 8064.0f, 8065.0f, 8066.0f, 8067.0f, 8068.0f, 8069.0f, 8070.0f, 8071.0f, 8072.0f, 8073.0f, 8074.0f, 8075.0f, 8076.0f, 8077.0f, 8078.0f, 8079.0f, 8080.0f, 8081.0f, 8082.0f, 8083.0f, 8084.0f, 8085.0f, 8086.0f, 8087.0f, 8088.0f, 8089.0f, 8090.0f, 8091.0f, 8092.0f, 8093.0f, 8094.0f, 8095.0f, 8096.0f, 8097.0f, 8098.0f, 8099.0f, 8100.0f, 8101.0f, 8102.0f, 8103.0f, 8104.0f, 8105.0f, 8106.0f, 8107.0f, 8108.0f, 8109.0f, 8110.0f, 8111.0f, 8112.0f, 8113.0f, 
8114.0f, 8115.0f, 8116.0f, 8117.0f, 8118.0f, 8119.0f, 8120.0f, 8121.0f, 8122.0f, 8123.0f, 8124.0f, 8125.0f, 8126.0f, 8127.0f, 8128.0f, 8129.0f, 8130.0f, 8131.0f, 8132.0f, 8133.0f, 8134.0f, 8135.0f, 8136.0f, 8137.0f, 8138.0f, 8139.0f, 8140.0f, 8141.0f, 8142.0f, 8143.0f, 8144.0f, 8145.0f, 8146.0f, 8147.0f, 8148.0f, 8149.0f, 8150.0f, 8151.0f, 8152.0f, 8153.0f, 8154.0f, 8155.0f, 8156.0f, 8157.0f, 8158.0f, 8159.0f, 8160.0f, 8161.0f, 8162.0f, 8163.0f, 8164.0f, 8165.0f, 8166.0f, 8167.0f, 8168.0f, 8169.0f, 8170.0f, 8171.0f, 8172.0f, 8173.0f, 8174.0f, 8175.0f, 8176.0f, 8177.0f, 8178.0f, 8179.0f, 8180.0f, 8181.0f, 8182.0f, 8183.0f, 8184.0f, 8185.0f, 8186.0f, 8187.0f, 8188.0f, 8189.0f, 8190.0f, 8191.0f, 8192.0f, 8193.0f, 8194.0f, 8195.0f, 8196.0f, 8197.0f, 8198.0f, 8199.0f, 8200.0f, 8201.0f, 8202.0f, 8203.0f, 8204.0f, 8205.0f, 8206.0f, 8207.0f, 8208.0f, 8209.0f, 8210.0f, 8211.0f, 8212.0f, 8213.0f, 8214.0f, 8215.0f, 8216.0f, 8217.0f, 8218.0f, 8219.0f, 8220.0f, 8221.0f, 8222.0f, 8223.0f, 8224.0f, 8225.0f, 8226.0f, 8227.0f, 8228.0f, 8229.0f, 8230.0f, 8231.0f, 8232.0f, 8233.0f, 8234.0f, 8235.0f, 8236.0f, 8237.0f, 8238.0f, 8239.0f, 8240.0f, 8241.0f, 8242.0f, 8243.0f, 8244.0f, 8245.0f, 8246.0f, 8247.0f, 8248.0f, 8249.0f, 8250.0f, 8251.0f, 8252.0f, 8253.0f, 8254.0f, 8255.0f, 8256.0f, 8257.0f, 8258.0f, 8259.0f, 8260.0f, 8261.0f, 8262.0f, 8263.0f, 8264.0f, 8265.0f, 8266.0f, 8267.0f, 8268.0f, 8269.0f, 8270.0f, 8271.0f, 8272.0f, 8273.0f, 8274.0f, 8275.0f, 8276.0f, 8277.0f, 8278.0f, 8279.0f, 8280.0f, 8281.0f, 8282.0f, 8283.0f, 8284.0f, 8285.0f, 8286.0f, 8287.0f, 8288.0f, 8289.0f, 8290.0f, 8291.0f, 8292.0f, 8293.0f, 8294.0f, 8295.0f, 8296.0f, 8297.0f, 8298.0f, 8299.0f, 8300.0f, 8301.0f, 8302.0f, 8303.0f, 8304.0f, 8305.0f, 8306.0f, 8307.0f, 8308.0f, 8309.0f, 8310.0f, 8311.0f, 8312.0f, 8313.0f, 8314.0f, 8315.0f, 8316.0f, 8317.0f, 8318.0f, 8319.0f, 8320.0f, 8321.0f, 8322.0f, 8323.0f, 8324.0f, 8325.0f, 8326.0f, 8327.0f, 8328.0f, 8329.0f, 8330.0f, 8331.0f, 8332.0f, 8333.0f, 8334.0f, 8335.0f, 
8336.0f, 8337.0f, 8338.0f, 8339.0f, 8340.0f, 8341.0f, 8342.0f, 8343.0f, 8344.0f, 8345.0f, 8346.0f, 8347.0f, 8348.0f, 8349.0f, 8350.0f, 8351.0f, 8352.0f, 8353.0f, 8354.0f, 8355.0f, 8356.0f, 8357.0f, 8358.0f, 8359.0f, 8360.0f, 8361.0f, 8362.0f, 8363.0f, 8364.0f, 8365.0f, 8366.0f, 8367.0f, 8368.0f, 8369.0f, 8370.0f, 8371.0f, 8372.0f, 8373.0f, 8374.0f, 8375.0f, 8376.0f, 8377.0f, 8378.0f, 8379.0f, 8380.0f, 8381.0f, 8382.0f, 8383.0f, 8384.0f, 8385.0f, 8386.0f, 8387.0f, 8388.0f, 8389.0f, 8390.0f, 8391.0f, 8392.0f, 8393.0f, 8394.0f, 8395.0f, 8396.0f, 8397.0f, 8398.0f, 8399.0f, 8400.0f, 8401.0f, 8402.0f, 8403.0f, 8404.0f, 8405.0f, 8406.0f, 8407.0f, 8408.0f, 8409.0f, 8410.0f, 8411.0f, 8412.0f, 8413.0f, 8414.0f, 8415.0f, 8416.0f, 8417.0f, 8418.0f, 8419.0f, 8420.0f, 8421.0f, 8422.0f, 8423.0f, 8424.0f, 8425.0f, 8426.0f, 8427.0f, 8428.0f, 8429.0f, 8430.0f, 8431.0f, 8432.0f, 8433.0f, 8434.0f, 8435.0f, 8436.0f, 8437.0f, 8438.0f, 8439.0f, 8440.0f, 8441.0f, 8442.0f, 8443.0f, 8444.0f, 8445.0f, 8446.0f, 8447.0f, 8448.0f, 8449.0f, 8450.0f, 8451.0f, 8452.0f, 8453.0f, 8454.0f, 8455.0f, 8456.0f, 8457.0f, 8458.0f, 8459.0f, 8460.0f, 8461.0f, 8462.0f, 8463.0f, 8464.0f, 8465.0f, 8466.0f, 8467.0f, 8468.0f, 8469.0f, 8470.0f, 8471.0f, 8472.0f, 8473.0f, 8474.0f, 8475.0f, 8476.0f, 8477.0f, 8478.0f, 8479.0f, 8480.0f, 8481.0f, 8482.0f, 8483.0f, 8484.0f, 8485.0f, 8486.0f, 8487.0f, 8488.0f, 8489.0f, 8490.0f, 8491.0f, 8492.0f, 8493.0f, 8494.0f, 8495.0f, 8496.0f, 8497.0f, 8498.0f, 8499.0f, 8500.0f, 8501.0f, 8502.0f, 8503.0f, 8504.0f, 8505.0f, 8506.0f, 8507.0f, 8508.0f, 8509.0f, 8510.0f, 8511.0f, 8512.0f, 8513.0f, 8514.0f, 8515.0f, 8516.0f, 8517.0f, 8518.0f, 8519.0f, 8520.0f, 8521.0f, 8522.0f, 8523.0f, 8524.0f, 8525.0f, 8526.0f, 8527.0f, 8528.0f, 8529.0f, 8530.0f, 8531.0f, 8532.0f, 8533.0f, 8534.0f, 8535.0f, 8536.0f, 8537.0f, 8538.0f, 8539.0f, 8540.0f, 8541.0f, 8542.0f, 8543.0f, 8544.0f, 8545.0f, 8546.0f, 8547.0f, 8548.0f, 8549.0f, 8550.0f, 8551.0f, 8552.0f, 8553.0f, 8554.0f, 8555.0f, 8556.0f, 8557.0f, 
8558.0f, 8559.0f, 8560.0f, 8561.0f, 8562.0f, 8563.0f, 8564.0f, 8565.0f, 8566.0f, 8567.0f, 8568.0f, 8569.0f, 8570.0f, 8571.0f, 8572.0f, 8573.0f, 8574.0f, 8575.0f, 8576.0f, 8577.0f, 8578.0f, 8579.0f, 8580.0f, 8581.0f, 8582.0f, 8583.0f, 8584.0f, 8585.0f, 8586.0f, 8587.0f, 8588.0f, 8589.0f, 8590.0f, 8591.0f, 8592.0f, 8593.0f, 8594.0f, 8595.0f, 8596.0f, 8597.0f, 8598.0f, 8599.0f, 8600.0f, 8601.0f, 8602.0f, 8603.0f, 8604.0f, 8605.0f, 8606.0f, 8607.0f, 8608.0f, 8609.0f, 8610.0f, 8611.0f, 8612.0f, 8613.0f, 8614.0f, 8615.0f, 8616.0f, 8617.0f, 8618.0f, 8619.0f, 8620.0f, 8621.0f, 8622.0f, 8623.0f, 8624.0f, 8625.0f, 8626.0f, 8627.0f, 8628.0f, 8629.0f, 8630.0f, 8631.0f, 8632.0f, 8633.0f, 8634.0f, 8635.0f, 8636.0f, 8637.0f, 8638.0f, 8639.0f, 8640.0f, 8641.0f, 8642.0f, 8643.0f, 8644.0f, 8645.0f, 8646.0f, 8647.0f, 8648.0f, 8649.0f, 8650.0f, 8651.0f, 8652.0f, 8653.0f, 8654.0f, 8655.0f, 8656.0f, 8657.0f, 8658.0f, 8659.0f, 8660.0f, 8661.0f, 8662.0f, 8663.0f, 8664.0f, 8665.0f, 8666.0f, 8667.0f, 8668.0f, 8669.0f, 8670.0f, 8671.0f, 8672.0f, 8673.0f, 8674.0f, 8675.0f, 8676.0f, 8677.0f, 8678.0f, 8679.0f, 8680.0f, 8681.0f, 8682.0f, 8683.0f, 8684.0f, 8685.0f, 8686.0f, 8687.0f, 8688.0f, 8689.0f, 8690.0f, 8691.0f, 8692.0f, 8693.0f, 8694.0f, 8695.0f, 8696.0f, 8697.0f, 8698.0f, 8699.0f, 8700.0f, 8701.0f, 8702.0f, 8703.0f, 8704.0f, 8705.0f, 8706.0f, 8707.0f, 8708.0f, 8709.0f, 8710.0f, 8711.0f, 8712.0f, 8713.0f, 8714.0f, 8715.0f, 8716.0f, 8717.0f, 8718.0f, 8719.0f, 8720.0f, 8721.0f, 8722.0f, 8723.0f, 8724.0f, 8725.0f, 8726.0f, 8727.0f, 8728.0f, 8729.0f, 8730.0f, 8731.0f, 8732.0f, 8733.0f, 8734.0f, 8735.0f, 8736.0f, 8737.0f, 8738.0f, 8739.0f, 8740.0f, 8741.0f, 8742.0f, 8743.0f, 8744.0f, 8745.0f, 8746.0f, 8747.0f, 8748.0f, 8749.0f, 8750.0f, 8751.0f, 8752.0f, 8753.0f, 8754.0f, 8755.0f, 8756.0f, 8757.0f, 8758.0f, 8759.0f, 8760.0f, 8761.0f, 8762.0f, 8763.0f, 8764.0f, 8765.0f, 8766.0f, 8767.0f, 8768.0f, 8769.0f, 8770.0f, 8771.0f, 8772.0f, 8773.0f, 8774.0f, 8775.0f, 8776.0f, 8777.0f, 8778.0f, 8779.0f, 
8780.0f, 8781.0f, 8782.0f, 8783.0f, 8784.0f, 8785.0f, 8786.0f, 8787.0f, 8788.0f, 8789.0f, 8790.0f, 8791.0f, 8792.0f, 8793.0f, 8794.0f, 8795.0f, 8796.0f, 8797.0f, 8798.0f, 8799.0f, 8800.0f, 8801.0f, 8802.0f, 8803.0f, 8804.0f, 8805.0f, 8806.0f, 8807.0f, 8808.0f, 8809.0f, 8810.0f, 8811.0f, 8812.0f, 8813.0f, 8814.0f, 8815.0f, 8816.0f, 8817.0f, 8818.0f, 8819.0f, 8820.0f, 8821.0f, 8822.0f, 8823.0f, 8824.0f, 8825.0f, 8826.0f, 8827.0f, 8828.0f, 8829.0f, 8830.0f, 8831.0f, 8832.0f, 8833.0f, 8834.0f, 8835.0f, 8836.0f, 8837.0f, 8838.0f, 8839.0f, 8840.0f, 8841.0f, 8842.0f, 8843.0f, 8844.0f, 8845.0f, 8846.0f, 8847.0f, 8848.0f, 8849.0f, 8850.0f, 8851.0f, 8852.0f, 8853.0f, 8854.0f, 8855.0f, 8856.0f, 8857.0f, 8858.0f, 8859.0f, 8860.0f, 8861.0f, 8862.0f, 8863.0f, 8864.0f, 8865.0f, 8866.0f, 8867.0f, 8868.0f, 8869.0f, 8870.0f, 8871.0f, 8872.0f, 8873.0f, 8874.0f, 8875.0f, 8876.0f, 8877.0f, 8878.0f, 8879.0f, 8880.0f, 8881.0f, 8882.0f, 8883.0f, 8884.0f, 8885.0f, 8886.0f, 8887.0f, 8888.0f, 8889.0f, 8890.0f, 8891.0f, 8892.0f, 8893.0f, 8894.0f, 8895.0f, 8896.0f, 8897.0f, 8898.0f, 8899.0f, 8900.0f, 8901.0f, 8902.0f, 8903.0f, 8904.0f, 8905.0f, 8906.0f, 8907.0f, 8908.0f, 8909.0f, 8910.0f, 8911.0f, 8912.0f, 8913.0f, 8914.0f, 8915.0f, 8916.0f, 8917.0f, 8918.0f, 8919.0f, 8920.0f, 8921.0f, 8922.0f, 8923.0f, 8924.0f, 8925.0f, 8926.0f, 8927.0f, 8928.0f, 8929.0f, 8930.0f, 8931.0f, 8932.0f, 8933.0f, 8934.0f, 8935.0f, 8936.0f, 8937.0f, 8938.0f, 8939.0f, 8940.0f, 8941.0f, 8942.0f, 8943.0f, 8944.0f, 8945.0f, 8946.0f, 8947.0f, 8948.0f, 8949.0f, 8950.0f, 8951.0f, 8952.0f, 8953.0f, 8954.0f, 8955.0f, 8956.0f, 8957.0f, 8958.0f, 8959.0f, 8960.0f, 8961.0f, 8962.0f, 8963.0f, 8964.0f, 8965.0f, 8966.0f, 8967.0f, 8968.0f, 8969.0f, 8970.0f, 8971.0f, 8972.0f, 8973.0f, 8974.0f, 8975.0f, 8976.0f, 8977.0f, 8978.0f, 8979.0f, 8980.0f, 8981.0f, 8982.0f, 8983.0f, 8984.0f, 8985.0f, 8986.0f, 8987.0f, 8988.0f, 8989.0f, 8990.0f, 8991.0f, 8992.0f, 8993.0f, 8994.0f, 8995.0f, 8996.0f, 8997.0f, 8998.0f, 8999.0f, 9000.0f, 9001.0f, 
9002.0f, 9003.0f, 9004.0f, 9005.0f, 9006.0f, 9007.0f, 9008.0f, 9009.0f, 9010.0f, 9011.0f, 9012.0f, 9013.0f, 9014.0f, 9015.0f, 9016.0f, 9017.0f, 9018.0f, 9019.0f, 9020.0f, 9021.0f, 9022.0f, 9023.0f, 9024.0f, 9025.0f, 9026.0f, 9027.0f, 9028.0f, 9029.0f, 9030.0f, 9031.0f, 9032.0f, 9033.0f, 9034.0f, 9035.0f, 9036.0f, 9037.0f, 9038.0f, 9039.0f, 9040.0f, 9041.0f, 9042.0f, 9043.0f, 9044.0f, 9045.0f, 9046.0f, 9047.0f, 9048.0f, 9049.0f, 9050.0f, 9051.0f, 9052.0f, 9053.0f, 9054.0f, 9055.0f, 9056.0f, 9057.0f, 9058.0f, 9059.0f, 9060.0f, 9061.0f, 9062.0f, 9063.0f, 9064.0f, 9065.0f, 9066.0f, 9067.0f, 9068.0f, 9069.0f, 9070.0f, 9071.0f, 9072.0f, 9073.0f, 9074.0f, 9075.0f, 9076.0f, 9077.0f, 9078.0f, 9079.0f, 9080.0f, 9081.0f, 9082.0f, 9083.0f, 9084.0f, 9085.0f, 9086.0f, 9087.0f, 9088.0f, 9089.0f, 9090.0f, 9091.0f, 9092.0f, 9093.0f, 9094.0f, 9095.0f, 9096.0f, 9097.0f, 9098.0f, 9099.0f, 9100.0f, 9101.0f, 9102.0f, 9103.0f, 9104.0f, 9105.0f, 9106.0f, 9107.0f, 9108.0f, 9109.0f, 9110.0f, 9111.0f, 9112.0f, 9113.0f, 9114.0f, 9115.0f, 9116.0f, 9117.0f, 9118.0f, 9119.0f, 9120.0f, 9121.0f, 9122.0f, 9123.0f, 9124.0f, 9125.0f, 9126.0f, 9127.0f, 9128.0f, 9129.0f, 9130.0f, 9131.0f, 9132.0f, 9133.0f, 9134.0f, 9135.0f, 9136.0f, 9137.0f, 9138.0f, 9139.0f, 9140.0f, 9141.0f, 9142.0f, 9143.0f, 9144.0f, 9145.0f, 9146.0f, 9147.0f, 9148.0f, 9149.0f, 9150.0f, 9151.0f, 9152.0f, 9153.0f, 9154.0f, 9155.0f, 9156.0f, 9157.0f, 9158.0f, 9159.0f, 9160.0f, 9161.0f, 9162.0f, 9163.0f, 9164.0f, 9165.0f, 9166.0f, 9167.0f, 9168.0f, 9169.0f, 9170.0f, 9171.0f, 9172.0f, 9173.0f, 9174.0f, 9175.0f, 9176.0f, 9177.0f, 9178.0f, 9179.0f, 9180.0f, 9181.0f, 9182.0f, 9183.0f, 9184.0f, 9185.0f, 9186.0f, 9187.0f, 9188.0f, 9189.0f, 9190.0f, 9191.0f, 9192.0f, 9193.0f, 9194.0f, 9195.0f, 9196.0f, 9197.0f, 9198.0f, 9199.0f, 9200.0f, 9201.0f, 9202.0f, 9203.0f, 9204.0f, 9205.0f, 9206.0f, 9207.0f, 9208.0f, 9209.0f, 9210.0f, 9211.0f, 9212.0f, 9213.0f, 9214.0f, 9215.0f, 9216.0f, 9217.0f, 9218.0f, 9219.0f, 9220.0f, 9221.0f, 9222.0f, 9223.0f, 
9224.0f, 9225.0f, 9226.0f, 9227.0f, 9228.0f, 9229.0f, 9230.0f, 9231.0f, 9232.0f, 9233.0f, 9234.0f, 9235.0f, 9236.0f, 9237.0f, 9238.0f, 9239.0f, 9240.0f, 9241.0f, 9242.0f, 9243.0f, 9244.0f, 9245.0f, 9246.0f, 9247.0f, 9248.0f, 9249.0f, 9250.0f, 9251.0f, 9252.0f, 9253.0f, 9254.0f, 9255.0f, 9256.0f, 9257.0f, 9258.0f, 9259.0f, 9260.0f, 9261.0f, 9262.0f, 9263.0f, 9264.0f, 9265.0f, 9266.0f, 9267.0f, 9268.0f, 9269.0f, 9270.0f, 9271.0f, 9272.0f, 9273.0f, 9274.0f, 9275.0f, 9276.0f, 9277.0f, 9278.0f, 9279.0f, 9280.0f, 9281.0f, 9282.0f, 9283.0f, 9284.0f, 9285.0f, 9286.0f, 9287.0f, 9288.0f, 9289.0f, 9290.0f, 9291.0f, 9292.0f, 9293.0f, 9294.0f, 9295.0f, 9296.0f, 9297.0f, 9298.0f, 9299.0f, 9300.0f, 9301.0f, 9302.0f, 9303.0f, 9304.0f, 9305.0f, 9306.0f, 9307.0f, 9308.0f, 9309.0f, 9310.0f, 9311.0f, 9312.0f, 9313.0f, 9314.0f, 9315.0f, 9316.0f, 9317.0f, 9318.0f, 9319.0f, 9320.0f, 9321.0f, 9322.0f, 9323.0f, 9324.0f, 9325.0f, 9326.0f, 9327.0f, 9328.0f, 9329.0f, 9330.0f, 9331.0f, 9332.0f, 9333.0f, 9334.0f, 9335.0f, 9336.0f, 9337.0f, 9338.0f, 9339.0f, 9340.0f, 9341.0f, 9342.0f, 9343.0f, 9344.0f, 9345.0f, 9346.0f, 9347.0f, 9348.0f, 9349.0f, 9350.0f, 9351.0f, 9352.0f, 9353.0f, 9354.0f, 9355.0f, 9356.0f, 9357.0f, 9358.0f, 9359.0f, 9360.0f, 9361.0f, 9362.0f, 9363.0f, 9364.0f, 9365.0f, 9366.0f, 9367.0f, 9368.0f, 9369.0f, 9370.0f, 9371.0f, 9372.0f, 9373.0f, 9374.0f, 9375.0f, 9376.0f, 9377.0f, 9378.0f, 9379.0f, 9380.0f, 9381.0f, 9382.0f, 9383.0f, 9384.0f, 9385.0f, 9386.0f, 9387.0f, 9388.0f, 9389.0f, 9390.0f, 9391.0f, 9392.0f, 9393.0f, 9394.0f, 9395.0f, 9396.0f, 9397.0f, 9398.0f, 9399.0f, 9400.0f, 9401.0f, 9402.0f, 9403.0f, 9404.0f, 9405.0f, 9406.0f, 9407.0f, 9408.0f, 9409.0f, 9410.0f, 9411.0f, 9412.0f, 9413.0f, 9414.0f, 9415.0f, 9416.0f, 9417.0f, 9418.0f, 9419.0f, 9420.0f, 9421.0f, 9422.0f, 9423.0f, 9424.0f, 9425.0f, 9426.0f, 9427.0f, 9428.0f, 9429.0f, 9430.0f, 9431.0f, 9432.0f, 9433.0f, 9434.0f, 9435.0f, 9436.0f, 9437.0f, 9438.0f, 9439.0f, 9440.0f, 9441.0f, 9442.0f, 9443.0f, 9444.0f, 9445.0f, 
9446.0f, 9447.0f, 9448.0f, 9449.0f, 9450.0f, 9451.0f, 9452.0f, 9453.0f, 9454.0f, 9455.0f, 9456.0f, 9457.0f, 9458.0f, 9459.0f, 9460.0f, 9461.0f, 9462.0f, 9463.0f, 9464.0f, 9465.0f, 9466.0f, 9467.0f, 9468.0f, 9469.0f, 9470.0f, 9471.0f, 9472.0f, 9473.0f, 9474.0f, 9475.0f, 9476.0f, 9477.0f, 9478.0f, 9479.0f, 9480.0f, 9481.0f, 9482.0f, 9483.0f, 9484.0f, 9485.0f, 9486.0f, 9487.0f, 9488.0f, 9489.0f, 9490.0f, 9491.0f, 9492.0f, 9493.0f, 9494.0f, 9495.0f, 9496.0f, 9497.0f, 9498.0f, 9499.0f, 9500.0f, 9501.0f, 9502.0f, 9503.0f, 9504.0f, 9505.0f, 9506.0f, 9507.0f, 9508.0f, 9509.0f, 9510.0f, 9511.0f, 9512.0f, 9513.0f, 9514.0f, 9515.0f, 9516.0f, 9517.0f, 9518.0f, 9519.0f, 9520.0f, 9521.0f, 9522.0f, 9523.0f, 9524.0f, 9525.0f, 9526.0f, 9527.0f, 9528.0f, 9529.0f, 9530.0f, 9531.0f, 9532.0f, 9533.0f, 9534.0f, 9535.0f, 9536.0f, 9537.0f, 9538.0f, 9539.0f, 9540.0f, 9541.0f, 9542.0f, 9543.0f, 9544.0f, 9545.0f, 9546.0f, 9547.0f, 9548.0f, 9549.0f, 9550.0f, 9551.0f, 9552.0f, 9553.0f, 9554.0f, 9555.0f, 9556.0f, 9557.0f, 9558.0f, 9559.0f, 9560.0f, 9561.0f, 9562.0f, 9563.0f, 9564.0f, 9565.0f, 9566.0f, 9567.0f, 9568.0f, 9569.0f, 9570.0f, 9571.0f, 9572.0f, 9573.0f, 9574.0f, 9575.0f, 9576.0f, 9577.0f, 9578.0f, 9579.0f, 9580.0f, 9581.0f, 9582.0f, 9583.0f, 9584.0f, 9585.0f, 9586.0f, 9587.0f, 9588.0f, 9589.0f, 9590.0f, 9591.0f, 9592.0f, 9593.0f, 9594.0f, 9595.0f, 9596.0f, 9597.0f, 9598.0f, 9599.0f, 9600.0f, 9601.0f, 9602.0f, 9603.0f, 9604.0f, 9605.0f, 9606.0f, 9607.0f, 9608.0f, 9609.0f, 9610.0f, 9611.0f, 9612.0f, 9613.0f, 9614.0f, 9615.0f, 9616.0f, 9617.0f, 9618.0f, 9619.0f, 9620.0f, 9621.0f, 9622.0f, 9623.0f, 9624.0f, 9625.0f, 9626.0f, 9627.0f, 9628.0f, 9629.0f, 9630.0f, 9631.0f, 9632.0f, 9633.0f, 9634.0f, 9635.0f, 9636.0f, 9637.0f, 9638.0f, 9639.0f, 9640.0f, 9641.0f, 9642.0f, 9643.0f, 9644.0f, 9645.0f, 9646.0f, 9647.0f, 9648.0f, 9649.0f, 9650.0f, 9651.0f, 9652.0f, 9653.0f, 9654.0f, 9655.0f, 9656.0f, 9657.0f, 9658.0f, 9659.0f, 9660.0f, 9661.0f, 9662.0f, 9663.0f, 9664.0f, 9665.0f, 9666.0f, 9667.0f, 
9668.0f, 9669.0f, 9670.0f, 9671.0f, 9672.0f, 9673.0f, 9674.0f, 9675.0f, 9676.0f, 9677.0f, 9678.0f, 9679.0f, 9680.0f, 9681.0f, 9682.0f, 9683.0f, 9684.0f, 9685.0f, 9686.0f, 9687.0f, 9688.0f, 9689.0f, 9690.0f, 9691.0f, 9692.0f, 9693.0f, 9694.0f, 9695.0f, 9696.0f, 9697.0f, 9698.0f, 9699.0f, 9700.0f, 9701.0f, 9702.0f, 9703.0f, 9704.0f, 9705.0f, 9706.0f, 9707.0f, 9708.0f, 9709.0f, 9710.0f, 9711.0f, 9712.0f, 9713.0f, 9714.0f, 9715.0f, 9716.0f, 9717.0f, 9718.0f, 9719.0f, 9720.0f, 9721.0f, 9722.0f, 9723.0f, 9724.0f, 9725.0f, 9726.0f, 9727.0f, 9728.0f, 9729.0f, 9730.0f, 9731.0f, 9732.0f, 9733.0f, 9734.0f, 9735.0f, 9736.0f, 9737.0f, 9738.0f, 9739.0f, 9740.0f, 9741.0f, 9742.0f, 9743.0f, 9744.0f, 9745.0f, 9746.0f, 9747.0f, 9748.0f, 9749.0f, 9750.0f, 9751.0f, 9752.0f, 9753.0f, 9754.0f, 9755.0f, 9756.0f, 9757.0f, 9758.0f, 9759.0f, 9760.0f, 9761.0f, 9762.0f, 9763.0f, 9764.0f, 9765.0f, 9766.0f, 9767.0f, 9768.0f, 9769.0f, 9770.0f, 9771.0f, 9772.0f, 9773.0f, 9774.0f, 9775.0f, 9776.0f, 9777.0f, 9778.0f, 9779.0f, 9780.0f, 9781.0f, 9782.0f, 9783.0f, 9784.0f, 9785.0f, 9786.0f, 9787.0f, 9788.0f, 9789.0f, 9790.0f, 9791.0f, 9792.0f, 9793.0f, 9794.0f, 9795.0f, 9796.0f, 9797.0f, 9798.0f, 9799.0f, 9800.0f, 9801.0f, 9802.0f, 9803.0f, 9804.0f, 9805.0f, 9806.0f, 9807.0f, 9808.0f, 9809.0f, 9810.0f, 9811.0f, 9812.0f, 9813.0f, 9814.0f, 9815.0f, 9816.0f, 9817.0f, 9818.0f, 9819.0f, 9820.0f, 9821.0f, 9822.0f, 9823.0f, 9824.0f, 9825.0f, 9826.0f, 9827.0f, 9828.0f, 9829.0f, 9830.0f, 9831.0f, 9832.0f, 9833.0f, 9834.0f, 9835.0f, 9836.0f, 9837.0f, 9838.0f, 9839.0f, 9840.0f, 9841.0f, 9842.0f, 9843.0f, 9844.0f, 9845.0f, 9846.0f, 9847.0f, 9848.0f, 9849.0f, 9850.0f, 9851.0f, 9852.0f, 9853.0f, 9854.0f, 9855.0f, 9856.0f, 9857.0f, 9858.0f, 9859.0f, 9860.0f, 9861.0f, 9862.0f, 9863.0f, 9864.0f, 9865.0f, 9866.0f, 9867.0f, 9868.0f, 9869.0f, 9870.0f, 9871.0f, 9872.0f, 9873.0f, 9874.0f, 9875.0f, 9876.0f, 9877.0f, 9878.0f, 9879.0f, 9880.0f, 9881.0f, 9882.0f, 9883.0f, 9884.0f, 9885.0f, 9886.0f, 9887.0f, 9888.0f, 9889.0f, 
9890.0f, 9891.0f, 9892.0f, 9893.0f, 9894.0f, 9895.0f, 9896.0f, 9897.0f, 9898.0f, 9899.0f, 9900.0f, 9901.0f, 9902.0f, 9903.0f, 9904.0f, 9905.0f, 9906.0f, 9907.0f, 9908.0f, 9909.0f, 9910.0f, 9911.0f, 9912.0f, 9913.0f, 9914.0f, 9915.0f, 9916.0f, 9917.0f, 9918.0f, 9919.0f, 9920.0f, 9921.0f, 9922.0f, 9923.0f, 9924.0f, 9925.0f, 9926.0f, 9927.0f, 9928.0f, 9929.0f, 9930.0f, 9931.0f, 9932.0f, 9933.0f, 9934.0f, 9935.0f, 9936.0f, 9937.0f, 9938.0f, 9939.0f, 9940.0f, 9941.0f, 9942.0f, 9943.0f, 9944.0f, 9945.0f, 9946.0f, 9947.0f, 9948.0f, 9949.0f, 9950.0f, 9951.0f, 9952.0f, 9953.0f, 9954.0f, 9955.0f, 9956.0f, 9957.0f, 9958.0f, 9959.0f, 9960.0f, 9961.0f, 9962.0f, 9963.0f, 9964.0f, 9965.0f, 9966.0f, 9967.0f, 9968.0f, 9969.0f, 9970.0f, 9971.0f, 9972.0f, 9973.0f, 9974.0f, 9975.0f, 9976.0f, 9977.0f, 9978.0f, 9979.0f, 9980.0f, 9981.0f, 9982.0f, 9983.0f, 9984.0f, 9985.0f, 9986.0f, 9987.0f, 9988.0f, 9989.0f, 9990.0f, 9991.0f, 9992.0f, 9993.0f, 9994.0f, 9995.0f, 9996.0f, 9997.0f, 9998.0f, 9999.0f, 10000.0f, 10001.0f, 10002.0f, 10003.0f, 10004.0f, 10005.0f, 10006.0f, 10007.0f, 10008.0f, 10009.0f, 10010.0f, 10011.0f, 10012.0f, 10013.0f, 10014.0f, 10015.0f, 10016.0f, 10017.0f, 10018.0f, 10019.0f, 10020.0f, 10021.0f, 10022.0f, 10023.0f, 10024.0f, 10025.0f, 10026.0f, 10027.0f, 10028.0f, 10029.0f, 10030.0f, 10031.0f, 10032.0f, 10033.0f, 10034.0f, 10035.0f, 10036.0f, 10037.0f, 10038.0f, 10039.0f, 10040.0f, 10041.0f, 10042.0f, 10043.0f, 10044.0f, 10045.0f, 10046.0f, 10047.0f, 10048.0f, 10049.0f, 10050.0f, 10051.0f, 10052.0f, 10053.0f, 10054.0f, 10055.0f, 10056.0f, 10057.0f, 10058.0f, 10059.0f, 10060.0f, 10061.0f, 10062.0f, 10063.0f, 10064.0f, 10065.0f, 10066.0f, 10067.0f, 10068.0f, 10069.0f, 10070.0f, 10071.0f, 10072.0f, 10073.0f, 10074.0f, 10075.0f, 10076.0f, 10077.0f, 10078.0f, 10079.0f, 10080.0f, 10081.0f, 10082.0f, 10083.0f, 10084.0f, 10085.0f, 10086.0f, 10087.0f, 10088.0f, 10089.0f, 10090.0f, 10091.0f, 10092.0f, 10093.0f, 10094.0f, 10095.0f, 10096.0f, 10097.0f, 10098.0f, 10099.0f, 10100.0f, 
10101.0f, 10102.0f, 10103.0f, 10104.0f, 10105.0f, 10106.0f, 10107.0f, 10108.0f, 10109.0f, 10110.0f, 10111.0f, 10112.0f, 10113.0f, 10114.0f, 10115.0f, 10116.0f, 10117.0f, 10118.0f, 10119.0f, 10120.0f, 10121.0f, 10122.0f, 10123.0f, 10124.0f, 10125.0f, 10126.0f, 10127.0f, 10128.0f, 10129.0f, 10130.0f, 10131.0f, 10132.0f, 10133.0f, 10134.0f, 10135.0f, 10136.0f, 10137.0f, 10138.0f, 10139.0f, 10140.0f, 10141.0f, 10142.0f, 10143.0f, 10144.0f, 10145.0f, 10146.0f, 10147.0f, 10148.0f, 10149.0f, 10150.0f, 10151.0f, 10152.0f, 10153.0f, 10154.0f, 10155.0f, 10156.0f, 10157.0f, 10158.0f, 10159.0f, 10160.0f, 10161.0f, 10162.0f, 10163.0f, 10164.0f, 10165.0f, 10166.0f, 10167.0f, 10168.0f, 10169.0f, 10170.0f, 10171.0f, 10172.0f, 10173.0f, 10174.0f, 10175.0f, 10176.0f, 10177.0f, 10178.0f, 10179.0f, 10180.0f, 10181.0f, 10182.0f, 10183.0f, 10184.0f, 10185.0f, 10186.0f, 10187.0f, 10188.0f, 10189.0f, 10190.0f, 10191.0f, 10192.0f, 10193.0f, 10194.0f, 10195.0f, 10196.0f, 10197.0f, 10198.0f, 10199.0f, 10200.0f, 10201.0f, 10202.0f, 10203.0f, 10204.0f, 10205.0f, 10206.0f, 10207.0f, 10208.0f, 10209.0f, 10210.0f, 10211.0f, 10212.0f, 10213.0f, 10214.0f, 10215.0f, 10216.0f, 10217.0f, 10218.0f, 10219.0f, 10220.0f, 10221.0f, 10222.0f, 10223.0f, 10224.0f, 10225.0f, 10226.0f, 10227.0f, 10228.0f, 10229.0f, 10230.0f, 10231.0f, 10232.0f, 10233.0f, 10234.0f, 10235.0f, 10236.0f, 10237.0f, 10238.0f, 10239.0f, 10240.0f, 10241.0f, 10242.0f, 10243.0f, 10244.0f, 10245.0f, 10246.0f, 10247.0f, 10248.0f, 10249.0f, 10250.0f, 10251.0f, 10252.0f, 10253.0f, 10254.0f, 10255.0f, 10256.0f, 10257.0f, 10258.0f, 10259.0f, 10260.0f, 10261.0f, 10262.0f, 10263.0f, 10264.0f, 10265.0f, 10266.0f, 10267.0f, 10268.0f, 10269.0f, 10270.0f, 10271.0f, 10272.0f, 10273.0f, 10274.0f, 10275.0f, 10276.0f, 10277.0f, 10278.0f, 10279.0f, 10280.0f, 10281.0f, 10282.0f, 10283.0f, 10284.0f, 10285.0f, 10286.0f, 10287.0f, 10288.0f, 10289.0f, 10290.0f, 10291.0f, 10292.0f, 10293.0f, 10294.0f, 10295.0f, 10296.0f, 10297.0f, 10298.0f, 10299.0f, 10300.0f, 
10301.0f, 10302.0f, 10303.0f, 10304.0f, 10305.0f, 10306.0f, 10307.0f, 10308.0f, 10309.0f, 10310.0f, 10311.0f, 10312.0f, 10313.0f, 10314.0f, 10315.0f, 10316.0f, 10317.0f, 10318.0f, 10319.0f, 10320.0f, 10321.0f, 10322.0f, 10323.0f, 10324.0f, 10325.0f, 10326.0f, 10327.0f, 10328.0f, 10329.0f, 10330.0f, 10331.0f, 10332.0f, 10333.0f, 10334.0f, 10335.0f, 10336.0f, 10337.0f, 10338.0f, 10339.0f, 10340.0f, 10341.0f, 10342.0f, 10343.0f, 10344.0f, 10345.0f, 10346.0f, 10347.0f, 10348.0f, 10349.0f, 10350.0f, 10351.0f, 10352.0f, 10353.0f, 10354.0f, 10355.0f, 10356.0f, 10357.0f, 10358.0f, 10359.0f, 10360.0f, 10361.0f, 10362.0f, 10363.0f, 10364.0f, 10365.0f, 10366.0f, 10367.0f, 10368.0f, 10369.0f, 10370.0f, 10371.0f, 10372.0f, 10373.0f, 10374.0f, 10375.0f, 10376.0f, 10377.0f, 10378.0f, 10379.0f, 10380.0f, 10381.0f, 10382.0f, 10383.0f, 10384.0f, 10385.0f, 10386.0f, 10387.0f, 10388.0f, 10389.0f, 10390.0f, 10391.0f, 10392.0f, 10393.0f, 10394.0f, 10395.0f, 10396.0f, 10397.0f, 10398.0f, 10399.0f, 10400.0f, 10401.0f, 10402.0f, 10403.0f, 10404.0f, 10405.0f, 10406.0f, 10407.0f, 10408.0f, 10409.0f, 10410.0f, 10411.0f, 10412.0f, 10413.0f, 10414.0f, 10415.0f, 10416.0f, 10417.0f, 10418.0f, 10419.0f, 10420.0f, 10421.0f, 10422.0f, 10423.0f, 10424.0f, 10425.0f, 10426.0f, 10427.0f, 10428.0f, 10429.0f, 10430.0f, 10431.0f, 10432.0f, 10433.0f, 10434.0f, 10435.0f, 10436.0f, 10437.0f, 10438.0f, 10439.0f, 10440.0f, 10441.0f, 10442.0f, 10443.0f, 10444.0f, 10445.0f, 10446.0f, 10447.0f, 10448.0f, 10449.0f, 10450.0f, 10451.0f, 10452.0f, 10453.0f, 10454.0f, 10455.0f, 10456.0f, 10457.0f, 10458.0f, 10459.0f, 10460.0f, 10461.0f, 10462.0f, 10463.0f, 10464.0f, 10465.0f, 10466.0f, 10467.0f, 10468.0f, 10469.0f, 10470.0f, 10471.0f, 10472.0f, 10473.0f, 10474.0f, 10475.0f, 10476.0f, 10477.0f, 10478.0f, 10479.0f, 10480.0f, 10481.0f, 10482.0f, 10483.0f, 10484.0f, 10485.0f, 10486.0f, 10487.0f, 10488.0f, 10489.0f, 10490.0f, 10491.0f, 10492.0f, 10493.0f, 10494.0f, 10495.0f, 10496.0f, 10497.0f, 10498.0f, 10499.0f, 10500.0f, 
10501.0f, 10502.0f, 10503.0f, 10504.0f, 10505.0f, 10506.0f, 10507.0f, 10508.0f, 10509.0f, 10510.0f, 10511.0f, 10512.0f, 10513.0f, 10514.0f, 10515.0f, 10516.0f, 10517.0f, 10518.0f, 10519.0f, 10520.0f, 10521.0f, 10522.0f, 10523.0f, 10524.0f, 10525.0f, 10526.0f, 10527.0f, 10528.0f, 10529.0f, 10530.0f, 10531.0f, 10532.0f, 10533.0f, 10534.0f, 10535.0f, 10536.0f, 10537.0f, 10538.0f, 10539.0f, 10540.0f, 10541.0f, 10542.0f, 10543.0f, 10544.0f, 10545.0f, 10546.0f, 10547.0f, 10548.0f, 10549.0f, 10550.0f, 10551.0f, 10552.0f, 10553.0f, 10554.0f, 10555.0f, 10556.0f, 10557.0f, 10558.0f, 10559.0f, 10560.0f, 10561.0f, 10562.0f, 10563.0f, 10564.0f, 10565.0f, 10566.0f, 10567.0f, 10568.0f, 10569.0f, 10570.0f, 10571.0f, 10572.0f, 10573.0f, 10574.0f, 10575.0f, 10576.0f, 10577.0f, 10578.0f, 10579.0f, 10580.0f, 10581.0f, 10582.0f, 10583.0f, 10584.0f, 10585.0f, 10586.0f, 10587.0f, 10588.0f, 10589.0f, 10590.0f, 10591.0f, 10592.0f, 10593.0f, 10594.0f, 10595.0f, 10596.0f, 10597.0f, 10598.0f, 10599.0f, 10600.0f, 10601.0f, 10602.0f, 10603.0f, 10604.0f, 10605.0f, 10606.0f, 10607.0f, 10608.0f, 10609.0f, 10610.0f, 10611.0f, 10612.0f, 10613.0f, 10614.0f, 10615.0f, 10616.0f, 10617.0f, 10618.0f, 10619.0f, 10620.0f, 10621.0f, 10622.0f, 10623.0f, 10624.0f, 10625.0f, 10626.0f, 10627.0f, 10628.0f, 10629.0f, 10630.0f, 10631.0f, 10632.0f, 10633.0f, 10634.0f, 10635.0f, 10636.0f, 10637.0f, 10638.0f, 10639.0f, 10640.0f, 10641.0f, 10642.0f, 10643.0f, 10644.0f, 10645.0f, 10646.0f, 10647.0f, 10648.0f, 10649.0f, 10650.0f, 10651.0f, 10652.0f, 10653.0f, 10654.0f, 10655.0f, 10656.0f, 10657.0f, 10658.0f, 10659.0f, 10660.0f, 10661.0f, 10662.0f, 10663.0f, 10664.0f, 10665.0f, 10666.0f, 10667.0f, 10668.0f, 10669.0f, 10670.0f, 10671.0f, 10672.0f, 10673.0f, 10674.0f, 10675.0f, 10676.0f, 10677.0f, 10678.0f, 10679.0f, 10680.0f, 10681.0f, 10682.0f, 10683.0f, 10684.0f, 10685.0f, 10686.0f, 10687.0f, 10688.0f, 10689.0f, 10690.0f, 10691.0f, 10692.0f, 10693.0f, 10694.0f, 10695.0f, 10696.0f, 10697.0f, 10698.0f, 10699.0f, 10700.0f, 
10701.0f, 10702.0f, 10703.0f, 10704.0f, 10705.0f, 10706.0f, 10707.0f, 10708.0f, 10709.0f, 10710.0f, 10711.0f, 10712.0f, 10713.0f, 10714.0f, 10715.0f, 10716.0f, 10717.0f, 10718.0f, 10719.0f, 10720.0f, 10721.0f, 10722.0f, 10723.0f, 10724.0f, 10725.0f, 10726.0f, 10727.0f, 10728.0f, 10729.0f, 10730.0f, 10731.0f, 10732.0f, 10733.0f, 10734.0f, 10735.0f, 10736.0f, 10737.0f, 10738.0f, 10739.0f, 10740.0f, 10741.0f, 10742.0f, 10743.0f, 10744.0f, 10745.0f, 10746.0f, 10747.0f, 10748.0f, 10749.0f, 10750.0f, 10751.0f, 10752.0f, 10753.0f, 10754.0f, 10755.0f, 10756.0f, 10757.0f, 10758.0f, 10759.0f, 10760.0f, 10761.0f, 10762.0f, 10763.0f, 10764.0f, 10765.0f, 10766.0f, 10767.0f, 10768.0f, 10769.0f, 10770.0f, 10771.0f, 10772.0f, 10773.0f, 10774.0f, 10775.0f, 10776.0f, 10777.0f, 10778.0f, 10779.0f, 10780.0f, 10781.0f, 10782.0f, 10783.0f, 10784.0f, 10785.0f, 10786.0f, 10787.0f, 10788.0f, 10789.0f, 10790.0f, 10791.0f, 10792.0f, 10793.0f, 10794.0f, 10795.0f, 10796.0f, 10797.0f, 10798.0f, 10799.0f, 10800.0f, 10801.0f, 10802.0f, 10803.0f, 10804.0f, 10805.0f, 10806.0f, 10807.0f, 10808.0f, 10809.0f, 10810.0f, 10811.0f, 10812.0f, 10813.0f, 10814.0f, 10815.0f, 10816.0f, 10817.0f, 10818.0f, 10819.0f, 10820.0f, 10821.0f, 10822.0f, 10823.0f, 10824.0f, 10825.0f, 10826.0f, 10827.0f, 10828.0f, 10829.0f, 10830.0f, 10831.0f, 10832.0f, 10833.0f, 10834.0f, 10835.0f, 10836.0f, 10837.0f, 10838.0f, 10839.0f, 10840.0f, 10841.0f, 10842.0f, 10843.0f, 10844.0f, 10845.0f, 10846.0f, 10847.0f, 10848.0f, 10849.0f, 10850.0f, 10851.0f, 10852.0f, 10853.0f, 10854.0f, 10855.0f, 10856.0f, 10857.0f, 10858.0f, 10859.0f, 10860.0f, 10861.0f, 10862.0f, 10863.0f, 10864.0f, 10865.0f, 10866.0f, 10867.0f, 10868.0f, 10869.0f, 10870.0f, 10871.0f, 10872.0f, 10873.0f, 10874.0f, 10875.0f, 10876.0f, 10877.0f, 10878.0f, 10879.0f, 10880.0f, 10881.0f, 10882.0f, 10883.0f, 10884.0f, 10885.0f, 10886.0f, 10887.0f, 10888.0f, 10889.0f, 10890.0f, 10891.0f, 10892.0f, 10893.0f, 10894.0f, 10895.0f, 10896.0f, 10897.0f, 10898.0f, 10899.0f, 10900.0f, 
10901.0f, 10902.0f, 10903.0f, 10904.0f, 10905.0f, 10906.0f, 10907.0f, 10908.0f, 10909.0f, 10910.0f, 10911.0f, 10912.0f, 10913.0f, 10914.0f, 10915.0f, 10916.0f, 10917.0f, 10918.0f, 10919.0f, 10920.0f, 10921.0f, 10922.0f, 10923.0f, 10924.0f, 10925.0f, 10926.0f, 10927.0f, 10928.0f, 10929.0f, 10930.0f, 10931.0f, 10932.0f, 10933.0f, 10934.0f, 10935.0f, 10936.0f, 10937.0f, 10938.0f, 10939.0f, 10940.0f, 10941.0f, 10942.0f, 10943.0f, 10944.0f, 10945.0f, 10946.0f, 10947.0f, 10948.0f, 10949.0f, 10950.0f, 10951.0f, 10952.0f, 10953.0f, 10954.0f, 10955.0f, 10956.0f, 10957.0f, 10958.0f, 10959.0f, 10960.0f, 10961.0f, 10962.0f, 10963.0f, 10964.0f, 10965.0f, 10966.0f, 10967.0f, 10968.0f, 10969.0f, 10970.0f, 10971.0f, 10972.0f, 10973.0f, 10974.0f, 10975.0f, 10976.0f, 10977.0f, 10978.0f, 10979.0f, 10980.0f, 10981.0f, 10982.0f, 10983.0f, 10984.0f, 10985.0f, 10986.0f, 10987.0f, 10988.0f, 10989.0f, 10990.0f, 10991.0f, 10992.0f, 10993.0f, 10994.0f, 10995.0f, 10996.0f, 10997.0f, 10998.0f, 10999.0f, 11000.0f, 11001.0f, 11002.0f, 11003.0f, 11004.0f, 11005.0f, 11006.0f, 11007.0f, 11008.0f, 11009.0f, 11010.0f, 11011.0f, 11012.0f, 11013.0f, 11014.0f, 11015.0f, 11016.0f, 11017.0f, 11018.0f, 11019.0f, 11020.0f, 11021.0f, 11022.0f, 11023.0f, 11024.0f, 11025.0f, 11026.0f, 11027.0f, 11028.0f, 11029.0f, 11030.0f, 11031.0f, 11032.0f, 11033.0f, 11034.0f, 11035.0f, 11036.0f, 11037.0f, 11038.0f, 11039.0f, 11040.0f, 11041.0f, 11042.0f, 11043.0f, 11044.0f, 11045.0f, 11046.0f, 11047.0f, 11048.0f, 11049.0f, 11050.0f, 11051.0f, 11052.0f, 11053.0f, 11054.0f, 11055.0f, 11056.0f, 11057.0f, 11058.0f, 11059.0f, 11060.0f, 11061.0f, 11062.0f, 11063.0f, 11064.0f, 11065.0f, 11066.0f, 11067.0f, 11068.0f, 11069.0f, 11070.0f, 11071.0f, 11072.0f, 11073.0f, 11074.0f, 11075.0f, 11076.0f, 11077.0f, 11078.0f, 11079.0f, 11080.0f, 11081.0f, 11082.0f, 11083.0f, 11084.0f, 11085.0f, 11086.0f, 11087.0f, 11088.0f, 11089.0f, 11090.0f, 11091.0f, 11092.0f, 11093.0f, 11094.0f, 11095.0f, 11096.0f, 11097.0f, 11098.0f, 11099.0f, 11100.0f, 
11101.0f, 11102.0f, 11103.0f, 11104.0f, 11105.0f, 11106.0f, 11107.0f, 11108.0f, 11109.0f, 11110.0f, 11111.0f, 11112.0f, 11113.0f, 11114.0f, 11115.0f, 11116.0f, 11117.0f, 11118.0f, 11119.0f, 11120.0f, 11121.0f, 11122.0f, 11123.0f, 11124.0f, 11125.0f, 11126.0f, 11127.0f, 11128.0f, 11129.0f, 11130.0f, 11131.0f, 11132.0f, 11133.0f, 11134.0f, 11135.0f, 11136.0f, 11137.0f, 11138.0f, 11139.0f, 11140.0f, 11141.0f, 11142.0f, 11143.0f, 11144.0f, 11145.0f, 11146.0f, 11147.0f, 11148.0f, 11149.0f, 11150.0f, 11151.0f, 11152.0f, 11153.0f, 11154.0f, 11155.0f, 11156.0f, 11157.0f, 11158.0f, 11159.0f, 11160.0f, 11161.0f, 11162.0f, 11163.0f, 11164.0f, 11165.0f, 11166.0f, 11167.0f, 11168.0f, 11169.0f, 11170.0f, 11171.0f, 11172.0f, 11173.0f, 11174.0f, 11175.0f, 11176.0f, 11177.0f, 11178.0f, 11179.0f, 11180.0f, 11181.0f, 11182.0f, 11183.0f, 11184.0f, 11185.0f, 11186.0f, 11187.0f, 11188.0f, 11189.0f, 11190.0f, 11191.0f, 11192.0f, 11193.0f, 11194.0f, 11195.0f, 11196.0f, 11197.0f, 11198.0f, 11199.0f, 11200.0f, 11201.0f, 11202.0f, 11203.0f, 11204.0f, 11205.0f, 11206.0f, 11207.0f, 11208.0f, 11209.0f, 11210.0f, 11211.0f, 11212.0f, 11213.0f, 11214.0f, 11215.0f, 11216.0f, 11217.0f, 11218.0f, 11219.0f, 11220.0f, 11221.0f, 11222.0f, 11223.0f, 11224.0f, 11225.0f, 11226.0f, 11227.0f, 11228.0f, 11229.0f, 11230.0f, 11231.0f, 11232.0f, 11233.0f, 11234.0f, 11235.0f, 11236.0f, 11237.0f, 11238.0f, 11239.0f, 11240.0f, 11241.0f, 11242.0f, 11243.0f, 11244.0f, 11245.0f, 11246.0f, 11247.0f, 11248.0f, 11249.0f, 11250.0f, 11251.0f, 11252.0f, 11253.0f, 11254.0f, 11255.0f, 11256.0f, 11257.0f, 11258.0f, 11259.0f, 11260.0f, 11261.0f, 11262.0f, 11263.0f, 11264.0f, 11265.0f, 11266.0f, 11267.0f, 11268.0f, 11269.0f, 11270.0f, 11271.0f, 11272.0f, 11273.0f, 11274.0f, 11275.0f, 11276.0f, 11277.0f, 11278.0f, 11279.0f, 11280.0f, 11281.0f, 11282.0f, 11283.0f, 11284.0f, 11285.0f, 11286.0f, 11287.0f, 11288.0f, 11289.0f, 11290.0f, 11291.0f, 11292.0f, 11293.0f, 11294.0f, 11295.0f, 11296.0f, 11297.0f, 11298.0f, 11299.0f, 11300.0f, 
11301.0f, 11302.0f, 11303.0f, 11304.0f, 11305.0f, 11306.0f, 11307.0f, 11308.0f, 11309.0f, 11310.0f, 11311.0f, 11312.0f, 11313.0f, 11314.0f, 11315.0f, 11316.0f, 11317.0f, 11318.0f, 11319.0f, 11320.0f, 11321.0f, 11322.0f, 11323.0f, 11324.0f, 11325.0f, 11326.0f, 11327.0f, 11328.0f, 11329.0f, 11330.0f, 11331.0f, 11332.0f, 11333.0f, 11334.0f, 11335.0f, 11336.0f, 11337.0f, 11338.0f, 11339.0f, 11340.0f, 11341.0f, 11342.0f, 11343.0f, 11344.0f, 11345.0f, 11346.0f, 11347.0f, 11348.0f, 11349.0f, 11350.0f, 11351.0f, 11352.0f, 11353.0f, 11354.0f, 11355.0f, 11356.0f, 11357.0f, 11358.0f, 11359.0f, 11360.0f, 11361.0f, 11362.0f, 11363.0f, 11364.0f, 11365.0f, 11366.0f, 11367.0f, 11368.0f, 11369.0f, 11370.0f, 11371.0f, 11372.0f, 11373.0f, 11374.0f, 11375.0f, 11376.0f, 11377.0f, 11378.0f, 11379.0f, 11380.0f, 11381.0f, 11382.0f, 11383.0f, 11384.0f, 11385.0f, 11386.0f, 11387.0f, 11388.0f, 11389.0f, 11390.0f, 11391.0f, 11392.0f, 11393.0f, 11394.0f, 11395.0f, 11396.0f, 11397.0f, 11398.0f, 11399.0f, 11400.0f, 11401.0f, 11402.0f, 11403.0f, 11404.0f, 11405.0f, 11406.0f, 11407.0f, 11408.0f, 11409.0f, 11410.0f, 11411.0f, 11412.0f, 11413.0f, 11414.0f, 11415.0f, 11416.0f, 11417.0f, 11418.0f, 11419.0f, 11420.0f, 11421.0f, 11422.0f, 11423.0f, 11424.0f, 11425.0f, 11426.0f, 11427.0f, 11428.0f, 11429.0f, 11430.0f, 11431.0f, 11432.0f, 11433.0f, 11434.0f, 11435.0f, 11436.0f, 11437.0f, 11438.0f, 11439.0f, 11440.0f, 11441.0f, 11442.0f, 11443.0f, 11444.0f, 11445.0f, 11446.0f, 11447.0f, 11448.0f, 11449.0f, 11450.0f, 11451.0f, 11452.0f, 11453.0f, 11454.0f, 11455.0f, 11456.0f, 11457.0f, 11458.0f, 11459.0f, 11460.0f, 11461.0f, 11462.0f, 11463.0f, 11464.0f, 11465.0f, 11466.0f, 11467.0f, 11468.0f, 11469.0f, 11470.0f, 11471.0f, 11472.0f, 11473.0f, 11474.0f, 11475.0f, 11476.0f, 11477.0f, 11478.0f, 11479.0f, 11480.0f, 11481.0f, 11482.0f, 11483.0f, 11484.0f, 11485.0f, 11486.0f, 11487.0f, 11488.0f, 11489.0f, 11490.0f, 11491.0f, 11492.0f, 11493.0f, 11494.0f, 11495.0f, 11496.0f, 11497.0f, 11498.0f, 11499.0f, 11500.0f, 
11501.0f, 11502.0f, 11503.0f, 11504.0f, 11505.0f, 11506.0f, 11507.0f, 11508.0f, 11509.0f, 11510.0f, 11511.0f, 11512.0f, 11513.0f, 11514.0f, 11515.0f, 11516.0f, 11517.0f, 11518.0f, 11519.0f, 11520.0f, 11521.0f, 11522.0f, 11523.0f, 11524.0f, 11525.0f, 11526.0f, 11527.0f, 11528.0f, 11529.0f, 11530.0f, 11531.0f, 11532.0f, 11533.0f, 11534.0f, 11535.0f, 11536.0f, 11537.0f, 11538.0f, 11539.0f, 11540.0f, 11541.0f, 11542.0f, 11543.0f, 11544.0f, 11545.0f, 11546.0f, 11547.0f, 11548.0f, 11549.0f, 11550.0f, 11551.0f, 11552.0f, 11553.0f, 11554.0f, 11555.0f, 11556.0f, 11557.0f, 11558.0f, 11559.0f, 11560.0f, 11561.0f, 11562.0f, 11563.0f, 11564.0f, 11565.0f, 11566.0f, 11567.0f, 11568.0f, 11569.0f, 11570.0f, 11571.0f, 11572.0f, 11573.0f, 11574.0f, 11575.0f, 11576.0f, 11577.0f, 11578.0f, 11579.0f, 11580.0f, 11581.0f, 11582.0f, 11583.0f, 11584.0f, 11585.0f, 11586.0f, 11587.0f, 11588.0f, 11589.0f, 11590.0f, 11591.0f, 11592.0f, 11593.0f, 11594.0f, 11595.0f, 11596.0f, 11597.0f, 11598.0f, 11599.0f, 11600.0f, 11601.0f, 11602.0f, 11603.0f, 11604.0f, 11605.0f, 11606.0f, 11607.0f, 11608.0f, 11609.0f, 11610.0f, 11611.0f, 11612.0f, 11613.0f, 11614.0f, 11615.0f, 11616.0f, 11617.0f, 11618.0f, 11619.0f, 11620.0f, 11621.0f, 11622.0f, 11623.0f, 11624.0f, 11625.0f, 11626.0f, 11627.0f, 11628.0f, 11629.0f, 11630.0f, 11631.0f, 11632.0f, 11633.0f, 11634.0f, 11635.0f, 11636.0f, 11637.0f, 11638.0f, 11639.0f, 11640.0f, 11641.0f, 11642.0f, 11643.0f, 11644.0f, 11645.0f, 11646.0f, 11647.0f, 11648.0f, 11649.0f, 11650.0f, 11651.0f, 11652.0f, 11653.0f, 11654.0f, 11655.0f, 11656.0f, 11657.0f, 11658.0f, 11659.0f, 11660.0f, 11661.0f, 11662.0f, 11663.0f, 11664.0f, 11665.0f, 11666.0f, 11667.0f, 11668.0f, 11669.0f, 11670.0f, 11671.0f, 11672.0f, 11673.0f, 11674.0f, 11675.0f, 11676.0f, 11677.0f, 11678.0f, 11679.0f, 11680.0f, 11681.0f, 11682.0f, 11683.0f, 11684.0f, 11685.0f, 11686.0f, 11687.0f, 11688.0f, 11689.0f, 11690.0f, 11691.0f, 11692.0f, 11693.0f, 11694.0f, 11695.0f, 11696.0f, 11697.0f, 11698.0f, 11699.0f, 11700.0f, 
11701.0f, 11702.0f, 11703.0f, 11704.0f, 11705.0f, 11706.0f, 11707.0f, 11708.0f, 11709.0f, 11710.0f, 11711.0f, 11712.0f, 11713.0f, 11714.0f, 11715.0f, 11716.0f, 11717.0f, 11718.0f, 11719.0f, 11720.0f, 11721.0f, 11722.0f, 11723.0f, 11724.0f, 11725.0f, 11726.0f, 11727.0f, 11728.0f, 11729.0f, 11730.0f, 11731.0f, 11732.0f, 11733.0f, 11734.0f, 11735.0f, 11736.0f, 11737.0f, 11738.0f, 11739.0f, 11740.0f, 11741.0f, 11742.0f, 11743.0f, 11744.0f, 11745.0f, 11746.0f, 11747.0f, 11748.0f, 11749.0f, 11750.0f, 11751.0f, 11752.0f, 11753.0f, 11754.0f, 11755.0f, 11756.0f, 11757.0f, 11758.0f, 11759.0f, 11760.0f, 11761.0f, 11762.0f, 11763.0f, 11764.0f, 11765.0f, 11766.0f, 11767.0f, 11768.0f, 11769.0f, 11770.0f, 11771.0f, 11772.0f, 11773.0f, 11774.0f, 11775.0f, 11776.0f, 11777.0f, 11778.0f, 11779.0f, 11780.0f, 11781.0f, 11782.0f, 11783.0f, 11784.0f, 11785.0f, 11786.0f, 11787.0f, 11788.0f, 11789.0f, 11790.0f, 11791.0f, 11792.0f, 11793.0f, 11794.0f, 11795.0f, 11796.0f, 11797.0f, 11798.0f, 11799.0f, 11800.0f, 11801.0f, 11802.0f, 11803.0f, 11804.0f, 11805.0f, 11806.0f, 11807.0f, 11808.0f, 11809.0f, 11810.0f, 11811.0f, 11812.0f, 11813.0f, 11814.0f, 11815.0f, 11816.0f, 11817.0f, 11818.0f, 11819.0f, 11820.0f, 11821.0f, 11822.0f, 11823.0f, 11824.0f, 11825.0f, 11826.0f, 11827.0f, 11828.0f, 11829.0f, 11830.0f, 11831.0f, 11832.0f, 11833.0f, 11834.0f, 11835.0f, 11836.0f, 11837.0f, 11838.0f, 11839.0f, 11840.0f, 11841.0f, 11842.0f, 11843.0f, 11844.0f, 11845.0f, 11846.0f, 11847.0f, 11848.0f, 11849.0f, 11850.0f, 11851.0f, 11852.0f, 11853.0f, 11854.0f, 11855.0f, 11856.0f, 11857.0f, 11858.0f, 11859.0f, 11860.0f, 11861.0f, 11862.0f, 11863.0f, 11864.0f, 11865.0f, 11866.0f, 11867.0f, 11868.0f, 11869.0f, 11870.0f, 11871.0f, 11872.0f, 11873.0f, 11874.0f, 11875.0f, 11876.0f, 11877.0f, 11878.0f, 11879.0f, 11880.0f, 11881.0f, 11882.0f, 11883.0f, 11884.0f, 11885.0f, 11886.0f, 11887.0f, 11888.0f, 11889.0f, 11890.0f, 11891.0f, 11892.0f, 11893.0f, 11894.0f, 11895.0f, 11896.0f, 11897.0f, 11898.0f, 11899.0f, 11900.0f, 
11901.0f, 11902.0f, 11903.0f, 11904.0f, 11905.0f, 11906.0f, 11907.0f, 11908.0f, 11909.0f, 11910.0f, 11911.0f, 11912.0f, 11913.0f, 11914.0f, 11915.0f, 11916.0f, 11917.0f, 11918.0f, 11919.0f, 11920.0f, 11921.0f, 11922.0f, 11923.0f, 11924.0f, 11925.0f, 11926.0f, 11927.0f, 11928.0f, 11929.0f, 11930.0f, 11931.0f, 11932.0f, 11933.0f, 11934.0f, 11935.0f, 11936.0f, 11937.0f, 11938.0f, 11939.0f, 11940.0f, 11941.0f, 11942.0f, 11943.0f, 11944.0f, 11945.0f, 11946.0f, 11947.0f, 11948.0f, 11949.0f, 11950.0f, 11951.0f, 11952.0f, 11953.0f, 11954.0f, 11955.0f, 11956.0f, 11957.0f, 11958.0f, 11959.0f}}, {1, {11960.0f, 11961.0f, 11962.0f, 11963.0f, 11964.0f, 11965.0f, 11966.0f, 11967.0f, 11968.0f, 11969.0f, 11970.0f, 11971.0f, 11972.0f, 11973.0f, 11974.0f, 11975.0f, 11976.0f, 11977.0f, 11978.0f, 11979.0f, 11980.0f, 11981.0f, 11982.0f, 11983.0f, 11984.0f, 11985.0f, 11986.0f, 11987.0f, 11988.0f, 11989.0f, 11990.0f, 11991.0f, 11992.0f, 11993.0f, 11994.0f, 11995.0f, 11996.0f, 11997.0f, 11998.0f, 11999.0f, 12000.0f, 12001.0f, 12002.0f, 12003.0f, 12004.0f, 12005.0f, 12006.0f, 12007.0f, 12008.0f, 12009.0f, 12010.0f, 12011.0f, 12012.0f, 12013.0f, 12014.0f, 12015.0f, 12016.0f, 12017.0f, 12018.0f, 12019.0f, 12020.0f, 12021.0f, 12022.0f, 12023.0f, 12024.0f, 12025.0f, 12026.0f, 12027.0f, 12028.0f, 12029.0f, 12030.0f, 12031.0f, 12032.0f, 12033.0f, 12034.0f, 12035.0f, 12036.0f, 12037.0f, 12038.0f, 12039.0f, 12040.0f, 12041.0f, 12042.0f, 12043.0f, 12044.0f, 12045.0f, 12046.0f, 12047.0f, 12048.0f, 12049.0f, 12050.0f, 12051.0f, 12052.0f, 12053.0f, 12054.0f, 12055.0f, 12056.0f, 12057.0f, 12058.0f, 12059.0f, 12060.0f, 12061.0f, 12062.0f, 12063.0f, 12064.0f, 12065.0f, 12066.0f, 12067.0f, 12068.0f, 12069.0f, 12070.0f, 12071.0f, 12072.0f, 12073.0f, 12074.0f, 12075.0f, 12076.0f, 12077.0f, 12078.0f, 12079.0f, 12080.0f, 12081.0f, 12082.0f, 12083.0f, 12084.0f, 12085.0f, 12086.0f, 12087.0f, 12088.0f, 12089.0f, 12090.0f, 12091.0f, 12092.0f, 12093.0f, 12094.0f, 12095.0f, 12096.0f, 12097.0f, 12098.0f, 12099.0f, 
12100.0f, 12101.0f, 12102.0f, 12103.0f, 12104.0f, 12105.0f, 12106.0f, 12107.0f, 12108.0f, 12109.0f, 12110.0f, 12111.0f, 12112.0f, 12113.0f, 12114.0f, 12115.0f, 12116.0f, 12117.0f, 12118.0f, 12119.0f, 12120.0f, 12121.0f, 12122.0f, 12123.0f, 12124.0f, 12125.0f, 12126.0f, 12127.0f, 12128.0f, 12129.0f, 12130.0f, 12131.0f, 12132.0f, 12133.0f, 12134.0f, 12135.0f, 12136.0f, 12137.0f, 12138.0f, 12139.0f, 12140.0f, 12141.0f, 12142.0f, 12143.0f, 12144.0f, 12145.0f, 12146.0f, 12147.0f, 12148.0f, 12149.0f, 12150.0f, 12151.0f, 12152.0f, 12153.0f, 12154.0f, 12155.0f, 12156.0f, 12157.0f, 12158.0f, 12159.0f, 12160.0f, 12161.0f, 12162.0f, 12163.0f, 12164.0f, 12165.0f, 12166.0f, 12167.0f, 12168.0f, 12169.0f, 12170.0f, 12171.0f, 12172.0f, 12173.0f, 12174.0f, 12175.0f, 12176.0f, 12177.0f, 12178.0f, 12179.0f, 12180.0f, 12181.0f, 12182.0f, 12183.0f, 12184.0f, 12185.0f, 12186.0f, 12187.0f, 12188.0f, 12189.0f, 12190.0f, 12191.0f, 12192.0f, 12193.0f, 12194.0f, 12195.0f, 12196.0f, 12197.0f, 12198.0f, 12199.0f, 12200.0f, 12201.0f, 12202.0f, 12203.0f, 12204.0f, 12205.0f, 12206.0f, 12207.0f, 12208.0f, 12209.0f, 12210.0f, 12211.0f, 12212.0f, 12213.0f, 12214.0f, 12215.0f, 12216.0f, 12217.0f, 12218.0f, 12219.0f, 12220.0f, 12221.0f, 12222.0f, 12223.0f, 12224.0f, 12225.0f, 12226.0f, 12227.0f, 12228.0f, 12229.0f, 12230.0f, 12231.0f, 12232.0f, 12233.0f, 12234.0f, 12235.0f, 12236.0f, 12237.0f, 12238.0f, 12239.0f, 12240.0f, 12241.0f, 12242.0f, 12243.0f, 12244.0f, 12245.0f, 12246.0f, 12247.0f, 12248.0f, 12249.0f, 12250.0f, 12251.0f, 12252.0f, 12253.0f, 12254.0f, 12255.0f, 12256.0f, 12257.0f, 12258.0f, 12259.0f, 12260.0f, 12261.0f, 12262.0f, 12263.0f, 12264.0f, 12265.0f, 12266.0f, 12267.0f, 12268.0f, 12269.0f, 12270.0f, 12271.0f, 12272.0f, 12273.0f, 12274.0f, 12275.0f, 12276.0f, 12277.0f, 12278.0f, 12279.0f, 12280.0f, 12281.0f, 12282.0f, 12283.0f, 12284.0f, 12285.0f, 12286.0f, 12287.0f, 12288.0f, 12289.0f, 12290.0f, 12291.0f, 12292.0f, 12293.0f, 12294.0f, 12295.0f, 12296.0f, 12297.0f, 12298.0f, 12299.0f, 
12300.0f, 12301.0f, 12302.0f, 12303.0f, 12304.0f, 12305.0f, 12306.0f, 12307.0f, 12308.0f, 12309.0f, 12310.0f, 12311.0f, 12312.0f, 12313.0f, 12314.0f, 12315.0f, 12316.0f, 12317.0f, 12318.0f, 12319.0f, 12320.0f, 12321.0f, 12322.0f, 12323.0f, 12324.0f, 12325.0f, 12326.0f, 12327.0f, 12328.0f, 12329.0f, 12330.0f, 12331.0f, 12332.0f, 12333.0f, 12334.0f, 12335.0f, 12336.0f, 12337.0f, 12338.0f, 12339.0f, 12340.0f, 12341.0f, 12342.0f, 12343.0f, 12344.0f, 12345.0f, 12346.0f, 12347.0f, 12348.0f, 12349.0f, 12350.0f, 12351.0f, 12352.0f, 12353.0f, 12354.0f, 12355.0f, 12356.0f, 12357.0f, 12358.0f, 12359.0f, 12360.0f, 12361.0f, 12362.0f, 12363.0f, 12364.0f, 12365.0f, 12366.0f, 12367.0f, 12368.0f, 12369.0f, 12370.0f, 12371.0f, 12372.0f, 12373.0f, 12374.0f, 12375.0f, 12376.0f, 12377.0f, 12378.0f, 12379.0f, 12380.0f, 12381.0f, 12382.0f, 12383.0f, 12384.0f, 12385.0f, 12386.0f, 12387.0f, 12388.0f, 12389.0f, 12390.0f, 12391.0f, 12392.0f, 12393.0f, 12394.0f, 12395.0f, 12396.0f, 12397.0f, 12398.0f, 12399.0f, 12400.0f, 12401.0f, 12402.0f, 12403.0f, 12404.0f, 12405.0f, 12406.0f, 12407.0f, 12408.0f, 12409.0f, 12410.0f, 12411.0f, 12412.0f, 12413.0f, 12414.0f, 12415.0f, 12416.0f, 12417.0f, 12418.0f, 12419.0f, 12420.0f, 12421.0f, 12422.0f, 12423.0f, 12424.0f, 12425.0f, 12426.0f, 12427.0f, 12428.0f, 12429.0f, 12430.0f, 12431.0f, 12432.0f, 12433.0f, 12434.0f, 12435.0f, 12436.0f, 12437.0f, 12438.0f, 12439.0f, 12440.0f, 12441.0f, 12442.0f, 12443.0f, 12444.0f, 12445.0f, 12446.0f, 12447.0f, 12448.0f, 12449.0f, 12450.0f, 12451.0f, 12452.0f, 12453.0f, 12454.0f, 12455.0f, 12456.0f, 12457.0f, 12458.0f, 12459.0f, 12460.0f, 12461.0f, 12462.0f, 12463.0f, 12464.0f, 12465.0f, 12466.0f, 12467.0f, 12468.0f, 12469.0f, 12470.0f, 12471.0f, 12472.0f, 12473.0f, 12474.0f, 12475.0f, 12476.0f, 12477.0f, 12478.0f, 12479.0f, 12480.0f, 12481.0f, 12482.0f, 12483.0f, 12484.0f, 12485.0f, 12486.0f, 12487.0f, 12488.0f, 12489.0f, 12490.0f, 12491.0f, 12492.0f, 12493.0f, 12494.0f, 12495.0f, 12496.0f, 12497.0f, 12498.0f, 12499.0f, 
12500.0f, 12501.0f, 12502.0f, 12503.0f, 12504.0f, 12505.0f, 12506.0f, 12507.0f, 12508.0f, 12509.0f, 12510.0f, 12511.0f, 12512.0f, 12513.0f, 12514.0f, 12515.0f, 12516.0f, 12517.0f, 12518.0f, 12519.0f, 12520.0f, 12521.0f, 12522.0f, 12523.0f, 12524.0f, 12525.0f, 12526.0f, 12527.0f, 12528.0f, 12529.0f, 12530.0f, 12531.0f, 12532.0f, 12533.0f, 12534.0f, 12535.0f, 12536.0f, 12537.0f, 12538.0f, 12539.0f, 12540.0f, 12541.0f, 12542.0f, 12543.0f, 12544.0f, 12545.0f, 12546.0f, 12547.0f, 12548.0f, 12549.0f, 12550.0f, 12551.0f, 12552.0f, 12553.0f, 12554.0f, 12555.0f, 12556.0f, 12557.0f, 12558.0f, 12559.0f, 12560.0f, 12561.0f, 12562.0f, 12563.0f, 12564.0f, 12565.0f, 12566.0f, 12567.0f, 12568.0f, 12569.0f, 12570.0f, 12571.0f, 12572.0f, 12573.0f, 12574.0f, 12575.0f, 12576.0f, 12577.0f, 12578.0f, 12579.0f, 12580.0f, 12581.0f, 12582.0f, 12583.0f, 12584.0f, 12585.0f, 12586.0f, 12587.0f, 12588.0f, 12589.0f, 12590.0f, 12591.0f, 12592.0f, 12593.0f, 12594.0f, 12595.0f, 12596.0f, 12597.0f, 12598.0f, 12599.0f, 12600.0f, 12601.0f, 12602.0f, 12603.0f, 12604.0f, 12605.0f, 12606.0f, 12607.0f, 12608.0f, 12609.0f, 12610.0f, 12611.0f, 12612.0f, 12613.0f, 12614.0f, 12615.0f, 12616.0f, 12617.0f, 12618.0f, 12619.0f, 12620.0f, 12621.0f, 12622.0f, 12623.0f, 12624.0f, 12625.0f, 12626.0f, 12627.0f, 12628.0f, 12629.0f, 12630.0f, 12631.0f, 12632.0f, 12633.0f, 12634.0f, 12635.0f, 12636.0f, 12637.0f, 12638.0f, 12639.0f, 12640.0f, 12641.0f, 12642.0f, 12643.0f, 12644.0f, 12645.0f, 12646.0f, 12647.0f, 12648.0f, 12649.0f, 12650.0f, 12651.0f, 12652.0f, 12653.0f, 12654.0f, 12655.0f, 12656.0f, 12657.0f, 12658.0f, 12659.0f, 12660.0f, 12661.0f, 12662.0f, 12663.0f, 12664.0f, 12665.0f, 12666.0f, 12667.0f, 12668.0f, 12669.0f, 12670.0f, 12671.0f, 12672.0f, 12673.0f, 12674.0f, 12675.0f, 12676.0f, 12677.0f, 12678.0f, 12679.0f, 12680.0f, 12681.0f, 12682.0f, 12683.0f, 12684.0f, 12685.0f, 12686.0f, 12687.0f, 12688.0f, 12689.0f, 12690.0f, 12691.0f, 12692.0f, 12693.0f, 12694.0f, 12695.0f, 12696.0f, 12697.0f, 12698.0f, 12699.0f, 
12700.0f, 12701.0f, 12702.0f, 12703.0f, 12704.0f, 12705.0f, 12706.0f, 12707.0f, 12708.0f, 12709.0f, 12710.0f, 12711.0f, 12712.0f, 12713.0f, 12714.0f, 12715.0f, 12716.0f, 12717.0f, 12718.0f, 12719.0f, 12720.0f, 12721.0f, 12722.0f, 12723.0f, 12724.0f, 12725.0f, 12726.0f, 12727.0f, 12728.0f, 12729.0f, 12730.0f, 12731.0f, 12732.0f, 12733.0f, 12734.0f, 12735.0f, 12736.0f, 12737.0f, 12738.0f, 12739.0f, 12740.0f, 12741.0f, 12742.0f, 12743.0f, 12744.0f, 12745.0f, 12746.0f, 12747.0f, 12748.0f, 12749.0f, 12750.0f, 12751.0f, 12752.0f, 12753.0f, 12754.0f, 12755.0f, 12756.0f, 12757.0f, 12758.0f, 12759.0f, 12760.0f, 12761.0f, 12762.0f, 12763.0f, 12764.0f, 12765.0f, 12766.0f, 12767.0f, 12768.0f, 12769.0f, 12770.0f, 12771.0f, 12772.0f, 12773.0f, 12774.0f, 12775.0f, 12776.0f, 12777.0f, 12778.0f, 12779.0f, 12780.0f, 12781.0f, 12782.0f, 12783.0f, 12784.0f, 12785.0f, 12786.0f, 12787.0f, 12788.0f, 12789.0f, 12790.0f, 12791.0f, 12792.0f, 12793.0f, 12794.0f, 12795.0f, 12796.0f, 12797.0f, 12798.0f, 12799.0f, 12800.0f, 12801.0f, 12802.0f, 12803.0f, 12804.0f, 12805.0f, 12806.0f, 12807.0f, 12808.0f, 12809.0f, 12810.0f, 12811.0f, 12812.0f, 12813.0f, 12814.0f, 12815.0f, 12816.0f, 12817.0f, 12818.0f, 12819.0f, 12820.0f, 12821.0f, 12822.0f, 12823.0f, 12824.0f, 12825.0f, 12826.0f, 12827.0f, 12828.0f, 12829.0f, 12830.0f, 12831.0f, 12832.0f, 12833.0f, 12834.0f, 12835.0f, 12836.0f, 12837.0f, 12838.0f, 12839.0f, 12840.0f, 12841.0f, 12842.0f, 12843.0f, 12844.0f, 12845.0f, 12846.0f, 12847.0f, 12848.0f, 12849.0f, 12850.0f, 12851.0f, 12852.0f, 12853.0f, 12854.0f, 12855.0f, 12856.0f, 12857.0f, 12858.0f, 12859.0f, 12860.0f, 12861.0f, 12862.0f, 12863.0f, 12864.0f, 12865.0f, 12866.0f, 12867.0f, 12868.0f, 12869.0f, 12870.0f, 12871.0f, 12872.0f, 12873.0f, 12874.0f, 12875.0f, 12876.0f, 12877.0f, 12878.0f, 12879.0f, 12880.0f, 12881.0f, 12882.0f, 12883.0f, 12884.0f, 12885.0f, 12886.0f, 12887.0f, 12888.0f, 12889.0f, 12890.0f, 12891.0f, 12892.0f, 12893.0f, 12894.0f, 12895.0f, 12896.0f, 12897.0f, 12898.0f, 12899.0f, 
12900.0f, 12901.0f, 12902.0f, 12903.0f, 12904.0f, 12905.0f, 12906.0f, 12907.0f, 12908.0f, 12909.0f, 12910.0f, 12911.0f, 12912.0f, 12913.0f, 12914.0f, 12915.0f, 12916.0f, 12917.0f, 12918.0f, 12919.0f, 12920.0f, 12921.0f, 12922.0f, 12923.0f, 12924.0f, 12925.0f, 12926.0f, 12927.0f, 12928.0f, 12929.0f, 12930.0f, 12931.0f, 12932.0f, 12933.0f, 12934.0f, 12935.0f, 12936.0f, 12937.0f, 12938.0f, 12939.0f, 12940.0f, 12941.0f, 12942.0f, 12943.0f, 12944.0f, 12945.0f, 12946.0f, 12947.0f, 12948.0f, 12949.0f, 12950.0f, 12951.0f, 12952.0f, 12953.0f, 12954.0f, 12955.0f, 12956.0f, 12957.0f, 12958.0f, 12959.0f, 12960.0f, 12961.0f, 12962.0f, 12963.0f, 12964.0f, 12965.0f, 12966.0f, 12967.0f, 12968.0f, 12969.0f, 12970.0f, 12971.0f, 12972.0f, 12973.0f, 12974.0f, 12975.0f, 12976.0f, 12977.0f, 12978.0f, 12979.0f, 12980.0f, 12981.0f, 12982.0f, 12983.0f, 12984.0f, 12985.0f, 12986.0f, 12987.0f, 12988.0f, 12989.0f, 12990.0f, 12991.0f, 12992.0f, 12993.0f, 12994.0f, 12995.0f, 12996.0f, 12997.0f, 12998.0f, 12999.0f, 13000.0f, 13001.0f, 13002.0f, 13003.0f, 13004.0f, 13005.0f, 13006.0f, 13007.0f, 13008.0f, 13009.0f, 13010.0f, 13011.0f, 13012.0f, 13013.0f, 13014.0f, 13015.0f, 13016.0f, 13017.0f, 13018.0f, 13019.0f, 13020.0f, 13021.0f, 13022.0f, 13023.0f, 13024.0f, 13025.0f, 13026.0f, 13027.0f, 13028.0f, 13029.0f, 13030.0f, 13031.0f, 13032.0f, 13033.0f, 13034.0f, 13035.0f, 13036.0f, 13037.0f, 13038.0f, 13039.0f, 13040.0f, 13041.0f, 13042.0f, 13043.0f, 13044.0f, 13045.0f, 13046.0f, 13047.0f, 13048.0f, 13049.0f, 13050.0f, 13051.0f, 13052.0f, 13053.0f, 13054.0f, 13055.0f, 13056.0f, 13057.0f, 13058.0f, 13059.0f, 13060.0f, 13061.0f, 13062.0f, 13063.0f, 13064.0f, 13065.0f, 13066.0f, 13067.0f, 13068.0f, 13069.0f, 13070.0f, 13071.0f, 13072.0f, 13073.0f, 13074.0f, 13075.0f, 13076.0f, 13077.0f, 13078.0f, 13079.0f, 13080.0f, 13081.0f, 13082.0f, 13083.0f, 13084.0f, 13085.0f, 13086.0f, 13087.0f, 13088.0f, 13089.0f, 13090.0f, 13091.0f, 13092.0f, 13093.0f, 13094.0f, 13095.0f, 13096.0f, 13097.0f, 13098.0f, 13099.0f, 
13100.0f, 13101.0f, 13102.0f, 13103.0f, 13104.0f, 13105.0f, 13106.0f, 13107.0f, 13108.0f, 13109.0f, 13110.0f, 13111.0f, 13112.0f, 13113.0f, 13114.0f, 13115.0f, 13116.0f, 13117.0f, 13118.0f, 13119.0f, 13120.0f, 13121.0f, 13122.0f, 13123.0f, 13124.0f, 13125.0f, 13126.0f, 13127.0f, 13128.0f, 13129.0f, 13130.0f, 13131.0f, 13132.0f, 13133.0f, 13134.0f, 13135.0f, 13136.0f, 13137.0f, 13138.0f, 13139.0f, 13140.0f, 13141.0f, 13142.0f, 13143.0f, 13144.0f, 13145.0f, 13146.0f, 13147.0f, 13148.0f, 13149.0f, 13150.0f, 13151.0f, 13152.0f, 13153.0f, 13154.0f, 13155.0f, 13156.0f, 13157.0f, 13158.0f, 13159.0f, 13160.0f, 13161.0f, 13162.0f, 13163.0f, 13164.0f, 13165.0f, 13166.0f, 13167.0f, 13168.0f, 13169.0f, 13170.0f, 13171.0f, 13172.0f, 13173.0f, 13174.0f, 13175.0f, 13176.0f, 13177.0f, 13178.0f, 13179.0f, 13180.0f, 13181.0f, 13182.0f, 13183.0f, 13184.0f, 13185.0f, 13186.0f, 13187.0f, 13188.0f, 13189.0f, 13190.0f, 13191.0f, 13192.0f, 13193.0f, 13194.0f, 13195.0f, 13196.0f, 13197.0f, 13198.0f, 13199.0f, 13200.0f, 13201.0f, 13202.0f, 13203.0f, 13204.0f, 13205.0f, 13206.0f, 13207.0f, 13208.0f, 13209.0f, 13210.0f, 13211.0f, 13212.0f, 13213.0f, 13214.0f, 13215.0f, 13216.0f, 13217.0f, 13218.0f, 13219.0f, 13220.0f, 13221.0f, 13222.0f, 13223.0f, 13224.0f, 13225.0f, 13226.0f, 13227.0f, 13228.0f, 13229.0f, 13230.0f, 13231.0f, 13232.0f, 13233.0f, 13234.0f, 13235.0f, 13236.0f, 13237.0f, 13238.0f, 13239.0f, 13240.0f, 13241.0f, 13242.0f, 13243.0f, 13244.0f, 13245.0f, 13246.0f, 13247.0f, 13248.0f, 13249.0f, 13250.0f, 13251.0f, 13252.0f, 13253.0f, 13254.0f, 13255.0f, 13256.0f, 13257.0f, 13258.0f, 13259.0f, 13260.0f, 13261.0f, 13262.0f, 13263.0f, 13264.0f, 13265.0f, 13266.0f, 13267.0f, 13268.0f, 13269.0f, 13270.0f, 13271.0f, 13272.0f, 13273.0f, 13274.0f, 13275.0f, 13276.0f, 13277.0f, 13278.0f, 13279.0f, 13280.0f, 13281.0f, 13282.0f, 13283.0f, 13284.0f, 13285.0f, 13286.0f, 13287.0f, 13288.0f, 13289.0f, 13290.0f, 13291.0f, 13292.0f, 13293.0f, 13294.0f, 13295.0f, 13296.0f, 13297.0f, 13298.0f, 13299.0f, 
13300.0f, 13301.0f, 13302.0f, 13303.0f, 13304.0f, 13305.0f, 13306.0f, 13307.0f, 13308.0f, 13309.0f, 13310.0f, 13311.0f, 13312.0f, 13313.0f, 13314.0f, 13315.0f, 13316.0f, 13317.0f, 13318.0f, 13319.0f, 13320.0f, 13321.0f, 13322.0f, 13323.0f, 13324.0f, 13325.0f, 13326.0f, 13327.0f, 13328.0f, 13329.0f, 13330.0f, 13331.0f, 13332.0f, 13333.0f, 13334.0f, 13335.0f, 13336.0f, 13337.0f, 13338.0f, 13339.0f, 13340.0f, 13341.0f, 13342.0f, 13343.0f, 13344.0f, 13345.0f, 13346.0f, 13347.0f, 13348.0f, 13349.0f, 13350.0f, 13351.0f, 13352.0f, 13353.0f, 13354.0f, 13355.0f, 13356.0f, 13357.0f, 13358.0f, 13359.0f, 13360.0f, 13361.0f, 13362.0f, 13363.0f, 13364.0f, 13365.0f, 13366.0f, 13367.0f, 13368.0f, 13369.0f, 13370.0f, 13371.0f, 13372.0f, 13373.0f, 13374.0f, 13375.0f, 13376.0f, 13377.0f, 13378.0f, 13379.0f, 13380.0f, 13381.0f, 13382.0f, 13383.0f, 13384.0f, 13385.0f, 13386.0f, 13387.0f, 13388.0f, 13389.0f, 13390.0f, 13391.0f, 13392.0f, 13393.0f, 13394.0f, 13395.0f, 13396.0f, 13397.0f, 13398.0f, 13399.0f, 13400.0f, 13401.0f, 13402.0f, 13403.0f, 13404.0f, 13405.0f, 13406.0f, 13407.0f, 13408.0f, 13409.0f, 13410.0f, 13411.0f, 13412.0f, 13413.0f, 13414.0f, 13415.0f, 13416.0f, 13417.0f, 13418.0f, 13419.0f, 13420.0f, 13421.0f, 13422.0f, 13423.0f, 13424.0f, 13425.0f, 13426.0f, 13427.0f, 13428.0f, 13429.0f, 13430.0f, 13431.0f, 13432.0f, 13433.0f, 13434.0f, 13435.0f, 13436.0f, 13437.0f, 13438.0f, 13439.0f, 13440.0f, 13441.0f, 13442.0f, 13443.0f, 13444.0f, 13445.0f, 13446.0f, 13447.0f, 13448.0f, 13449.0f, 13450.0f, 13451.0f, 13452.0f, 13453.0f, 13454.0f, 13455.0f, 13456.0f, 13457.0f, 13458.0f, 13459.0f, 13460.0f, 13461.0f, 13462.0f, 13463.0f, 13464.0f, 13465.0f, 13466.0f, 13467.0f, 13468.0f, 13469.0f, 13470.0f, 13471.0f, 13472.0f, 13473.0f, 13474.0f, 13475.0f, 13476.0f, 13477.0f, 13478.0f, 13479.0f, 13480.0f, 13481.0f, 13482.0f, 13483.0f, 13484.0f, 13485.0f, 13486.0f, 13487.0f, 13488.0f, 13489.0f, 13490.0f, 13491.0f, 13492.0f, 13493.0f, 13494.0f, 13495.0f, 13496.0f, 13497.0f, 13498.0f, 13499.0f, 
13500.0f, 13501.0f, 13502.0f, 13503.0f, 13504.0f, 13505.0f, 13506.0f, 13507.0f, 13508.0f, 13509.0f, 13510.0f, 13511.0f, 13512.0f, 13513.0f, 13514.0f, 13515.0f, 13516.0f, 13517.0f, 13518.0f, 13519.0f, 13520.0f, 13521.0f, 13522.0f, 13523.0f, 13524.0f, 13525.0f, 13526.0f, 13527.0f, 13528.0f, 13529.0f, 13530.0f, 13531.0f, 13532.0f, 13533.0f, 13534.0f, 13535.0f, 13536.0f, 13537.0f, 13538.0f, 13539.0f, 13540.0f, 13541.0f, 13542.0f, 13543.0f, 13544.0f, 13545.0f, 13546.0f, 13547.0f, 13548.0f, 13549.0f, 13550.0f, 13551.0f, 13552.0f, 13553.0f, 13554.0f, 13555.0f, 13556.0f, 13557.0f, 13558.0f, 13559.0f, 13560.0f, 13561.0f, 13562.0f, 13563.0f, 13564.0f, 13565.0f, 13566.0f, 13567.0f, 13568.0f, 13569.0f, 13570.0f, 13571.0f, 13572.0f, 13573.0f, 13574.0f, 13575.0f, 13576.0f, 13577.0f, 13578.0f, 13579.0f, 13580.0f, 13581.0f, 13582.0f, 13583.0f, 13584.0f, 13585.0f, 13586.0f, 13587.0f, 13588.0f, 13589.0f, 13590.0f, 13591.0f, 13592.0f, 13593.0f, 13594.0f, 13595.0f, 13596.0f, 13597.0f, 13598.0f, 13599.0f, 13600.0f, 13601.0f, 13602.0f, 13603.0f, 13604.0f, 13605.0f, 13606.0f, 13607.0f, 13608.0f, 13609.0f, 13610.0f, 13611.0f, 13612.0f, 13613.0f, 13614.0f, 13615.0f, 13616.0f, 13617.0f, 13618.0f, 13619.0f, 13620.0f, 13621.0f, 13622.0f, 13623.0f, 13624.0f, 13625.0f, 13626.0f, 13627.0f, 13628.0f, 13629.0f, 13630.0f, 13631.0f, 13632.0f, 13633.0f, 13634.0f, 13635.0f, 13636.0f, 13637.0f, 13638.0f, 13639.0f, 13640.0f, 13641.0f, 13642.0f, 13643.0f, 13644.0f, 13645.0f, 13646.0f, 13647.0f, 13648.0f, 13649.0f, 13650.0f, 13651.0f, 13652.0f, 13653.0f, 13654.0f, 13655.0f, 13656.0f, 13657.0f, 13658.0f, 13659.0f, 13660.0f, 13661.0f, 13662.0f, 13663.0f, 13664.0f, 13665.0f, 13666.0f, 13667.0f, 13668.0f, 13669.0f, 13670.0f, 13671.0f, 13672.0f, 13673.0f, 13674.0f, 13675.0f, 13676.0f, 13677.0f, 13678.0f, 13679.0f, 13680.0f, 13681.0f, 13682.0f, 13683.0f, 13684.0f, 13685.0f, 13686.0f, 13687.0f, 13688.0f, 13689.0f, 13690.0f, 13691.0f, 13692.0f, 13693.0f, 13694.0f, 13695.0f, 13696.0f, 13697.0f, 13698.0f, 13699.0f, 
13700.0f, 13701.0f, 13702.0f, 13703.0f, 13704.0f, 13705.0f, 13706.0f, 13707.0f, 13708.0f, 13709.0f, 13710.0f, 13711.0f, 13712.0f, 13713.0f, 13714.0f, 13715.0f, 13716.0f, 13717.0f, 13718.0f, 13719.0f, 13720.0f, 13721.0f, 13722.0f, 13723.0f, 13724.0f, 13725.0f, 13726.0f, 13727.0f, 13728.0f, 13729.0f, 13730.0f, 13731.0f, 13732.0f, 13733.0f, 13734.0f, 13735.0f, 13736.0f, 13737.0f, 13738.0f, 13739.0f, 13740.0f, 13741.0f, 13742.0f, 13743.0f, 13744.0f, 13745.0f, 13746.0f, 13747.0f, 13748.0f, 13749.0f, 13750.0f, 13751.0f, 13752.0f, 13753.0f, 13754.0f, 13755.0f, 13756.0f, 13757.0f, 13758.0f, 13759.0f, 13760.0f, 13761.0f, 13762.0f, 13763.0f, 13764.0f, 13765.0f, 13766.0f, 13767.0f, 13768.0f, 13769.0f, 13770.0f, 13771.0f, 13772.0f, 13773.0f, 13774.0f, 13775.0f, 13776.0f, 13777.0f, 13778.0f, 13779.0f, 13780.0f, 13781.0f, 13782.0f, 13783.0f, 13784.0f, 13785.0f, 13786.0f, 13787.0f, 13788.0f, 13789.0f, 13790.0f, 13791.0f, 13792.0f, 13793.0f, 13794.0f, 13795.0f, 13796.0f, 13797.0f, 13798.0f, 13799.0f, 13800.0f, 13801.0f, 13802.0f, 13803.0f, 13804.0f, 13805.0f, 13806.0f, 13807.0f, 13808.0f, 13809.0f, 13810.0f, 13811.0f, 13812.0f, 13813.0f, 13814.0f, 13815.0f, 13816.0f, 13817.0f, 13818.0f, 13819.0f, 13820.0f, 13821.0f, 13822.0f, 13823.0f, 13824.0f, 13825.0f, 13826.0f, 13827.0f, 13828.0f, 13829.0f, 13830.0f, 13831.0f, 13832.0f, 13833.0f, 13834.0f, 13835.0f, 13836.0f, 13837.0f, 13838.0f, 13839.0f, 13840.0f, 13841.0f, 13842.0f, 13843.0f, 13844.0f, 13845.0f, 13846.0f, 13847.0f, 13848.0f, 13849.0f, 13850.0f, 13851.0f, 13852.0f, 13853.0f, 13854.0f, 13855.0f, 13856.0f, 13857.0f, 13858.0f, 13859.0f, 13860.0f, 13861.0f, 13862.0f, 13863.0f, 13864.0f, 13865.0f, 13866.0f, 13867.0f, 13868.0f, 13869.0f, 13870.0f, 13871.0f, 13872.0f, 13873.0f, 13874.0f, 13875.0f, 13876.0f, 13877.0f, 13878.0f, 13879.0f, 13880.0f, 13881.0f, 13882.0f, 13883.0f, 13884.0f, 13885.0f, 13886.0f, 13887.0f, 13888.0f, 13889.0f, 13890.0f, 13891.0f, 13892.0f, 13893.0f, 13894.0f, 13895.0f, 13896.0f, 13897.0f, 13898.0f, 13899.0f, 
13900.0f, 13901.0f, 13902.0f, 13903.0f, 13904.0f, 13905.0f, 13906.0f, 13907.0f, 13908.0f, 13909.0f, 13910.0f, 13911.0f, 13912.0f, 13913.0f, 13914.0f, 13915.0f, 13916.0f, 13917.0f, 13918.0f, 13919.0f, 13920.0f, 13921.0f, 13922.0f, 13923.0f, 13924.0f, 13925.0f, 13926.0f, 13927.0f, 13928.0f, 13929.0f, 13930.0f, 13931.0f, 13932.0f, 13933.0f, 13934.0f, 13935.0f, 13936.0f, 13937.0f, 13938.0f, 13939.0f, 13940.0f, 13941.0f, 13942.0f, 13943.0f, 13944.0f, 13945.0f, 13946.0f, 13947.0f, 13948.0f, 13949.0f, 13950.0f, 13951.0f, 13952.0f, 13953.0f, 13954.0f, 13955.0f, 13956.0f, 13957.0f, 13958.0f, 13959.0f, 13960.0f, 13961.0f, 13962.0f, 13963.0f, 13964.0f, 13965.0f, 13966.0f, 13967.0f, 13968.0f, 13969.0f, 13970.0f, 13971.0f, 13972.0f, 13973.0f, 13974.0f, 13975.0f, 13976.0f, 13977.0f, 13978.0f, 13979.0f, 13980.0f, 13981.0f, 13982.0f, 13983.0f, 13984.0f, 13985.0f, 13986.0f, 13987.0f, 13988.0f, 13989.0f, 13990.0f, 13991.0f, 13992.0f, 13993.0f, 13994.0f, 13995.0f, 13996.0f, 13997.0f, 13998.0f, 13999.0f, 14000.0f, 14001.0f, 14002.0f, 14003.0f, 14004.0f, 14005.0f, 14006.0f, 14007.0f, 14008.0f, 14009.0f, 14010.0f, 14011.0f, 14012.0f, 14013.0f, 14014.0f, 14015.0f, 14016.0f, 14017.0f, 14018.0f, 14019.0f, 14020.0f, 14021.0f, 14022.0f, 14023.0f, 14024.0f, 14025.0f, 14026.0f, 14027.0f, 14028.0f, 14029.0f, 14030.0f, 14031.0f, 14032.0f, 14033.0f, 14034.0f, 14035.0f, 14036.0f, 14037.0f, 14038.0f, 14039.0f, 14040.0f, 14041.0f, 14042.0f, 14043.0f, 14044.0f, 14045.0f, 14046.0f, 14047.0f, 14048.0f, 14049.0f, 14050.0f, 14051.0f, 14052.0f, 14053.0f, 14054.0f, 14055.0f, 14056.0f, 14057.0f, 14058.0f, 14059.0f, 14060.0f, 14061.0f, 14062.0f, 14063.0f, 14064.0f, 14065.0f, 14066.0f, 14067.0f, 14068.0f, 14069.0f, 14070.0f, 14071.0f, 14072.0f, 14073.0f, 14074.0f, 14075.0f, 14076.0f, 14077.0f, 14078.0f, 14079.0f, 14080.0f, 14081.0f, 14082.0f, 14083.0f, 14084.0f, 14085.0f, 14086.0f, 14087.0f, 14088.0f, 14089.0f, 14090.0f, 14091.0f, 14092.0f, 14093.0f, 14094.0f, 14095.0f, 14096.0f, 14097.0f, 14098.0f, 14099.0f, 
14100.0f, 14101.0f, 14102.0f, 14103.0f, 14104.0f, 14105.0f, 14106.0f, 14107.0f, 14108.0f, 14109.0f, 14110.0f, 14111.0f, 14112.0f, 14113.0f, 14114.0f, 14115.0f, 14116.0f, 14117.0f, 14118.0f, 14119.0f, 14120.0f, 14121.0f, 14122.0f, 14123.0f, 14124.0f, 14125.0f, 14126.0f, 14127.0f, 14128.0f, 14129.0f, 14130.0f, 14131.0f, 14132.0f, 14133.0f, 14134.0f, 14135.0f, 14136.0f, 14137.0f, 14138.0f, 14139.0f, 14140.0f, 14141.0f, 14142.0f, 14143.0f, 14144.0f, 14145.0f, 14146.0f, 14147.0f, 14148.0f, 14149.0f, 14150.0f, 14151.0f, 14152.0f, 14153.0f, 14154.0f, 14155.0f, 14156.0f, 14157.0f, 14158.0f, 14159.0f, 14160.0f, 14161.0f, 14162.0f, 14163.0f, 14164.0f, 14165.0f, 14166.0f, 14167.0f, 14168.0f, 14169.0f, 14170.0f, 14171.0f, 14172.0f, 14173.0f, 14174.0f, 14175.0f, 14176.0f, 14177.0f, 14178.0f, 14179.0f, 14180.0f, 14181.0f, 14182.0f, 14183.0f, 14184.0f, 14185.0f, 14186.0f, 14187.0f, 14188.0f, 14189.0f, 14190.0f, 14191.0f, 14192.0f, 14193.0f, 14194.0f, 14195.0f, 14196.0f, 14197.0f, 14198.0f, 14199.0f, 14200.0f, 14201.0f, 14202.0f, 14203.0f, 14204.0f, 14205.0f, 14206.0f, 14207.0f, 14208.0f, 14209.0f, 14210.0f, 14211.0f, 14212.0f, 14213.0f, 14214.0f, 14215.0f, 14216.0f, 14217.0f, 14218.0f, 14219.0f, 14220.0f, 14221.0f, 14222.0f, 14223.0f, 14224.0f, 14225.0f, 14226.0f, 14227.0f, 14228.0f, 14229.0f, 14230.0f, 14231.0f, 14232.0f, 14233.0f, 14234.0f, 14235.0f, 14236.0f, 14237.0f, 14238.0f, 14239.0f, 14240.0f, 14241.0f, 14242.0f, 14243.0f, 14244.0f, 14245.0f, 14246.0f, 14247.0f, 14248.0f, 14249.0f, 14250.0f, 14251.0f, 14252.0f, 14253.0f, 14254.0f, 14255.0f, 14256.0f, 14257.0f, 14258.0f, 14259.0f, 14260.0f, 14261.0f, 14262.0f, 14263.0f, 14264.0f, 14265.0f, 14266.0f, 14267.0f, 14268.0f, 14269.0f, 14270.0f, 14271.0f, 14272.0f, 14273.0f, 14274.0f, 14275.0f, 14276.0f, 14277.0f, 14278.0f, 14279.0f, 14280.0f, 14281.0f, 14282.0f, 14283.0f, 14284.0f, 14285.0f, 14286.0f, 14287.0f, 14288.0f, 14289.0f, 14290.0f, 14291.0f, 14292.0f, 14293.0f, 14294.0f, 14295.0f, 14296.0f, 14297.0f, 14298.0f, 14299.0f, 
14300.0f, 14301.0f, 14302.0f, 14303.0f, 14304.0f, 14305.0f, 14306.0f, 14307.0f, 14308.0f, 14309.0f, 14310.0f, 14311.0f, 14312.0f, 14313.0f, 14314.0f, 14315.0f, 14316.0f, 14317.0f, 14318.0f, 14319.0f, 14320.0f, 14321.0f, 14322.0f, 14323.0f, 14324.0f, 14325.0f, 14326.0f, 14327.0f, 14328.0f, 14329.0f, 14330.0f, 14331.0f, 14332.0f, 14333.0f, 14334.0f, 14335.0f, 14336.0f, 14337.0f, 14338.0f, 14339.0f, 14340.0f, 14341.0f, 14342.0f, 14343.0f, 14344.0f, 14345.0f, 14346.0f, 14347.0f, 14348.0f, 14349.0f, 14350.0f, 14351.0f, 14352.0f, 14353.0f, 14354.0f, 14355.0f, 14356.0f, 14357.0f, 14358.0f, 14359.0f, 14360.0f, 14361.0f, 14362.0f, 14363.0f, 14364.0f, 14365.0f, 14366.0f, 14367.0f, 14368.0f, 14369.0f, 14370.0f, 14371.0f, 14372.0f, 14373.0f, 14374.0f, 14375.0f, 14376.0f, 14377.0f, 14378.0f, 14379.0f, 14380.0f, 14381.0f, 14382.0f, 14383.0f, 14384.0f, 14385.0f, 14386.0f, 14387.0f, 14388.0f, 14389.0f, 14390.0f, 14391.0f, 14392.0f, 14393.0f, 14394.0f, 14395.0f, 14396.0f, 14397.0f, 14398.0f, 14399.0f, 14400.0f, 14401.0f, 14402.0f, 14403.0f, 14404.0f, 14405.0f, 14406.0f, 14407.0f, 14408.0f, 14409.0f, 14410.0f, 14411.0f, 14412.0f, 14413.0f, 14414.0f, 14415.0f, 14416.0f, 14417.0f, 14418.0f, 14419.0f, 14420.0f, 14421.0f, 14422.0f, 14423.0f, 14424.0f, 14425.0f, 14426.0f, 14427.0f, 14428.0f, 14429.0f, 14430.0f, 14431.0f, 14432.0f, 14433.0f, 14434.0f, 14435.0f, 14436.0f, 14437.0f, 14438.0f, 14439.0f, 14440.0f, 14441.0f, 14442.0f, 14443.0f, 14444.0f, 14445.0f, 14446.0f, 14447.0f, 14448.0f, 14449.0f, 14450.0f, 14451.0f, 14452.0f, 14453.0f, 14454.0f, 14455.0f, 14456.0f, 14457.0f, 14458.0f, 14459.0f, 14460.0f, 14461.0f, 14462.0f, 14463.0f, 14464.0f, 14465.0f, 14466.0f, 14467.0f, 14468.0f, 14469.0f, 14470.0f, 14471.0f, 14472.0f, 14473.0f, 14474.0f, 14475.0f, 14476.0f, 14477.0f, 14478.0f, 14479.0f, 14480.0f, 14481.0f, 14482.0f, 14483.0f, 14484.0f, 14485.0f, 14486.0f, 14487.0f, 14488.0f, 14489.0f, 14490.0f, 14491.0f, 14492.0f, 14493.0f, 14494.0f, 14495.0f, 14496.0f, 14497.0f, 14498.0f, 14499.0f, 
14500.0f, 14501.0f, 14502.0f, 14503.0f, 14504.0f, 14505.0f, 14506.0f, 14507.0f, 14508.0f, 14509.0f, 14510.0f, 14511.0f, 14512.0f, 14513.0f, 14514.0f, 14515.0f, 14516.0f, 14517.0f, 14518.0f, 14519.0f, 14520.0f, 14521.0f, 14522.0f, 14523.0f, 14524.0f, 14525.0f, 14526.0f, 14527.0f, 14528.0f, 14529.0f, 14530.0f, 14531.0f, 14532.0f, 14533.0f, 14534.0f, 14535.0f, 14536.0f, 14537.0f, 14538.0f, 14539.0f, 14540.0f, 14541.0f, 14542.0f, 14543.0f, 14544.0f, 14545.0f, 14546.0f, 14547.0f, 14548.0f, 14549.0f, 14550.0f, 14551.0f, 14552.0f, 14553.0f, 14554.0f, 14555.0f, 14556.0f, 14557.0f, 14558.0f, 14559.0f, 14560.0f, 14561.0f, 14562.0f, 14563.0f, 14564.0f, 14565.0f, 14566.0f, 14567.0f, 14568.0f, 14569.0f, 14570.0f, 14571.0f, 14572.0f, 14573.0f, 14574.0f, 14575.0f, 14576.0f, 14577.0f, 14578.0f, 14579.0f, 14580.0f, 14581.0f, 14582.0f, 14583.0f, 14584.0f, 14585.0f, 14586.0f, 14587.0f, 14588.0f, 14589.0f, 14590.0f, 14591.0f, 14592.0f, 14593.0f, 14594.0f, 14595.0f, 14596.0f, 14597.0f, 14598.0f, 14599.0f, 14600.0f, 14601.0f, 14602.0f, 14603.0f, 14604.0f, 14605.0f, 14606.0f, 14607.0f, 14608.0f, 14609.0f, 14610.0f, 14611.0f, 14612.0f, 14613.0f, 14614.0f, 14615.0f, 14616.0f, 14617.0f, 14618.0f, 14619.0f, 14620.0f, 14621.0f, 14622.0f, 14623.0f, 14624.0f, 14625.0f, 14626.0f, 14627.0f, 14628.0f, 14629.0f, 14630.0f, 14631.0f, 14632.0f, 14633.0f, 14634.0f, 14635.0f, 14636.0f, 14637.0f, 14638.0f, 14639.0f, 14640.0f, 14641.0f, 14642.0f, 14643.0f, 14644.0f, 14645.0f, 14646.0f, 14647.0f, 14648.0f, 14649.0f, 14650.0f, 14651.0f, 14652.0f, 14653.0f, 14654.0f, 14655.0f, 14656.0f, 14657.0f, 14658.0f, 14659.0f, 14660.0f, 14661.0f, 14662.0f, 14663.0f, 14664.0f, 14665.0f, 14666.0f, 14667.0f, 14668.0f, 14669.0f, 14670.0f, 14671.0f, 14672.0f, 14673.0f, 14674.0f, 14675.0f, 14676.0f, 14677.0f, 14678.0f, 14679.0f, 14680.0f, 14681.0f, 14682.0f, 14683.0f, 14684.0f, 14685.0f, 14686.0f, 14687.0f, 14688.0f, 14689.0f, 14690.0f, 14691.0f, 14692.0f, 14693.0f, 14694.0f, 14695.0f, 14696.0f, 14697.0f, 14698.0f, 14699.0f, 
14700.0f, 14701.0f, 14702.0f, 14703.0f, 14704.0f, 14705.0f, 14706.0f, 14707.0f, 14708.0f, 14709.0f, 14710.0f, 14711.0f, 14712.0f, 14713.0f, 14714.0f, 14715.0f, 14716.0f, 14717.0f, 14718.0f, 14719.0f, 14720.0f, 14721.0f, 14722.0f, 14723.0f, 14724.0f, 14725.0f, 14726.0f, 14727.0f, 14728.0f, 14729.0f, 14730.0f, 14731.0f, 14732.0f, 14733.0f, 14734.0f, 14735.0f, 14736.0f, 14737.0f, 14738.0f, 14739.0f, 14740.0f, 14741.0f, 14742.0f, 14743.0f, 14744.0f, 14745.0f, 14746.0f, 14747.0f, 14748.0f, 14749.0f, 14750.0f, 14751.0f, 14752.0f, 14753.0f, 14754.0f, 14755.0f, 14756.0f, 14757.0f, 14758.0f, 14759.0f, 14760.0f, 14761.0f, 14762.0f, 14763.0f, 14764.0f, 14765.0f, 14766.0f, 14767.0f, 14768.0f, 14769.0f, 14770.0f, 14771.0f, 14772.0f, 14773.0f, 14774.0f, 14775.0f, 14776.0f, 14777.0f, 14778.0f, 14779.0f, 14780.0f, 14781.0f, 14782.0f, 14783.0f, 14784.0f, 14785.0f, 14786.0f, 14787.0f, 14788.0f, 14789.0f, 14790.0f, 14791.0f, 14792.0f, 14793.0f, 14794.0f, 14795.0f, 14796.0f, 14797.0f, 14798.0f, 14799.0f, 14800.0f, 14801.0f, 14802.0f, 14803.0f, 14804.0f, 14805.0f, 14806.0f, 14807.0f, 14808.0f, 14809.0f, 14810.0f, 14811.0f, 14812.0f, 14813.0f, 14814.0f, 14815.0f, 14816.0f, 14817.0f, 14818.0f, 14819.0f, 14820.0f, 14821.0f, 14822.0f, 14823.0f, 14824.0f, 14825.0f, 14826.0f, 14827.0f, 14828.0f, 14829.0f, 14830.0f, 14831.0f, 14832.0f, 14833.0f, 14834.0f, 14835.0f, 14836.0f, 14837.0f, 14838.0f, 14839.0f, 14840.0f, 14841.0f, 14842.0f, 14843.0f, 14844.0f, 14845.0f, 14846.0f, 14847.0f, 14848.0f, 14849.0f, 14850.0f, 14851.0f, 14852.0f, 14853.0f, 14854.0f, 14855.0f, 14856.0f, 14857.0f, 14858.0f, 14859.0f, 14860.0f, 14861.0f, 14862.0f, 14863.0f, 14864.0f, 14865.0f, 14866.0f, 14867.0f, 14868.0f, 14869.0f, 14870.0f, 14871.0f, 14872.0f, 14873.0f, 14874.0f, 14875.0f, 14876.0f, 14877.0f, 14878.0f, 14879.0f, 14880.0f, 14881.0f, 14882.0f, 14883.0f, 14884.0f, 14885.0f, 14886.0f, 14887.0f, 14888.0f, 14889.0f, 14890.0f, 14891.0f, 14892.0f, 14893.0f, 14894.0f, 14895.0f, 14896.0f, 14897.0f, 14898.0f, 14899.0f, 
14900.0f, 14901.0f, 14902.0f, 14903.0f, 14904.0f, 14905.0f, 14906.0f, 14907.0f, 14908.0f, 14909.0f, 14910.0f, 14911.0f, 14912.0f, 14913.0f, 14914.0f, 14915.0f, 14916.0f, 14917.0f, 14918.0f, 14919.0f, 14920.0f, 14921.0f, 14922.0f, 14923.0f, 14924.0f, 14925.0f, 14926.0f, 14927.0f, 14928.0f, 14929.0f, 14930.0f, 14931.0f, 14932.0f, 14933.0f, 14934.0f, 14935.0f, 14936.0f, 14937.0f, 14938.0f, 14939.0f, 14940.0f, 14941.0f, 14942.0f, 14943.0f, 14944.0f, 14945.0f, 14946.0f, 14947.0f, 14948.0f, 14949.0f, 14950.0f, 14951.0f, 14952.0f, 14953.0f, 14954.0f, 14955.0f, 14956.0f, 14957.0f, 14958.0f, 14959.0f, 14960.0f, 14961.0f, 14962.0f, 14963.0f, 14964.0f, 14965.0f, 14966.0f, 14967.0f, 14968.0f, 14969.0f, 14970.0f, 14971.0f, 14972.0f, 14973.0f, 14974.0f, 14975.0f, 14976.0f, 14977.0f, 14978.0f, 14979.0f, 14980.0f, 14981.0f, 14982.0f, 14983.0f, 14984.0f, 14985.0f, 14986.0f, 14987.0f, 14988.0f, 14989.0f, 14990.0f, 14991.0f, 14992.0f, 14993.0f, 14994.0f, 14995.0f, 14996.0f, 14997.0f, 14998.0f, 14999.0f, 15000.0f, 15001.0f, 15002.0f, 15003.0f, 15004.0f, 15005.0f, 15006.0f, 15007.0f, 15008.0f, 15009.0f, 15010.0f, 15011.0f, 15012.0f, 15013.0f, 15014.0f, 15015.0f, 15016.0f, 15017.0f, 15018.0f, 15019.0f, 15020.0f, 15021.0f, 15022.0f, 15023.0f, 15024.0f, 15025.0f, 15026.0f, 15027.0f, 15028.0f, 15029.0f, 15030.0f, 15031.0f, 15032.0f, 15033.0f, 15034.0f, 15035.0f, 15036.0f, 15037.0f, 15038.0f, 15039.0f, 15040.0f, 15041.0f, 15042.0f, 15043.0f, 15044.0f, 15045.0f, 15046.0f, 15047.0f, 15048.0f, 15049.0f, 15050.0f, 15051.0f, 15052.0f, 15053.0f, 15054.0f, 15055.0f, 15056.0f, 15057.0f, 15058.0f, 15059.0f, 15060.0f, 15061.0f, 15062.0f, 15063.0f, 15064.0f, 15065.0f, 15066.0f, 15067.0f, 15068.0f, 15069.0f, 15070.0f, 15071.0f, 15072.0f, 15073.0f, 15074.0f, 15075.0f, 15076.0f, 15077.0f, 15078.0f, 15079.0f, 15080.0f, 15081.0f, 15082.0f, 15083.0f, 15084.0f, 15085.0f, 15086.0f, 15087.0f, 15088.0f, 15089.0f, 15090.0f, 15091.0f, 15092.0f, 15093.0f, 15094.0f, 15095.0f, 15096.0f, 15097.0f, 15098.0f, 15099.0f, 
15100.0f, 15101.0f, 15102.0f, 15103.0f, 15104.0f, 15105.0f, 15106.0f, 15107.0f, 15108.0f, 15109.0f, 15110.0f, 15111.0f, 15112.0f, 15113.0f, 15114.0f, 15115.0f, 15116.0f, 15117.0f, 15118.0f, 15119.0f, 15120.0f, 15121.0f, 15122.0f, 15123.0f, 15124.0f, 15125.0f, 15126.0f, 15127.0f, 15128.0f, 15129.0f, 15130.0f, 15131.0f, 15132.0f, 15133.0f, 15134.0f, 15135.0f, 15136.0f, 15137.0f, 15138.0f, 15139.0f, 15140.0f, 15141.0f, 15142.0f, 15143.0f, 15144.0f, 15145.0f, 15146.0f, 15147.0f, 15148.0f, 15149.0f, 15150.0f, 15151.0f, 15152.0f, 15153.0f, 15154.0f, 15155.0f, 15156.0f, 15157.0f, 15158.0f, 15159.0f, 15160.0f, 15161.0f, 15162.0f, 15163.0f, 15164.0f, 15165.0f, 15166.0f, 15167.0f, 15168.0f, 15169.0f, 15170.0f, 15171.0f, 15172.0f, 15173.0f, 15174.0f, 15175.0f, 15176.0f, 15177.0f, 15178.0f, 15179.0f, 15180.0f, 15181.0f, 15182.0f, 15183.0f, 15184.0f, 15185.0f, 15186.0f, 15187.0f, 15188.0f, 15189.0f, 15190.0f, 15191.0f, 15192.0f, 15193.0f, 15194.0f, 15195.0f, 15196.0f, 15197.0f, 15198.0f, 15199.0f, 15200.0f, 15201.0f, 15202.0f, 15203.0f, 15204.0f, 15205.0f, 15206.0f, 15207.0f, 15208.0f, 15209.0f, 15210.0f, 15211.0f, 15212.0f, 15213.0f, 15214.0f, 15215.0f, 15216.0f, 15217.0f, 15218.0f, 15219.0f, 15220.0f, 15221.0f, 15222.0f, 15223.0f, 15224.0f, 15225.0f, 15226.0f, 15227.0f, 15228.0f, 15229.0f, 15230.0f, 15231.0f, 15232.0f, 15233.0f, 15234.0f, 15235.0f, 15236.0f, 15237.0f, 15238.0f, 15239.0f, 15240.0f, 15241.0f, 15242.0f, 15243.0f, 15244.0f, 15245.0f, 15246.0f, 15247.0f, 15248.0f, 15249.0f, 15250.0f, 15251.0f, 15252.0f, 15253.0f, 15254.0f, 15255.0f, 15256.0f, 15257.0f, 15258.0f, 15259.0f, 15260.0f, 15261.0f, 15262.0f, 15263.0f, 15264.0f, 15265.0f, 15266.0f, 15267.0f, 15268.0f, 15269.0f, 15270.0f, 15271.0f, 15272.0f, 15273.0f, 15274.0f, 15275.0f, 15276.0f, 15277.0f, 15278.0f, 15279.0f, 15280.0f, 15281.0f, 15282.0f, 15283.0f, 15284.0f, 15285.0f, 15286.0f, 15287.0f, 15288.0f, 15289.0f, 15290.0f, 15291.0f, 15292.0f, 15293.0f, 15294.0f, 15295.0f, 15296.0f, 15297.0f, 15298.0f, 15299.0f, 
15300.0f, 15301.0f, 15302.0f, 15303.0f, 15304.0f, 15305.0f, 15306.0f, 15307.0f, 15308.0f, 15309.0f, 15310.0f, 15311.0f, 15312.0f, 15313.0f, 15314.0f, 15315.0f, 15316.0f, 15317.0f, 15318.0f, 15319.0f, 15320.0f, 15321.0f, 15322.0f, 15323.0f, 15324.0f, 15325.0f, 15326.0f, 15327.0f, 15328.0f, 15329.0f, 15330.0f, 15331.0f, 15332.0f, 15333.0f, 15334.0f, 15335.0f, 15336.0f, 15337.0f, 15338.0f, 15339.0f, 15340.0f, 15341.0f, 15342.0f, 15343.0f, 15344.0f, 15345.0f, 15346.0f, 15347.0f, 15348.0f, 15349.0f, 15350.0f, 15351.0f, 15352.0f, 15353.0f, 15354.0f, 15355.0f, 15356.0f, 15357.0f, 15358.0f, 15359.0f, 15360.0f, 15361.0f, 15362.0f, 15363.0f, 15364.0f, 15365.0f, 15366.0f, 15367.0f, 15368.0f, 15369.0f, 15370.0f, 15371.0f, 15372.0f, 15373.0f, 15374.0f, 15375.0f, 15376.0f, 15377.0f, 15378.0f, 15379.0f, 15380.0f, 15381.0f, 15382.0f, 15383.0f, 15384.0f, 15385.0f, 15386.0f, 15387.0f, 15388.0f, 15389.0f, 15390.0f, 15391.0f, 15392.0f, 15393.0f, 15394.0f, 15395.0f, 15396.0f, 15397.0f, 15398.0f, 15399.0f, 15400.0f, 15401.0f, 15402.0f, 15403.0f, 15404.0f, 15405.0f, 15406.0f, 15407.0f, 15408.0f, 15409.0f, 15410.0f, 15411.0f, 15412.0f, 15413.0f, 15414.0f, 15415.0f, 15416.0f, 15417.0f, 15418.0f, 15419.0f, 15420.0f, 15421.0f, 15422.0f, 15423.0f, 15424.0f, 15425.0f, 15426.0f, 15427.0f, 15428.0f, 15429.0f, 15430.0f, 15431.0f, 15432.0f, 15433.0f, 15434.0f, 15435.0f, 15436.0f, 15437.0f, 15438.0f, 15439.0f, 15440.0f, 15441.0f, 15442.0f, 15443.0f, 15444.0f, 15445.0f, 15446.0f, 15447.0f, 15448.0f, 15449.0f, 15450.0f, 15451.0f, 15452.0f, 15453.0f, 15454.0f, 15455.0f, 15456.0f, 15457.0f, 15458.0f, 15459.0f, 15460.0f, 15461.0f, 15462.0f, 15463.0f, 15464.0f, 15465.0f, 15466.0f, 15467.0f, 15468.0f, 15469.0f, 15470.0f, 15471.0f, 15472.0f, 15473.0f, 15474.0f, 15475.0f, 15476.0f, 15477.0f, 15478.0f, 15479.0f, 15480.0f, 15481.0f, 15482.0f, 15483.0f, 15484.0f, 15485.0f, 15486.0f, 15487.0f, 15488.0f, 15489.0f, 15490.0f, 15491.0f, 15492.0f, 15493.0f, 15494.0f, 15495.0f, 15496.0f, 15497.0f, 15498.0f, 15499.0f, 
15500.0f, 15501.0f, 15502.0f, 15503.0f, 15504.0f, 15505.0f, 15506.0f, 15507.0f, 15508.0f, 15509.0f, 15510.0f, 15511.0f, 15512.0f, 15513.0f, 15514.0f, 15515.0f, 15516.0f, 15517.0f, 15518.0f, 15519.0f, 15520.0f, 15521.0f, 15522.0f, 15523.0f, 15524.0f, 15525.0f, 15526.0f, 15527.0f, 15528.0f, 15529.0f, 15530.0f, 15531.0f, 15532.0f, 15533.0f, 15534.0f, 15535.0f, 15536.0f, 15537.0f, 15538.0f, 15539.0f, 15540.0f, 15541.0f, 15542.0f, 15543.0f, 15544.0f, 15545.0f, 15546.0f, 15547.0f, 15548.0f, 15549.0f, 15550.0f, 15551.0f, 15552.0f, 15553.0f, 15554.0f, 15555.0f, 15556.0f, 15557.0f, 15558.0f, 15559.0f, 15560.0f, 15561.0f, 15562.0f, 15563.0f, 15564.0f, 15565.0f, 15566.0f, 15567.0f, 15568.0f, 15569.0f, 15570.0f, 15571.0f, 15572.0f, 15573.0f, 15574.0f, 15575.0f, 15576.0f, 15577.0f, 15578.0f, 15579.0f, 15580.0f, 15581.0f, 15582.0f, 15583.0f, 15584.0f, 15585.0f, 15586.0f, 15587.0f, 15588.0f, 15589.0f, 15590.0f, 15591.0f, 15592.0f, 15593.0f, 15594.0f, 15595.0f, 15596.0f, 15597.0f, 15598.0f, 15599.0f, 15600.0f, 15601.0f, 15602.0f, 15603.0f, 15604.0f, 15605.0f, 15606.0f, 15607.0f, 15608.0f, 15609.0f, 15610.0f, 15611.0f, 15612.0f, 15613.0f, 15614.0f, 15615.0f, 15616.0f, 15617.0f, 15618.0f, 15619.0f, 15620.0f, 15621.0f, 15622.0f, 15623.0f, 15624.0f, 15625.0f, 15626.0f, 15627.0f, 15628.0f, 15629.0f, 15630.0f, 15631.0f, 15632.0f, 15633.0f, 15634.0f, 15635.0f, 15636.0f, 15637.0f, 15638.0f, 15639.0f, 15640.0f, 15641.0f, 15642.0f, 15643.0f, 15644.0f, 15645.0f, 15646.0f, 15647.0f, 15648.0f, 15649.0f, 15650.0f, 15651.0f, 15652.0f, 15653.0f, 15654.0f, 15655.0f, 15656.0f, 15657.0f, 15658.0f, 15659.0f, 15660.0f, 15661.0f, 15662.0f, 15663.0f, 15664.0f, 15665.0f, 15666.0f, 15667.0f, 15668.0f, 15669.0f, 15670.0f, 15671.0f, 15672.0f, 15673.0f, 15674.0f, 15675.0f, 15676.0f, 15677.0f, 15678.0f, 15679.0f, 15680.0f, 15681.0f, 15682.0f, 15683.0f, 15684.0f, 15685.0f, 15686.0f, 15687.0f, 15688.0f, 15689.0f, 15690.0f, 15691.0f, 15692.0f, 15693.0f, 15694.0f, 15695.0f, 15696.0f, 15697.0f, 15698.0f, 15699.0f, 
15700.0f, 15701.0f, 15702.0f, 15703.0f, 15704.0f, 15705.0f, 15706.0f, 15707.0f, 15708.0f, 15709.0f, 15710.0f, 15711.0f, 15712.0f, 15713.0f, 15714.0f, 15715.0f, 15716.0f, 15717.0f, 15718.0f, 15719.0f, 15720.0f, 15721.0f, 15722.0f, 15723.0f, 15724.0f, 15725.0f, 15726.0f, 15727.0f, 15728.0f, 15729.0f, 15730.0f, 15731.0f, 15732.0f, 15733.0f, 15734.0f, 15735.0f, 15736.0f, 15737.0f, 15738.0f, 15739.0f, 15740.0f, 15741.0f, 15742.0f, 15743.0f, 15744.0f, 15745.0f, 15746.0f, 15747.0f, 15748.0f, 15749.0f, 15750.0f, 15751.0f, 15752.0f, 15753.0f, 15754.0f, 15755.0f, 15756.0f, 15757.0f, 15758.0f, 15759.0f, 15760.0f, 15761.0f, 15762.0f, 15763.0f, 15764.0f, 15765.0f, 15766.0f, 15767.0f, 15768.0f, 15769.0f, 15770.0f, 15771.0f, 15772.0f, 15773.0f, 15774.0f, 15775.0f, 15776.0f, 15777.0f, 15778.0f, 15779.0f, 15780.0f, 15781.0f, 15782.0f, 15783.0f, 15784.0f, 15785.0f, 15786.0f, 15787.0f, 15788.0f, 15789.0f, 15790.0f, 15791.0f, 15792.0f, 15793.0f, 15794.0f, 15795.0f, 15796.0f, 15797.0f, 15798.0f, 15799.0f, 15800.0f, 15801.0f, 15802.0f, 15803.0f, 15804.0f, 15805.0f, 15806.0f, 15807.0f, 15808.0f, 15809.0f, 15810.0f, 15811.0f, 15812.0f, 15813.0f, 15814.0f, 15815.0f, 15816.0f, 15817.0f, 15818.0f, 15819.0f, 15820.0f, 15821.0f, 15822.0f, 15823.0f, 15824.0f, 15825.0f, 15826.0f, 15827.0f, 15828.0f, 15829.0f, 15830.0f, 15831.0f, 15832.0f, 15833.0f, 15834.0f, 15835.0f, 15836.0f, 15837.0f, 15838.0f, 15839.0f, 15840.0f, 15841.0f, 15842.0f, 15843.0f, 15844.0f, 15845.0f, 15846.0f, 15847.0f, 15848.0f, 15849.0f, 15850.0f, 15851.0f, 15852.0f, 15853.0f, 15854.0f, 15855.0f, 15856.0f, 15857.0f, 15858.0f, 15859.0f, 15860.0f, 15861.0f, 15862.0f, 15863.0f, 15864.0f, 15865.0f, 15866.0f, 15867.0f, 15868.0f, 15869.0f, 15870.0f, 15871.0f, 15872.0f, 15873.0f, 15874.0f, 15875.0f, 15876.0f, 15877.0f, 15878.0f, 15879.0f, 15880.0f, 15881.0f, 15882.0f, 15883.0f, 15884.0f, 15885.0f, 15886.0f, 15887.0f, 15888.0f, 15889.0f, 15890.0f, 15891.0f, 15892.0f, 15893.0f, 15894.0f, 15895.0f, 15896.0f, 15897.0f, 15898.0f, 15899.0f, 
15900.0f, 15901.0f, 15902.0f, 15903.0f, 15904.0f, 15905.0f, 15906.0f, 15907.0f, 15908.0f, 15909.0f, 15910.0f, 15911.0f, 15912.0f, 15913.0f, 15914.0f, 15915.0f, 15916.0f, 15917.0f, 15918.0f, 15919.0f, 15920.0f, 15921.0f, 15922.0f, 15923.0f, 15924.0f, 15925.0f, 15926.0f, 15927.0f, 15928.0f, 15929.0f, 15930.0f, 15931.0f, 15932.0f, 15933.0f, 15934.0f, 15935.0f, 15936.0f, 15937.0f, 15938.0f, 15939.0f, 15940.0f, 15941.0f, 15942.0f, 15943.0f, 15944.0f, 15945.0f, 15946.0f, 15947.0f, 15948.0f, 15949.0f, 15950.0f, 15951.0f, 15952.0f, 15953.0f, 15954.0f, 15955.0f, 15956.0f, 15957.0f, 15958.0f, 15959.0f, 15960.0f, 15961.0f, 15962.0f, 15963.0f, 15964.0f, 15965.0f, 15966.0f, 15967.0f, 15968.0f, 15969.0f, 15970.0f, 15971.0f, 15972.0f, 15973.0f, 15974.0f, 15975.0f, 15976.0f, 15977.0f, 15978.0f, 15979.0f, 15980.0f, 15981.0f, 15982.0f, 15983.0f, 15984.0f, 15985.0f, 15986.0f, 15987.0f, 15988.0f, 15989.0f, 15990.0f, 15991.0f, 15992.0f, 15993.0f, 15994.0f, 15995.0f, 15996.0f, 15997.0f, 15998.0f, 15999.0f, 16000.0f, 16001.0f, 16002.0f, 16003.0f, 16004.0f, 16005.0f, 16006.0f, 16007.0f, 16008.0f, 16009.0f, 16010.0f, 16011.0f, 16012.0f, 16013.0f, 16014.0f, 16015.0f, 16016.0f, 16017.0f, 16018.0f, 16019.0f, 16020.0f, 16021.0f, 16022.0f, 16023.0f, 16024.0f, 16025.0f, 16026.0f, 16027.0f, 16028.0f, 16029.0f, 16030.0f, 16031.0f, 16032.0f, 16033.0f, 16034.0f, 16035.0f, 16036.0f, 16037.0f, 16038.0f, 16039.0f, 16040.0f, 16041.0f, 16042.0f, 16043.0f, 16044.0f, 16045.0f, 16046.0f, 16047.0f, 16048.0f, 16049.0f, 16050.0f, 16051.0f, 16052.0f, 16053.0f, 16054.0f, 16055.0f, 16056.0f, 16057.0f, 16058.0f, 16059.0f, 16060.0f, 16061.0f, 16062.0f, 16063.0f, 16064.0f, 16065.0f, 16066.0f, 16067.0f, 16068.0f, 16069.0f, 16070.0f, 16071.0f, 16072.0f, 16073.0f, 16074.0f, 16075.0f, 16076.0f, 16077.0f, 16078.0f, 16079.0f, 16080.0f, 16081.0f, 16082.0f, 16083.0f, 16084.0f, 16085.0f, 16086.0f, 16087.0f, 16088.0f, 16089.0f, 16090.0f, 16091.0f, 16092.0f, 16093.0f, 16094.0f, 16095.0f, 16096.0f, 16097.0f, 16098.0f, 16099.0f, 
16100.0f, 16101.0f, 16102.0f, 16103.0f, 16104.0f, 16105.0f, 16106.0f, 16107.0f, 16108.0f, 16109.0f, 16110.0f, 16111.0f, 16112.0f, 16113.0f, 16114.0f, 16115.0f, 16116.0f, 16117.0f, 16118.0f, 16119.0f, 16120.0f, 16121.0f, 16122.0f, 16123.0f, 16124.0f, 16125.0f, 16126.0f, 16127.0f, 16128.0f, 16129.0f, 16130.0f, 16131.0f, 16132.0f, 16133.0f, 16134.0f, 16135.0f, 16136.0f, 16137.0f, 16138.0f, 16139.0f, 16140.0f, 16141.0f, 16142.0f, 16143.0f, 16144.0f, 16145.0f, 16146.0f, 16147.0f, 16148.0f, 16149.0f, 16150.0f, 16151.0f, 16152.0f, 16153.0f, 16154.0f, 16155.0f, 16156.0f, 16157.0f, 16158.0f, 16159.0f, 16160.0f, 16161.0f, 16162.0f, 16163.0f, 16164.0f, 16165.0f, 16166.0f, 16167.0f, 16168.0f, 16169.0f, 16170.0f, 16171.0f, 16172.0f, 16173.0f, 16174.0f, 16175.0f, 16176.0f, 16177.0f, 16178.0f, 16179.0f, 16180.0f, 16181.0f, 16182.0f, 16183.0f, 16184.0f, 16185.0f, 16186.0f, 16187.0f, 16188.0f, 16189.0f, 16190.0f, 16191.0f, 16192.0f, 16193.0f, 16194.0f, 16195.0f, 16196.0f, 16197.0f, 16198.0f, 16199.0f, 16200.0f, 16201.0f, 16202.0f, 16203.0f, 16204.0f, 16205.0f, 16206.0f, 16207.0f, 16208.0f, 16209.0f, 16210.0f, 16211.0f, 16212.0f, 16213.0f, 16214.0f, 16215.0f, 16216.0f, 16217.0f, 16218.0f, 16219.0f, 16220.0f, 16221.0f, 16222.0f, 16223.0f, 16224.0f, 16225.0f, 16226.0f, 16227.0f, 16228.0f, 16229.0f, 16230.0f, 16231.0f, 16232.0f, 16233.0f, 16234.0f, 16235.0f, 16236.0f, 16237.0f, 16238.0f, 16239.0f, 16240.0f, 16241.0f, 16242.0f, 16243.0f, 16244.0f, 16245.0f, 16246.0f, 16247.0f, 16248.0f, 16249.0f, 16250.0f, 16251.0f, 16252.0f, 16253.0f, 16254.0f, 16255.0f, 16256.0f, 16257.0f, 16258.0f, 16259.0f, 16260.0f, 16261.0f, 16262.0f, 16263.0f, 16264.0f, 16265.0f, 16266.0f, 16267.0f, 16268.0f, 16269.0f, 16270.0f, 16271.0f, 16272.0f, 16273.0f, 16274.0f, 16275.0f, 16276.0f, 16277.0f, 16278.0f, 16279.0f, 16280.0f, 16281.0f, 16282.0f, 16283.0f, 16284.0f, 16285.0f, 16286.0f, 16287.0f, 16288.0f, 16289.0f, 16290.0f, 16291.0f, 16292.0f, 16293.0f, 16294.0f, 16295.0f, 16296.0f, 16297.0f, 16298.0f, 16299.0f, 
16300.0f, 16301.0f, 16302.0f, 16303.0f, 16304.0f, 16305.0f, 16306.0f, 16307.0f, 16308.0f, 16309.0f, 16310.0f, 16311.0f, 16312.0f, 16313.0f, 16314.0f, 16315.0f, 16316.0f, 16317.0f, 16318.0f, 16319.0f, 16320.0f, 16321.0f, 16322.0f, 16323.0f, 16324.0f, 16325.0f, 16326.0f, 16327.0f, 16328.0f, 16329.0f, 16330.0f, 16331.0f, 16332.0f, 16333.0f, 16334.0f, 16335.0f, 16336.0f, 16337.0f, 16338.0f, 16339.0f, 16340.0f, 16341.0f, 16342.0f, 16343.0f, 16344.0f, 16345.0f, 16346.0f, 16347.0f, 16348.0f, 16349.0f, 16350.0f, 16351.0f, 16352.0f, 16353.0f, 16354.0f, 16355.0f, 16356.0f, 16357.0f, 16358.0f, 16359.0f, 16360.0f, 16361.0f, 16362.0f, 16363.0f, 16364.0f, 16365.0f, 16366.0f, 16367.0f, 16368.0f, 16369.0f, 16370.0f, 16371.0f, 16372.0f, 16373.0f, 16374.0f, 16375.0f, 16376.0f, 16377.0f, 16378.0f, 16379.0f, 16380.0f, 16381.0f, 16382.0f, 16383.0f, 16384.0f, 16385.0f, 16386.0f, 16387.0f, 16388.0f, 16389.0f, 16390.0f, 16391.0f, 16392.0f, 16393.0f, 16394.0f, 16395.0f, 16396.0f, 16397.0f, 16398.0f, 16399.0f, 16400.0f, 16401.0f, 16402.0f, 16403.0f, 16404.0f, 16405.0f, 16406.0f, 16407.0f, 16408.0f, 16409.0f, 16410.0f, 16411.0f, 16412.0f, 16413.0f, 16414.0f, 16415.0f, 16416.0f, 16417.0f, 16418.0f, 16419.0f, 16420.0f, 16421.0f, 16422.0f, 16423.0f, 16424.0f, 16425.0f, 16426.0f, 16427.0f, 16428.0f, 16429.0f, 16430.0f, 16431.0f, 16432.0f, 16433.0f, 16434.0f, 16435.0f, 16436.0f, 16437.0f, 16438.0f, 16439.0f, 16440.0f, 16441.0f, 16442.0f, 16443.0f, 16444.0f, 16445.0f, 16446.0f, 16447.0f, 16448.0f, 16449.0f, 16450.0f, 16451.0f, 16452.0f, 16453.0f, 16454.0f, 16455.0f, 16456.0f, 16457.0f, 16458.0f, 16459.0f, 16460.0f, 16461.0f, 16462.0f, 16463.0f, 16464.0f, 16465.0f, 16466.0f, 16467.0f, 16468.0f, 16469.0f, 16470.0f, 16471.0f, 16472.0f, 16473.0f, 16474.0f, 16475.0f, 16476.0f, 16477.0f, 16478.0f, 16479.0f, 16480.0f, 16481.0f, 16482.0f, 16483.0f, 16484.0f, 16485.0f, 16486.0f, 16487.0f, 16488.0f, 16489.0f, 16490.0f, 16491.0f, 16492.0f, 16493.0f, 16494.0f, 16495.0f, 16496.0f, 16497.0f, 16498.0f, 16499.0f, 
16500.0f, 16501.0f, 16502.0f, 16503.0f, 16504.0f, 16505.0f, 16506.0f, 16507.0f, 16508.0f, 16509.0f, 16510.0f, 16511.0f, 16512.0f, 16513.0f, 16514.0f, 16515.0f, 16516.0f, 16517.0f, 16518.0f, 16519.0f, 16520.0f, 16521.0f, 16522.0f, 16523.0f, 16524.0f, 16525.0f, 16526.0f, 16527.0f, 16528.0f, 16529.0f, 16530.0f, 16531.0f, 16532.0f, 16533.0f, 16534.0f, 16535.0f, 16536.0f, 16537.0f, 16538.0f, 16539.0f, 16540.0f, 16541.0f, 16542.0f, 16543.0f, 16544.0f, 16545.0f, 16546.0f, 16547.0f, 16548.0f, 16549.0f, 16550.0f, 16551.0f, 16552.0f, 16553.0f, 16554.0f, 16555.0f, 16556.0f, 16557.0f, 16558.0f, 16559.0f, 16560.0f, 16561.0f, 16562.0f, 16563.0f, 16564.0f, 16565.0f, 16566.0f, 16567.0f, 16568.0f, 16569.0f, 16570.0f, 16571.0f, 16572.0f, 16573.0f, 16574.0f, 16575.0f, 16576.0f, 16577.0f, 16578.0f, 16579.0f, 16580.0f, 16581.0f, 16582.0f, 16583.0f, 16584.0f, 16585.0f, 16586.0f, 16587.0f, 16588.0f, 16589.0f, 16590.0f, 16591.0f, 16592.0f, 16593.0f, 16594.0f, 16595.0f, 16596.0f, 16597.0f, 16598.0f, 16599.0f, 16600.0f, 16601.0f, 16602.0f, 16603.0f, 16604.0f, 16605.0f, 16606.0f, 16607.0f, 16608.0f, 16609.0f, 16610.0f, 16611.0f, 16612.0f, 16613.0f, 16614.0f, 16615.0f, 16616.0f, 16617.0f, 16618.0f, 16619.0f, 16620.0f, 16621.0f, 16622.0f, 16623.0f, 16624.0f, 16625.0f, 16626.0f, 16627.0f, 16628.0f, 16629.0f, 16630.0f, 16631.0f, 16632.0f, 16633.0f, 16634.0f, 16635.0f, 16636.0f, 16637.0f, 16638.0f, 16639.0f, 16640.0f, 16641.0f, 16642.0f, 16643.0f, 16644.0f, 16645.0f, 16646.0f, 16647.0f, 16648.0f, 16649.0f, 16650.0f, 16651.0f, 16652.0f, 16653.0f, 16654.0f, 16655.0f, 16656.0f, 16657.0f, 16658.0f, 16659.0f, 16660.0f, 16661.0f, 16662.0f, 16663.0f, 16664.0f, 16665.0f, 16666.0f, 16667.0f, 16668.0f, 16669.0f, 16670.0f, 16671.0f, 16672.0f, 16673.0f, 16674.0f, 16675.0f, 16676.0f, 16677.0f, 16678.0f, 16679.0f, 16680.0f, 16681.0f, 16682.0f, 16683.0f, 16684.0f, 16685.0f, 16686.0f, 16687.0f, 16688.0f, 16689.0f, 16690.0f, 16691.0f, 16692.0f, 16693.0f, 16694.0f, 16695.0f, 16696.0f, 16697.0f, 16698.0f, 16699.0f, 
16700.0f, 16701.0f, 16702.0f, 16703.0f, 16704.0f, 16705.0f, 16706.0f, 16707.0f, 16708.0f, 16709.0f, 16710.0f, 16711.0f, 16712.0f, 16713.0f, 16714.0f, 16715.0f, 16716.0f, 16717.0f, 16718.0f, 16719.0f, 16720.0f, 16721.0f, 16722.0f, 16723.0f, 16724.0f, 16725.0f, 16726.0f, 16727.0f, 16728.0f, 16729.0f, 16730.0f, 16731.0f, 16732.0f, 16733.0f, 16734.0f, 16735.0f, 16736.0f, 16737.0f, 16738.0f, 16739.0f, 16740.0f, 16741.0f, 16742.0f, 16743.0f, 16744.0f, 16745.0f, 16746.0f, 16747.0f, 16748.0f, 16749.0f, 16750.0f, 16751.0f, 16752.0f, 16753.0f, 16754.0f, 16755.0f, 16756.0f, 16757.0f, 16758.0f, 16759.0f, 16760.0f, 16761.0f, 16762.0f, 16763.0f, 16764.0f, 16765.0f, 16766.0f, 16767.0f, 16768.0f, 16769.0f, 16770.0f, 16771.0f, 16772.0f, 16773.0f, 16774.0f, 16775.0f, 16776.0f, 16777.0f, 16778.0f, 16779.0f, 16780.0f, 16781.0f, 16782.0f, 16783.0f, 16784.0f, 16785.0f, 16786.0f, 16787.0f, 16788.0f, 16789.0f, 16790.0f, 16791.0f, 16792.0f, 16793.0f, 16794.0f, 16795.0f, 16796.0f, 16797.0f, 16798.0f, 16799.0f, 16800.0f, 16801.0f, 16802.0f, 16803.0f, 16804.0f, 16805.0f, 16806.0f, 16807.0f, 16808.0f, 16809.0f, 16810.0f, 16811.0f, 16812.0f, 16813.0f, 16814.0f, 16815.0f, 16816.0f, 16817.0f, 16818.0f, 16819.0f, 16820.0f, 16821.0f, 16822.0f, 16823.0f, 16824.0f, 16825.0f, 16826.0f, 16827.0f, 16828.0f, 16829.0f, 16830.0f, 16831.0f, 16832.0f, 16833.0f, 16834.0f, 16835.0f, 16836.0f, 16837.0f, 16838.0f, 16839.0f, 16840.0f, 16841.0f, 16842.0f, 16843.0f, 16844.0f, 16845.0f, 16846.0f, 16847.0f, 16848.0f, 16849.0f, 16850.0f, 16851.0f, 16852.0f, 16853.0f, 16854.0f, 16855.0f, 16856.0f, 16857.0f, 16858.0f, 16859.0f, 16860.0f, 16861.0f, 16862.0f, 16863.0f, 16864.0f, 16865.0f, 16866.0f, 16867.0f, 16868.0f, 16869.0f, 16870.0f, 16871.0f, 16872.0f, 16873.0f, 16874.0f, 16875.0f, 16876.0f, 16877.0f, 16878.0f, 16879.0f, 16880.0f, 16881.0f, 16882.0f, 16883.0f, 16884.0f, 16885.0f, 16886.0f, 16887.0f, 16888.0f, 16889.0f, 16890.0f, 16891.0f, 16892.0f, 16893.0f, 16894.0f, 16895.0f, 16896.0f, 16897.0f, 16898.0f, 16899.0f, 
16900.0f, 16901.0f, 16902.0f, 16903.0f, 16904.0f, 16905.0f, 16906.0f, 16907.0f, 16908.0f, 16909.0f, 16910.0f, 16911.0f, 16912.0f, 16913.0f, 16914.0f, 16915.0f, 16916.0f, 16917.0f, 16918.0f, 16919.0f, 16920.0f, 16921.0f, 16922.0f, 16923.0f, 16924.0f, 16925.0f, 16926.0f, 16927.0f, 16928.0f, 16929.0f, 16930.0f, 16931.0f, 16932.0f, 16933.0f, 16934.0f, 16935.0f, 16936.0f, 16937.0f, 16938.0f, 16939.0f, 16940.0f, 16941.0f, 16942.0f, 16943.0f, 16944.0f, 16945.0f, 16946.0f, 16947.0f, 16948.0f, 16949.0f, 16950.0f, 16951.0f, 16952.0f, 16953.0f, 16954.0f, 16955.0f, 16956.0f, 16957.0f, 16958.0f, 16959.0f, 16960.0f, 16961.0f, 16962.0f, 16963.0f, 16964.0f, 16965.0f, 16966.0f, 16967.0f, 16968.0f, 16969.0f, 16970.0f, 16971.0f, 16972.0f, 16973.0f, 16974.0f, 16975.0f, 16976.0f, 16977.0f, 16978.0f, 16979.0f, 16980.0f, 16981.0f, 16982.0f, 16983.0f, 16984.0f, 16985.0f, 16986.0f, 16987.0f, 16988.0f, 16989.0f, 16990.0f, 16991.0f, 16992.0f, 16993.0f, 16994.0f, 16995.0f, 16996.0f, 16997.0f, 16998.0f, 16999.0f, 17000.0f, 17001.0f, 17002.0f, 17003.0f, 17004.0f, 17005.0f, 17006.0f, 17007.0f, 17008.0f, 17009.0f, 17010.0f, 17011.0f, 17012.0f, 17013.0f, 17014.0f, 17015.0f, 17016.0f, 17017.0f, 17018.0f, 17019.0f, 17020.0f, 17021.0f, 17022.0f, 17023.0f, 17024.0f, 17025.0f, 17026.0f, 17027.0f, 17028.0f, 17029.0f, 17030.0f, 17031.0f, 17032.0f, 17033.0f, 17034.0f, 17035.0f, 17036.0f, 17037.0f, 17038.0f, 17039.0f, 17040.0f, 17041.0f, 17042.0f, 17043.0f, 17044.0f, 17045.0f, 17046.0f, 17047.0f, 17048.0f, 17049.0f, 17050.0f, 17051.0f, 17052.0f, 17053.0f, 17054.0f, 17055.0f, 17056.0f, 17057.0f, 17058.0f, 17059.0f, 17060.0f, 17061.0f, 17062.0f, 17063.0f, 17064.0f, 17065.0f, 17066.0f, 17067.0f, 17068.0f, 17069.0f, 17070.0f, 17071.0f, 17072.0f, 17073.0f, 17074.0f, 17075.0f, 17076.0f, 17077.0f, 17078.0f, 17079.0f, 17080.0f, 17081.0f, 17082.0f, 17083.0f, 17084.0f, 17085.0f, 17086.0f, 17087.0f, 17088.0f, 17089.0f, 17090.0f, 17091.0f, 17092.0f, 17093.0f, 17094.0f, 17095.0f, 17096.0f, 17097.0f, 17098.0f, 17099.0f, 
17100.0f, 17101.0f, 17102.0f, 17103.0f, 17104.0f, 17105.0f, 17106.0f, 17107.0f, 17108.0f, 17109.0f, 17110.0f, 17111.0f, 17112.0f, 17113.0f, 17114.0f, 17115.0f, 17116.0f, 17117.0f, 17118.0f, 17119.0f, 17120.0f, 17121.0f, 17122.0f, 17123.0f, 17124.0f, 17125.0f, 17126.0f, 17127.0f, 17128.0f, 17129.0f, 17130.0f, 17131.0f, 17132.0f, 17133.0f, 17134.0f, 17135.0f, 17136.0f, 17137.0f, 17138.0f, 17139.0f, 17140.0f, 17141.0f, 17142.0f, 17143.0f, 17144.0f, 17145.0f, 17146.0f, 17147.0f, 17148.0f, 17149.0f, 17150.0f, 17151.0f, 17152.0f, 17153.0f, 17154.0f, 17155.0f, 17156.0f, 17157.0f, 17158.0f, 17159.0f, 17160.0f, 17161.0f, 17162.0f, 17163.0f, 17164.0f, 17165.0f, 17166.0f, 17167.0f, 17168.0f, 17169.0f, 17170.0f, 17171.0f, 17172.0f, 17173.0f, 17174.0f, 17175.0f, 17176.0f, 17177.0f, 17178.0f, 17179.0f, 17180.0f, 17181.0f, 17182.0f, 17183.0f, 17184.0f, 17185.0f, 17186.0f, 17187.0f, 17188.0f, 17189.0f, 17190.0f, 17191.0f, 17192.0f, 17193.0f, 17194.0f, 17195.0f, 17196.0f, 17197.0f, 17198.0f, 17199.0f, 17200.0f, 17201.0f, 17202.0f, 17203.0f, 17204.0f, 17205.0f, 17206.0f, 17207.0f, 17208.0f, 17209.0f, 17210.0f, 17211.0f, 17212.0f, 17213.0f, 17214.0f, 17215.0f, 17216.0f, 17217.0f, 17218.0f, 17219.0f, 17220.0f, 17221.0f, 17222.0f, 17223.0f, 17224.0f, 17225.0f, 17226.0f, 17227.0f, 17228.0f, 17229.0f, 17230.0f, 17231.0f, 17232.0f, 17233.0f, 17234.0f, 17235.0f, 17236.0f, 17237.0f, 17238.0f, 17239.0f, 17240.0f, 17241.0f, 17242.0f, 17243.0f, 17244.0f, 17245.0f, 17246.0f, 17247.0f, 17248.0f, 17249.0f, 17250.0f, 17251.0f, 17252.0f, 17253.0f, 17254.0f, 17255.0f, 17256.0f, 17257.0f, 17258.0f, 17259.0f, 17260.0f, 17261.0f, 17262.0f, 17263.0f, 17264.0f, 17265.0f, 17266.0f, 17267.0f, 17268.0f, 17269.0f, 17270.0f, 17271.0f, 17272.0f, 17273.0f, 17274.0f, 17275.0f, 17276.0f, 17277.0f, 17278.0f, 17279.0f, 17280.0f, 17281.0f, 17282.0f, 17283.0f, 17284.0f, 17285.0f, 17286.0f, 17287.0f, 17288.0f, 17289.0f, 17290.0f, 17291.0f, 17292.0f, 17293.0f, 17294.0f, 17295.0f, 17296.0f, 17297.0f, 17298.0f, 17299.0f, 
17300.0f, 17301.0f, 17302.0f, 17303.0f, 17304.0f, 17305.0f, 17306.0f, 17307.0f, 17308.0f, 17309.0f, 17310.0f, 17311.0f, 17312.0f, 17313.0f, 17314.0f, 17315.0f, 17316.0f, 17317.0f, 17318.0f, 17319.0f, 17320.0f, 17321.0f, 17322.0f, 17323.0f, 17324.0f, 17325.0f, 17326.0f, 17327.0f, 17328.0f, 17329.0f, 17330.0f, 17331.0f, 17332.0f, 17333.0f, 17334.0f, 17335.0f, 17336.0f, 17337.0f, 17338.0f, 17339.0f, 17340.0f, 17341.0f, 17342.0f, 17343.0f, 17344.0f, 17345.0f, 17346.0f, 17347.0f, 17348.0f, 17349.0f, 17350.0f, 17351.0f, 17352.0f, 17353.0f, 17354.0f, 17355.0f, 17356.0f, 17357.0f, 17358.0f, 17359.0f, 17360.0f, 17361.0f, 17362.0f, 17363.0f, 17364.0f, 17365.0f, 17366.0f, 17367.0f, 17368.0f, 17369.0f, 17370.0f, 17371.0f, 17372.0f, 17373.0f, 17374.0f, 17375.0f, 17376.0f, 17377.0f, 17378.0f, 17379.0f, 17380.0f, 17381.0f, 17382.0f, 17383.0f, 17384.0f, 17385.0f, 17386.0f, 17387.0f, 17388.0f, 17389.0f, 17390.0f, 17391.0f, 17392.0f, 17393.0f, 17394.0f, 17395.0f, 17396.0f, 17397.0f, 17398.0f, 17399.0f, 17400.0f, 17401.0f, 17402.0f, 17403.0f, 17404.0f, 17405.0f, 17406.0f, 17407.0f, 17408.0f, 17409.0f, 17410.0f, 17411.0f, 17412.0f, 17413.0f, 17414.0f, 17415.0f, 17416.0f, 17417.0f, 17418.0f, 17419.0f, 17420.0f, 17421.0f, 17422.0f, 17423.0f, 17424.0f, 17425.0f, 17426.0f, 17427.0f, 17428.0f, 17429.0f, 17430.0f, 17431.0f, 17432.0f, 17433.0f, 17434.0f, 17435.0f, 17436.0f, 17437.0f, 17438.0f, 17439.0f, 17440.0f, 17441.0f, 17442.0f, 17443.0f, 17444.0f, 17445.0f, 17446.0f, 17447.0f, 17448.0f, 17449.0f, 17450.0f, 17451.0f, 17452.0f, 17453.0f, 17454.0f, 17455.0f, 17456.0f, 17457.0f, 17458.0f, 17459.0f, 17460.0f, 17461.0f, 17462.0f, 17463.0f, 17464.0f, 17465.0f, 17466.0f, 17467.0f, 17468.0f, 17469.0f, 17470.0f, 17471.0f, 17472.0f, 17473.0f, 17474.0f, 17475.0f, 17476.0f, 17477.0f, 17478.0f, 17479.0f, 17480.0f, 17481.0f, 17482.0f, 17483.0f, 17484.0f, 17485.0f, 17486.0f, 17487.0f, 17488.0f, 17489.0f, 17490.0f, 17491.0f, 17492.0f, 17493.0f, 17494.0f, 17495.0f, 17496.0f, 17497.0f, 17498.0f, 17499.0f, 
17500.0f, 17501.0f, 17502.0f, 17503.0f, 17504.0f, 17505.0f, 17506.0f, 17507.0f, 17508.0f, 17509.0f, 17510.0f, 17511.0f, 17512.0f, 17513.0f, 17514.0f, 17515.0f, 17516.0f, 17517.0f, 17518.0f, 17519.0f, 17520.0f, 17521.0f, 17522.0f, 17523.0f, 17524.0f, 17525.0f, 17526.0f, 17527.0f, 17528.0f, 17529.0f, 17530.0f, 17531.0f, 17532.0f, 17533.0f, 17534.0f, 17535.0f, 17536.0f, 17537.0f, 17538.0f, 17539.0f, 17540.0f, 17541.0f, 17542.0f, 17543.0f, 17544.0f, 17545.0f, 17546.0f, 17547.0f, 17548.0f, 17549.0f, 17550.0f, 17551.0f, 17552.0f, 17553.0f, 17554.0f, 17555.0f, 17556.0f, 17557.0f, 17558.0f, 17559.0f, 17560.0f, 17561.0f, 17562.0f, 17563.0f, 17564.0f, 17565.0f, 17566.0f, 17567.0f, 17568.0f, 17569.0f, 17570.0f, 17571.0f, 17572.0f, 17573.0f, 17574.0f, 17575.0f, 17576.0f, 17577.0f, 17578.0f, 17579.0f, 17580.0f, 17581.0f, 17582.0f, 17583.0f, 17584.0f, 17585.0f, 17586.0f, 17587.0f, 17588.0f, 17589.0f, 17590.0f, 17591.0f, 17592.0f, 17593.0f, 17594.0f, 17595.0f, 17596.0f, 17597.0f, 17598.0f, 17599.0f, 17600.0f, 17601.0f, 17602.0f, 17603.0f, 17604.0f, 17605.0f, 17606.0f, 17607.0f, 17608.0f, 17609.0f, 17610.0f, 17611.0f, 17612.0f, 17613.0f, 17614.0f, 17615.0f, 17616.0f, 17617.0f, 17618.0f, 17619.0f, 17620.0f, 17621.0f, 17622.0f, 17623.0f, 17624.0f, 17625.0f, 17626.0f, 17627.0f, 17628.0f, 17629.0f, 17630.0f, 17631.0f, 17632.0f, 17633.0f, 17634.0f, 17635.0f, 17636.0f, 17637.0f, 17638.0f, 17639.0f, 17640.0f, 17641.0f, 17642.0f, 17643.0f, 17644.0f, 17645.0f, 17646.0f, 17647.0f, 17648.0f, 17649.0f, 17650.0f, 17651.0f, 17652.0f, 17653.0f, 17654.0f, 17655.0f, 17656.0f, 17657.0f, 17658.0f, 17659.0f, 17660.0f, 17661.0f, 17662.0f, 17663.0f, 17664.0f, 17665.0f, 17666.0f, 17667.0f, 17668.0f, 17669.0f, 17670.0f, 17671.0f, 17672.0f, 17673.0f, 17674.0f, 17675.0f, 17676.0f, 17677.0f, 17678.0f, 17679.0f, 17680.0f, 17681.0f, 17682.0f, 17683.0f, 17684.0f, 17685.0f, 17686.0f, 17687.0f, 17688.0f, 17689.0f, 17690.0f, 17691.0f, 17692.0f, 17693.0f, 17694.0f, 17695.0f, 17696.0f, 17697.0f, 17698.0f, 17699.0f, 
17700.0f, 17701.0f, 17702.0f, 17703.0f, 17704.0f, 17705.0f, 17706.0f, 17707.0f, 17708.0f, 17709.0f, 17710.0f, 17711.0f, 17712.0f, 17713.0f, 17714.0f, 17715.0f, 17716.0f, 17717.0f, 17718.0f, 17719.0f, 17720.0f, 17721.0f, 17722.0f, 17723.0f, 17724.0f, 17725.0f, 17726.0f, 17727.0f, 17728.0f, 17729.0f, 17730.0f, 17731.0f, 17732.0f, 17733.0f, 17734.0f, 17735.0f, 17736.0f, 17737.0f, 17738.0f, 17739.0f, 17740.0f, 17741.0f, 17742.0f, 17743.0f, 17744.0f, 17745.0f, 17746.0f, 17747.0f, 17748.0f, 17749.0f, 17750.0f, 17751.0f, 17752.0f, 17753.0f, 17754.0f, 17755.0f, 17756.0f, 17757.0f, 17758.0f, 17759.0f, 17760.0f, 17761.0f, 17762.0f, 17763.0f, 17764.0f, 17765.0f, 17766.0f, 17767.0f, 17768.0f, 17769.0f, 17770.0f, 17771.0f, 17772.0f, 17773.0f, 17774.0f, 17775.0f, 17776.0f, 17777.0f, 17778.0f, 17779.0f, 17780.0f, 17781.0f, 17782.0f, 17783.0f, 17784.0f, 17785.0f, 17786.0f, 17787.0f, 17788.0f, 17789.0f, 17790.0f, 17791.0f, 17792.0f, 17793.0f, 17794.0f, 17795.0f, 17796.0f, 17797.0f, 17798.0f, 17799.0f, 17800.0f, 17801.0f, 17802.0f, 17803.0f, 17804.0f, 17805.0f, 17806.0f, 17807.0f, 17808.0f, 17809.0f, 17810.0f, 17811.0f, 17812.0f, 17813.0f, 17814.0f, 17815.0f, 17816.0f, 17817.0f, 17818.0f, 17819.0f, 17820.0f, 17821.0f, 17822.0f, 17823.0f, 17824.0f, 17825.0f, 17826.0f, 17827.0f, 17828.0f, 17829.0f, 17830.0f, 17831.0f, 17832.0f, 17833.0f, 17834.0f, 17835.0f, 17836.0f, 17837.0f, 17838.0f, 17839.0f, 17840.0f, 17841.0f, 17842.0f, 17843.0f, 17844.0f, 17845.0f, 17846.0f, 17847.0f, 17848.0f, 17849.0f, 17850.0f, 17851.0f, 17852.0f, 17853.0f, 17854.0f, 17855.0f, 17856.0f, 17857.0f, 17858.0f, 17859.0f, 17860.0f, 17861.0f, 17862.0f, 17863.0f, 17864.0f, 17865.0f, 17866.0f, 17867.0f, 17868.0f, 17869.0f, 17870.0f, 17871.0f, 17872.0f, 17873.0f, 17874.0f, 17875.0f, 17876.0f, 17877.0f, 17878.0f, 17879.0f, 17880.0f, 17881.0f, 17882.0f, 17883.0f, 17884.0f, 17885.0f, 17886.0f, 17887.0f, 17888.0f, 17889.0f, 17890.0f, 17891.0f, 17892.0f, 17893.0f, 17894.0f, 17895.0f, 17896.0f, 17897.0f, 17898.0f, 17899.0f, 
17900.0f, 17901.0f, 17902.0f, 17903.0f, 17904.0f, 17905.0f, 17906.0f, 17907.0f, 17908.0f, 17909.0f, 17910.0f, 17911.0f, 17912.0f, 17913.0f, 17914.0f, 17915.0f, 17916.0f, 17917.0f, 17918.0f, 17919.0f, 17920.0f, 17921.0f, 17922.0f, 17923.0f, 17924.0f, 17925.0f, 17926.0f, 17927.0f, 17928.0f, 17929.0f, 17930.0f, 17931.0f, 17932.0f, 17933.0f, 17934.0f, 17935.0f, 17936.0f, 17937.0f, 17938.0f, 17939.0f, 17940.0f, 17941.0f, 17942.0f, 17943.0f, 17944.0f, 17945.0f, 17946.0f, 17947.0f, 17948.0f, 17949.0f, 17950.0f, 17951.0f, 17952.0f, 17953.0f, 17954.0f, 17955.0f, 17956.0f, 17957.0f, 17958.0f, 17959.0f, 17960.0f, 17961.0f, 17962.0f, 17963.0f, 17964.0f, 17965.0f, 17966.0f, 17967.0f, 17968.0f, 17969.0f, 17970.0f, 17971.0f, 17972.0f, 17973.0f, 17974.0f, 17975.0f, 17976.0f, 17977.0f, 17978.0f, 17979.0f, 17980.0f, 17981.0f, 17982.0f, 17983.0f, 17984.0f, 17985.0f, 17986.0f, 17987.0f, 17988.0f, 17989.0f, 17990.0f, 17991.0f, 17992.0f, 17993.0f, 17994.0f, 17995.0f, 17996.0f, 17997.0f, 17998.0f, 17999.0f, 18000.0f, 18001.0f, 18002.0f, 18003.0f, 18004.0f, 18005.0f, 18006.0f, 18007.0f, 18008.0f, 18009.0f, 18010.0f, 18011.0f, 18012.0f, 18013.0f, 18014.0f, 18015.0f, 18016.0f, 18017.0f, 18018.0f, 18019.0f, 18020.0f, 18021.0f, 18022.0f, 18023.0f, 18024.0f, 18025.0f, 18026.0f, 18027.0f, 18028.0f, 18029.0f, 18030.0f, 18031.0f, 18032.0f, 18033.0f, 18034.0f, 18035.0f, 18036.0f, 18037.0f, 18038.0f, 18039.0f, 18040.0f, 18041.0f, 18042.0f, 18043.0f, 18044.0f, 18045.0f, 18046.0f, 18047.0f, 18048.0f, 18049.0f, 18050.0f, 18051.0f, 18052.0f, 18053.0f, 18054.0f, 18055.0f, 18056.0f, 18057.0f, 18058.0f, 18059.0f, 18060.0f, 18061.0f, 18062.0f, 18063.0f, 18064.0f, 18065.0f, 18066.0f, 18067.0f, 18068.0f, 18069.0f, 18070.0f, 18071.0f, 18072.0f, 18073.0f, 18074.0f, 18075.0f, 18076.0f, 18077.0f, 18078.0f, 18079.0f, 18080.0f, 18081.0f, 18082.0f, 18083.0f, 18084.0f, 18085.0f, 18086.0f, 18087.0f, 18088.0f, 18089.0f, 18090.0f, 18091.0f, 18092.0f, 18093.0f, 18094.0f, 18095.0f, 18096.0f, 18097.0f, 18098.0f, 18099.0f, 
18100.0f, 18101.0f, 18102.0f, 18103.0f, 18104.0f, 18105.0f, 18106.0f, 18107.0f, 18108.0f, 18109.0f, 18110.0f, 18111.0f, 18112.0f, 18113.0f, 18114.0f, 18115.0f, 18116.0f, 18117.0f, 18118.0f, 18119.0f, 18120.0f, 18121.0f, 18122.0f, 18123.0f, 18124.0f, 18125.0f, 18126.0f, 18127.0f, 18128.0f, 18129.0f, 18130.0f, 18131.0f, 18132.0f, 18133.0f, 18134.0f, 18135.0f, 18136.0f, 18137.0f, 18138.0f, 18139.0f, 18140.0f, 18141.0f, 18142.0f, 18143.0f, 18144.0f, 18145.0f, 18146.0f, 18147.0f, 18148.0f, 18149.0f, 18150.0f, 18151.0f, 18152.0f, 18153.0f, 18154.0f, 18155.0f, 18156.0f, 18157.0f, 18158.0f, 18159.0f, 18160.0f, 18161.0f, 18162.0f, 18163.0f, 18164.0f, 18165.0f, 18166.0f, 18167.0f, 18168.0f, 18169.0f, 18170.0f, 18171.0f, 18172.0f, 18173.0f, 18174.0f, 18175.0f, 18176.0f, 18177.0f, 18178.0f, 18179.0f, 18180.0f, 18181.0f, 18182.0f, 18183.0f, 18184.0f, 18185.0f, 18186.0f, 18187.0f, 18188.0f, 18189.0f, 18190.0f, 18191.0f, 18192.0f, 18193.0f, 18194.0f, 18195.0f, 18196.0f, 18197.0f, 18198.0f, 18199.0f, 18200.0f, 18201.0f, 18202.0f, 18203.0f, 18204.0f, 18205.0f, 18206.0f, 18207.0f, 18208.0f, 18209.0f, 18210.0f, 18211.0f, 18212.0f, 18213.0f, 18214.0f, 18215.0f, 18216.0f, 18217.0f, 18218.0f, 18219.0f, 18220.0f, 18221.0f, 18222.0f, 18223.0f, 18224.0f, 18225.0f, 18226.0f, 18227.0f, 18228.0f, 18229.0f, 18230.0f, 18231.0f, 18232.0f, 18233.0f, 18234.0f, 18235.0f, 18236.0f, 18237.0f, 18238.0f, 18239.0f, 18240.0f, 18241.0f, 18242.0f, 18243.0f, 18244.0f, 18245.0f, 18246.0f, 18247.0f, 18248.0f, 18249.0f, 18250.0f, 18251.0f, 18252.0f, 18253.0f, 18254.0f, 18255.0f, 18256.0f, 18257.0f, 18258.0f, 18259.0f, 18260.0f, 18261.0f, 18262.0f, 18263.0f, 18264.0f, 18265.0f, 18266.0f, 18267.0f, 18268.0f, 18269.0f, 18270.0f, 18271.0f, 18272.0f, 18273.0f, 18274.0f, 18275.0f, 18276.0f, 18277.0f, 18278.0f, 18279.0f, 18280.0f, 18281.0f, 18282.0f, 18283.0f, 18284.0f, 18285.0f, 18286.0f, 18287.0f, 18288.0f, 18289.0f, 18290.0f, 18291.0f, 18292.0f, 18293.0f, 18294.0f, 18295.0f, 18296.0f, 18297.0f, 18298.0f, 18299.0f, 
18300.0f, 18301.0f, 18302.0f, 18303.0f, 18304.0f, 18305.0f, 18306.0f, 18307.0f, 18308.0f, 18309.0f, 18310.0f, 18311.0f, 18312.0f, 18313.0f, 18314.0f, 18315.0f, 18316.0f, 18317.0f, 18318.0f, 18319.0f, 18320.0f, 18321.0f, 18322.0f, 18323.0f, 18324.0f, 18325.0f, 18326.0f, 18327.0f, 18328.0f, 18329.0f, 18330.0f, 18331.0f, 18332.0f, 18333.0f, 18334.0f, 18335.0f, 18336.0f, 18337.0f, 18338.0f, 18339.0f, 18340.0f, 18341.0f, 18342.0f, 18343.0f, 18344.0f, 18345.0f, 18346.0f, 18347.0f, 18348.0f, 18349.0f, 18350.0f, 18351.0f, 18352.0f, 18353.0f, 18354.0f, 18355.0f, 18356.0f, 18357.0f, 18358.0f, 18359.0f, 18360.0f, 18361.0f, 18362.0f, 18363.0f, 18364.0f, 18365.0f, 18366.0f, 18367.0f, 18368.0f, 18369.0f, 18370.0f, 18371.0f, 18372.0f, 18373.0f, 18374.0f, 18375.0f, 18376.0f, 18377.0f, 18378.0f, 18379.0f, 18380.0f, 18381.0f, 18382.0f, 18383.0f, 18384.0f, 18385.0f, 18386.0f, 18387.0f, 18388.0f, 18389.0f, 18390.0f, 18391.0f, 18392.0f, 18393.0f, 18394.0f, 18395.0f, 18396.0f, 18397.0f, 18398.0f, 18399.0f, 18400.0f, 18401.0f, 18402.0f, 18403.0f, 18404.0f, 18405.0f, 18406.0f, 18407.0f, 18408.0f, 18409.0f, 18410.0f, 18411.0f, 18412.0f, 18413.0f, 18414.0f, 18415.0f, 18416.0f, 18417.0f, 18418.0f, 18419.0f, 18420.0f, 18421.0f, 18422.0f, 18423.0f, 18424.0f, 18425.0f, 18426.0f, 18427.0f, 18428.0f, 18429.0f, 18430.0f, 18431.0f, 18432.0f, 18433.0f, 18434.0f, 18435.0f, 18436.0f, 18437.0f, 18438.0f, 18439.0f, 18440.0f, 18441.0f, 18442.0f, 18443.0f, 18444.0f, 18445.0f, 18446.0f, 18447.0f, 18448.0f, 18449.0f, 18450.0f, 18451.0f, 18452.0f, 18453.0f, 18454.0f, 18455.0f, 18456.0f, 18457.0f, 18458.0f, 18459.0f, 18460.0f, 18461.0f, 18462.0f, 18463.0f, 18464.0f, 18465.0f, 18466.0f, 18467.0f, 18468.0f, 18469.0f, 18470.0f, 18471.0f, 18472.0f, 18473.0f, 18474.0f, 18475.0f, 18476.0f, 18477.0f, 18478.0f, 18479.0f, 18480.0f, 18481.0f, 18482.0f, 18483.0f, 18484.0f, 18485.0f, 18486.0f, 18487.0f, 18488.0f, 18489.0f, 18490.0f, 18491.0f, 18492.0f, 18493.0f, 18494.0f, 18495.0f, 18496.0f, 18497.0f, 18498.0f, 18499.0f, 
18500.0f, 18501.0f, 18502.0f, 18503.0f, 18504.0f, 18505.0f, 18506.0f, 18507.0f, 18508.0f, 18509.0f, 18510.0f, 18511.0f, 18512.0f, 18513.0f, 18514.0f, 18515.0f, 18516.0f, 18517.0f, 18518.0f, 18519.0f, 18520.0f, 18521.0f, 18522.0f, 18523.0f, 18524.0f, 18525.0f, 18526.0f, 18527.0f, 18528.0f, 18529.0f, 18530.0f, 18531.0f, 18532.0f, 18533.0f, 18534.0f, 18535.0f, 18536.0f, 18537.0f, 18538.0f, 18539.0f, 18540.0f, 18541.0f, 18542.0f, 18543.0f, 18544.0f, 18545.0f, 18546.0f, 18547.0f, 18548.0f, 18549.0f, 18550.0f, 18551.0f, 18552.0f, 18553.0f, 18554.0f, 18555.0f, 18556.0f, 18557.0f, 18558.0f, 18559.0f, 18560.0f, 18561.0f, 18562.0f, 18563.0f, 18564.0f, 18565.0f, 18566.0f, 18567.0f, 18568.0f, 18569.0f, 18570.0f, 18571.0f, 18572.0f, 18573.0f, 18574.0f, 18575.0f, 18576.0f, 18577.0f, 18578.0f, 18579.0f, 18580.0f, 18581.0f, 18582.0f, 18583.0f, 18584.0f, 18585.0f, 18586.0f, 18587.0f, 18588.0f, 18589.0f, 18590.0f, 18591.0f, 18592.0f, 18593.0f, 18594.0f, 18595.0f, 18596.0f, 18597.0f, 18598.0f, 18599.0f, 18600.0f, 18601.0f, 18602.0f, 18603.0f, 18604.0f, 18605.0f, 18606.0f, 18607.0f, 18608.0f, 18609.0f, 18610.0f, 18611.0f, 18612.0f, 18613.0f, 18614.0f, 18615.0f, 18616.0f, 18617.0f, 18618.0f, 18619.0f, 18620.0f, 18621.0f, 18622.0f, 18623.0f, 18624.0f, 18625.0f, 18626.0f, 18627.0f, 18628.0f, 18629.0f, 18630.0f, 18631.0f, 18632.0f, 18633.0f, 18634.0f, 18635.0f, 18636.0f, 18637.0f, 18638.0f, 18639.0f, 18640.0f, 18641.0f, 18642.0f, 18643.0f, 18644.0f, 18645.0f, 18646.0f, 18647.0f, 18648.0f, 18649.0f, 18650.0f, 18651.0f, 18652.0f, 18653.0f, 18654.0f, 18655.0f, 18656.0f, 18657.0f, 18658.0f, 18659.0f, 18660.0f, 18661.0f, 18662.0f, 18663.0f, 18664.0f, 18665.0f, 18666.0f, 18667.0f, 18668.0f, 18669.0f, 18670.0f, 18671.0f, 18672.0f, 18673.0f, 18674.0f, 18675.0f, 18676.0f, 18677.0f, 18678.0f, 18679.0f, 18680.0f, 18681.0f, 18682.0f, 18683.0f, 18684.0f, 18685.0f, 18686.0f, 18687.0f, 18688.0f, 18689.0f, 18690.0f, 18691.0f, 18692.0f, 18693.0f, 18694.0f, 18695.0f, 18696.0f, 18697.0f, 18698.0f, 18699.0f, 
18700.0f, 18701.0f, 18702.0f, 18703.0f, 18704.0f, 18705.0f, 18706.0f, 18707.0f, 18708.0f, 18709.0f, 18710.0f, 18711.0f, 18712.0f, 18713.0f, 18714.0f, 18715.0f, 18716.0f, 18717.0f, 18718.0f, 18719.0f, 18720.0f, 18721.0f, 18722.0f, 18723.0f, 18724.0f, 18725.0f, 18726.0f, 18727.0f, 18728.0f, 18729.0f, 18730.0f, 18731.0f, 18732.0f, 18733.0f, 18734.0f, 18735.0f, 18736.0f, 18737.0f, 18738.0f, 18739.0f, 18740.0f, 18741.0f, 18742.0f, 18743.0f, 18744.0f, 18745.0f, 18746.0f, 18747.0f, 18748.0f, 18749.0f, 18750.0f, 18751.0f, 18752.0f, 18753.0f, 18754.0f, 18755.0f, 18756.0f, 18757.0f, 18758.0f, 18759.0f, 18760.0f, 18761.0f, 18762.0f, 18763.0f, 18764.0f, 18765.0f, 18766.0f, 18767.0f, 18768.0f, 18769.0f, 18770.0f, 18771.0f, 18772.0f, 18773.0f, 18774.0f, 18775.0f, 18776.0f, 18777.0f, 18778.0f, 18779.0f, 18780.0f, 18781.0f, 18782.0f, 18783.0f, 18784.0f, 18785.0f, 18786.0f, 18787.0f, 18788.0f, 18789.0f, 18790.0f, 18791.0f, 18792.0f, 18793.0f, 18794.0f, 18795.0f, 18796.0f, 18797.0f, 18798.0f, 18799.0f, 18800.0f, 18801.0f, 18802.0f, 18803.0f, 18804.0f, 18805.0f, 18806.0f, 18807.0f, 18808.0f, 18809.0f, 18810.0f, 18811.0f, 18812.0f, 18813.0f, 18814.0f, 18815.0f, 18816.0f, 18817.0f, 18818.0f, 18819.0f, 18820.0f, 18821.0f, 18822.0f, 18823.0f, 18824.0f, 18825.0f, 18826.0f, 18827.0f, 18828.0f, 18829.0f, 18830.0f, 18831.0f, 18832.0f, 18833.0f, 18834.0f, 18835.0f, 18836.0f, 18837.0f, 18838.0f, 18839.0f, 18840.0f, 18841.0f, 18842.0f, 18843.0f, 18844.0f, 18845.0f, 18846.0f, 18847.0f, 18848.0f, 18849.0f, 18850.0f, 18851.0f, 18852.0f, 18853.0f, 18854.0f, 18855.0f, 18856.0f, 18857.0f, 18858.0f, 18859.0f, 18860.0f, 18861.0f, 18862.0f, 18863.0f, 18864.0f, 18865.0f, 18866.0f, 18867.0f, 18868.0f, 18869.0f, 18870.0f, 18871.0f, 18872.0f, 18873.0f, 18874.0f, 18875.0f, 18876.0f, 18877.0f, 18878.0f, 18879.0f, 18880.0f, 18881.0f, 18882.0f, 18883.0f, 18884.0f, 18885.0f, 18886.0f, 18887.0f, 18888.0f, 18889.0f, 18890.0f, 18891.0f, 18892.0f, 18893.0f, 18894.0f, 18895.0f, 18896.0f, 18897.0f, 18898.0f, 18899.0f, 
18900.0f, 18901.0f, 18902.0f, 18903.0f, 18904.0f, 18905.0f, 18906.0f, 18907.0f, 18908.0f, 18909.0f, 18910.0f, 18911.0f, 18912.0f, 18913.0f, 18914.0f, 18915.0f, 18916.0f, 18917.0f, 18918.0f, 18919.0f, 18920.0f, 18921.0f, 18922.0f, 18923.0f, 18924.0f, 18925.0f, 18926.0f, 18927.0f, 18928.0f, 18929.0f, 18930.0f, 18931.0f, 18932.0f, 18933.0f, 18934.0f, 18935.0f, 18936.0f, 18937.0f, 18938.0f, 18939.0f, 18940.0f, 18941.0f, 18942.0f, 18943.0f, 18944.0f, 18945.0f, 18946.0f, 18947.0f, 18948.0f, 18949.0f, 18950.0f, 18951.0f, 18952.0f, 18953.0f, 18954.0f, 18955.0f, 18956.0f, 18957.0f, 18958.0f, 18959.0f, 18960.0f, 18961.0f, 18962.0f, 18963.0f, 18964.0f, 18965.0f, 18966.0f, 18967.0f, 18968.0f, 18969.0f, 18970.0f, 18971.0f, 18972.0f, 18973.0f, 18974.0f, 18975.0f, 18976.0f, 18977.0f, 18978.0f, 18979.0f, 18980.0f, 18981.0f, 18982.0f, 18983.0f, 18984.0f, 18985.0f, 18986.0f, 18987.0f, 18988.0f, 18989.0f, 18990.0f, 18991.0f, 18992.0f, 18993.0f, 18994.0f, 18995.0f, 18996.0f, 18997.0f, 18998.0f, 18999.0f, 19000.0f, 19001.0f, 19002.0f, 19003.0f, 19004.0f, 19005.0f, 19006.0f, 19007.0f, 19008.0f, 19009.0f, 19010.0f, 19011.0f, 19012.0f, 19013.0f, 19014.0f, 19015.0f, 19016.0f, 19017.0f, 19018.0f, 19019.0f, 19020.0f, 19021.0f, 19022.0f, 19023.0f, 19024.0f, 19025.0f, 19026.0f, 19027.0f, 19028.0f, 19029.0f, 19030.0f, 19031.0f, 19032.0f, 19033.0f, 19034.0f, 19035.0f, 19036.0f, 19037.0f, 19038.0f, 19039.0f, 19040.0f, 19041.0f, 19042.0f, 19043.0f, 19044.0f, 19045.0f, 19046.0f, 19047.0f, 19048.0f, 19049.0f, 19050.0f, 19051.0f, 19052.0f, 19053.0f, 19054.0f, 19055.0f, 19056.0f, 19057.0f, 19058.0f, 19059.0f, 19060.0f, 19061.0f, 19062.0f, 19063.0f, 19064.0f, 19065.0f, 19066.0f, 19067.0f, 19068.0f, 19069.0f, 19070.0f, 19071.0f, 19072.0f, 19073.0f, 19074.0f, 19075.0f, 19076.0f, 19077.0f, 19078.0f, 19079.0f, 19080.0f, 19081.0f, 19082.0f, 19083.0f, 19084.0f, 19085.0f, 19086.0f, 19087.0f, 19088.0f, 19089.0f, 19090.0f, 19091.0f, 19092.0f, 19093.0f, 19094.0f, 19095.0f, 19096.0f, 19097.0f, 19098.0f, 19099.0f, 
19100.0f, 19101.0f, 19102.0f, 19103.0f, 19104.0f, 19105.0f, 19106.0f, 19107.0f, 19108.0f, 19109.0f, 19110.0f, 19111.0f, 19112.0f, 19113.0f, 19114.0f, 19115.0f, 19116.0f, 19117.0f, 19118.0f, 19119.0f, 19120.0f, 19121.0f, 19122.0f, 19123.0f, 19124.0f, 19125.0f, 19126.0f, 19127.0f, 19128.0f, 19129.0f, 19130.0f, 19131.0f, 19132.0f, 19133.0f, 19134.0f, 19135.0f, 19136.0f, 19137.0f, 19138.0f, 19139.0f, 19140.0f, 19141.0f, 19142.0f, 19143.0f, 19144.0f, 19145.0f, 19146.0f, 19147.0f, 19148.0f, 19149.0f, 19150.0f, 19151.0f, 19152.0f, 19153.0f, 19154.0f, 19155.0f, 19156.0f, 19157.0f, 19158.0f, 19159.0f, 19160.0f, 19161.0f, 19162.0f, 19163.0f, 19164.0f, 19165.0f, 19166.0f, 19167.0f, 19168.0f, 19169.0f, 19170.0f, 19171.0f, 19172.0f, 19173.0f, 19174.0f, 19175.0f, 19176.0f, 19177.0f, 19178.0f, 19179.0f, 19180.0f, 19181.0f, 19182.0f, 19183.0f, 19184.0f, 19185.0f, 19186.0f, 19187.0f, 19188.0f, 19189.0f, 19190.0f, 19191.0f, 19192.0f, 19193.0f, 19194.0f, 19195.0f, 19196.0f, 19197.0f, 19198.0f, 19199.0f, 19200.0f, 19201.0f, 19202.0f, 19203.0f, 19204.0f, 19205.0f, 19206.0f, 19207.0f, 19208.0f, 19209.0f, 19210.0f, 19211.0f, 19212.0f, 19213.0f, 19214.0f, 19215.0f, 19216.0f, 19217.0f, 19218.0f, 19219.0f, 19220.0f, 19221.0f, 19222.0f, 19223.0f, 19224.0f, 19225.0f, 19226.0f, 19227.0f, 19228.0f, 19229.0f, 19230.0f, 19231.0f, 19232.0f, 19233.0f, 19234.0f, 19235.0f, 19236.0f, 19237.0f, 19238.0f, 19239.0f, 19240.0f, 19241.0f, 19242.0f, 19243.0f, 19244.0f, 19245.0f, 19246.0f, 19247.0f, 19248.0f, 19249.0f, 19250.0f, 19251.0f, 19252.0f, 19253.0f, 19254.0f, 19255.0f, 19256.0f, 19257.0f, 19258.0f, 19259.0f, 19260.0f, 19261.0f, 19262.0f, 19263.0f, 19264.0f, 19265.0f, 19266.0f, 19267.0f, 19268.0f, 19269.0f, 19270.0f, 19271.0f, 19272.0f, 19273.0f, 19274.0f, 19275.0f, 19276.0f, 19277.0f, 19278.0f, 19279.0f, 19280.0f, 19281.0f, 19282.0f, 19283.0f, 19284.0f, 19285.0f, 19286.0f, 19287.0f, 19288.0f, 19289.0f, 19290.0f, 19291.0f, 19292.0f, 19293.0f, 19294.0f, 19295.0f, 19296.0f, 19297.0f, 19298.0f, 19299.0f, 
19300.0f, 19301.0f, 19302.0f, 19303.0f, 19304.0f, 19305.0f, 19306.0f, 19307.0f, 19308.0f, 19309.0f, 19310.0f, 19311.0f, 19312.0f, 19313.0f, 19314.0f, 19315.0f, 19316.0f, 19317.0f, 19318.0f, 19319.0f, 19320.0f, 19321.0f, 19322.0f, 19323.0f, 19324.0f, 19325.0f, 19326.0f, 19327.0f, 19328.0f, 19329.0f, 19330.0f, 19331.0f, 19332.0f, 19333.0f, 19334.0f, 19335.0f, 19336.0f, 19337.0f, 19338.0f, 19339.0f, 19340.0f, 19341.0f, 19342.0f, 19343.0f, 19344.0f, 19345.0f, 19346.0f, 19347.0f, 19348.0f, 19349.0f, 19350.0f, 19351.0f, 19352.0f, 19353.0f, 19354.0f, 19355.0f, 19356.0f, 19357.0f, 19358.0f, 19359.0f, 19360.0f, 19361.0f, 19362.0f, 19363.0f, 19364.0f, 19365.0f, 19366.0f, 19367.0f, 19368.0f, 19369.0f, 19370.0f, 19371.0f, 19372.0f, 19373.0f, 19374.0f, 19375.0f, 19376.0f, 19377.0f, 19378.0f, 19379.0f, 19380.0f, 19381.0f, 19382.0f, 19383.0f, 19384.0f, 19385.0f, 19386.0f, 19387.0f, 19388.0f, 19389.0f, 19390.0f, 19391.0f, 19392.0f, 19393.0f, 19394.0f, 19395.0f, 19396.0f, 19397.0f, 19398.0f, 19399.0f, 19400.0f, 19401.0f, 19402.0f, 19403.0f, 19404.0f, 19405.0f, 19406.0f, 19407.0f, 19408.0f, 19409.0f, 19410.0f, 19411.0f, 19412.0f, 19413.0f, 19414.0f, 19415.0f, 19416.0f, 19417.0f, 19418.0f, 19419.0f, 19420.0f, 19421.0f, 19422.0f, 19423.0f, 19424.0f, 19425.0f, 19426.0f, 19427.0f, 19428.0f, 19429.0f, 19430.0f, 19431.0f, 19432.0f, 19433.0f, 19434.0f, 19435.0f, 19436.0f, 19437.0f, 19438.0f, 19439.0f, 19440.0f, 19441.0f, 19442.0f, 19443.0f, 19444.0f, 19445.0f, 19446.0f, 19447.0f, 19448.0f, 19449.0f, 19450.0f, 19451.0f, 19452.0f, 19453.0f, 19454.0f, 19455.0f, 19456.0f, 19457.0f, 19458.0f, 19459.0f, 19460.0f, 19461.0f, 19462.0f, 19463.0f, 19464.0f, 19465.0f, 19466.0f, 19467.0f, 19468.0f, 19469.0f, 19470.0f, 19471.0f, 19472.0f, 19473.0f, 19474.0f, 19475.0f, 19476.0f, 19477.0f, 19478.0f, 19479.0f, 19480.0f, 19481.0f, 19482.0f, 19483.0f, 19484.0f, 19485.0f, 19486.0f, 19487.0f, 19488.0f, 19489.0f, 19490.0f, 19491.0f, 19492.0f, 19493.0f, 19494.0f, 19495.0f, 19496.0f, 19497.0f, 19498.0f, 19499.0f, 
19500.0f, 19501.0f, 19502.0f, 19503.0f, 19504.0f, 19505.0f, 19506.0f, 19507.0f, 19508.0f, 19509.0f, 19510.0f, 19511.0f, 19512.0f, 19513.0f, 19514.0f, 19515.0f, 19516.0f, 19517.0f, 19518.0f, 19519.0f, 19520.0f, 19521.0f, 19522.0f, 19523.0f, 19524.0f, 19525.0f, 19526.0f, 19527.0f, 19528.0f, 19529.0f, 19530.0f, 19531.0f, 19532.0f, 19533.0f, 19534.0f, 19535.0f, 19536.0f, 19537.0f, 19538.0f, 19539.0f, 19540.0f, 19541.0f, 19542.0f, 19543.0f, 19544.0f, 19545.0f, 19546.0f, 19547.0f, 19548.0f, 19549.0f, 19550.0f, 19551.0f, 19552.0f, 19553.0f, 19554.0f, 19555.0f, 19556.0f, 19557.0f, 19558.0f, 19559.0f, 19560.0f, 19561.0f, 19562.0f, 19563.0f, 19564.0f, 19565.0f, 19566.0f, 19567.0f, 19568.0f, 19569.0f, 19570.0f, 19571.0f, 19572.0f, 19573.0f, 19574.0f, 19575.0f, 19576.0f, 19577.0f, 19578.0f, 19579.0f, 19580.0f, 19581.0f, 19582.0f, 19583.0f, 19584.0f, 19585.0f, 19586.0f, 19587.0f, 19588.0f, 19589.0f, 19590.0f, 19591.0f, 19592.0f, 19593.0f, 19594.0f, 19595.0f, 19596.0f, 19597.0f, 19598.0f, 19599.0f, 19600.0f, 19601.0f, 19602.0f, 19603.0f, 19604.0f, 19605.0f, 19606.0f, 19607.0f, 19608.0f, 19609.0f, 19610.0f, 19611.0f, 19612.0f, 19613.0f, 19614.0f, 19615.0f, 19616.0f, 19617.0f, 19618.0f, 19619.0f, 19620.0f, 19621.0f, 19622.0f, 19623.0f, 19624.0f, 19625.0f, 19626.0f, 19627.0f, 19628.0f, 19629.0f, 19630.0f, 19631.0f, 19632.0f, 19633.0f, 19634.0f, 19635.0f, 19636.0f, 19637.0f, 19638.0f, 19639.0f, 19640.0f, 19641.0f, 19642.0f, 19643.0f, 19644.0f, 19645.0f, 19646.0f, 19647.0f, 19648.0f, 19649.0f, 19650.0f, 19651.0f, 19652.0f, 19653.0f, 19654.0f, 19655.0f, 19656.0f, 19657.0f, 19658.0f, 19659.0f, 19660.0f, 19661.0f, 19662.0f, 19663.0f, 19664.0f, 19665.0f, 19666.0f, 19667.0f, 19668.0f, 19669.0f, 19670.0f, 19671.0f, 19672.0f, 19673.0f, 19674.0f, 19675.0f, 19676.0f, 19677.0f, 19678.0f, 19679.0f, 19680.0f, 19681.0f, 19682.0f, 19683.0f, 19684.0f, 19685.0f, 19686.0f, 19687.0f, 19688.0f, 19689.0f, 19690.0f, 19691.0f, 19692.0f, 19693.0f, 19694.0f, 19695.0f, 19696.0f, 19697.0f, 19698.0f, 19699.0f, 
19700.0f, 19701.0f, 19702.0f, 19703.0f, 19704.0f, 19705.0f, 19706.0f, 19707.0f, 19708.0f, 19709.0f, 19710.0f, 19711.0f, 19712.0f, 19713.0f, 19714.0f, 19715.0f, 19716.0f, 19717.0f, 19718.0f, 19719.0f, 19720.0f, 19721.0f, 19722.0f, 19723.0f, 19724.0f, 19725.0f, 19726.0f, 19727.0f, 19728.0f, 19729.0f, 19730.0f, 19731.0f, 19732.0f, 19733.0f, 19734.0f, 19735.0f, 19736.0f, 19737.0f, 19738.0f, 19739.0f, 19740.0f, 19741.0f, 19742.0f, 19743.0f, 19744.0f, 19745.0f, 19746.0f, 19747.0f, 19748.0f, 19749.0f, 19750.0f, 19751.0f, 19752.0f, 19753.0f, 19754.0f, 19755.0f, 19756.0f, 19757.0f, 19758.0f, 19759.0f, 19760.0f, 19761.0f, 19762.0f, 19763.0f, 19764.0f, 19765.0f, 19766.0f, 19767.0f, 19768.0f, 19769.0f, 19770.0f, 19771.0f, 19772.0f, 19773.0f, 19774.0f, 19775.0f, 19776.0f, 19777.0f, 19778.0f, 19779.0f, 19780.0f, 19781.0f, 19782.0f, 19783.0f, 19784.0f, 19785.0f, 19786.0f, 19787.0f, 19788.0f, 19789.0f, 19790.0f, 19791.0f, 19792.0f, 19793.0f, 19794.0f, 19795.0f, 19796.0f, 19797.0f, 19798.0f, 19799.0f, 19800.0f, 19801.0f, 19802.0f, 19803.0f, 19804.0f, 19805.0f, 19806.0f, 19807.0f, 19808.0f, 19809.0f, 19810.0f, 19811.0f, 19812.0f, 19813.0f, 19814.0f, 19815.0f, 19816.0f, 19817.0f, 19818.0f, 19819.0f, 19820.0f, 19821.0f, 19822.0f, 19823.0f, 19824.0f, 19825.0f, 19826.0f, 19827.0f, 19828.0f, 19829.0f, 19830.0f, 19831.0f, 19832.0f, 19833.0f, 19834.0f, 19835.0f, 19836.0f, 19837.0f, 19838.0f, 19839.0f, 19840.0f, 19841.0f, 19842.0f, 19843.0f, 19844.0f, 19845.0f, 19846.0f, 19847.0f, 19848.0f, 19849.0f, 19850.0f, 19851.0f, 19852.0f, 19853.0f, 19854.0f, 19855.0f, 19856.0f, 19857.0f, 19858.0f, 19859.0f, 19860.0f, 19861.0f, 19862.0f, 19863.0f, 19864.0f, 19865.0f, 19866.0f, 19867.0f, 19868.0f, 19869.0f, 19870.0f, 19871.0f, 19872.0f, 19873.0f, 19874.0f, 19875.0f, 19876.0f, 19877.0f, 19878.0f, 19879.0f, 19880.0f, 19881.0f, 19882.0f, 19883.0f, 19884.0f, 19885.0f, 19886.0f, 19887.0f, 19888.0f, 19889.0f, 19890.0f, 19891.0f, 19892.0f, 19893.0f, 19894.0f, 19895.0f, 19896.0f, 19897.0f, 19898.0f, 19899.0f, 
19900.0f, 19901.0f, 19902.0f, 19903.0f, 19904.0f, 19905.0f, 19906.0f, 19907.0f, 19908.0f, 19909.0f, 19910.0f, 19911.0f, 19912.0f, 19913.0f, 19914.0f, 19915.0f, 19916.0f, 19917.0f, 19918.0f, 19919.0f, 19920.0f, 19921.0f, 19922.0f, 19923.0f, 19924.0f, 19925.0f, 19926.0f, 19927.0f, 19928.0f, 19929.0f, 19930.0f, 19931.0f, 19932.0f, 19933.0f, 19934.0f, 19935.0f, 19936.0f, 19937.0f, 19938.0f, 19939.0f, 19940.0f, 19941.0f, 19942.0f, 19943.0f, 19944.0f, 19945.0f, 19946.0f, 19947.0f, 19948.0f, 19949.0f, 19950.0f, 19951.0f, 19952.0f, 19953.0f, 19954.0f, 19955.0f, 19956.0f, 19957.0f, 19958.0f, 19959.0f, 19960.0f, 19961.0f, 19962.0f, 19963.0f, 19964.0f, 19965.0f, 19966.0f, 19967.0f, 19968.0f, 19969.0f, 19970.0f, 19971.0f, 19972.0f, 19973.0f, 19974.0f, 19975.0f, 19976.0f, 19977.0f, 19978.0f, 19979.0f, 19980.0f, 19981.0f, 19982.0f, 19983.0f, 19984.0f, 19985.0f, 19986.0f, 19987.0f, 19988.0f, 19989.0f, 19990.0f, 19991.0f, 19992.0f, 19993.0f, 19994.0f, 19995.0f, 19996.0f, 19997.0f, 19998.0f, 19999.0f, 20000.0f, 20001.0f, 20002.0f, 20003.0f, 20004.0f, 20005.0f, 20006.0f, 20007.0f, 20008.0f, 20009.0f, 20010.0f, 20011.0f, 20012.0f, 20013.0f, 20014.0f, 20015.0f, 20016.0f, 20017.0f, 20018.0f, 20019.0f, 20020.0f, 20021.0f, 20022.0f, 20023.0f, 20024.0f, 20025.0f, 20026.0f, 20027.0f, 20028.0f, 20029.0f, 20030.0f, 20031.0f, 20032.0f, 20033.0f, 20034.0f, 20035.0f, 20036.0f, 20037.0f, 20038.0f, 20039.0f, 20040.0f, 20041.0f, 20042.0f, 20043.0f, 20044.0f, 20045.0f, 20046.0f, 20047.0f, 20048.0f, 20049.0f, 20050.0f, 20051.0f, 20052.0f, 20053.0f, 20054.0f, 20055.0f, 20056.0f, 20057.0f, 20058.0f, 20059.0f, 20060.0f, 20061.0f, 20062.0f, 20063.0f, 20064.0f, 20065.0f, 20066.0f, 20067.0f, 20068.0f, 20069.0f, 20070.0f, 20071.0f, 20072.0f, 20073.0f, 20074.0f, 20075.0f, 20076.0f, 20077.0f, 20078.0f, 20079.0f, 20080.0f, 20081.0f, 20082.0f, 20083.0f, 20084.0f, 20085.0f, 20086.0f, 20087.0f, 20088.0f, 20089.0f, 20090.0f, 20091.0f, 20092.0f, 20093.0f, 20094.0f, 20095.0f, 20096.0f, 20097.0f, 20098.0f, 20099.0f, 
20100.0f, 20101.0f, 20102.0f, 20103.0f, 20104.0f, 20105.0f, 20106.0f, 20107.0f, 20108.0f, 20109.0f, 20110.0f, 20111.0f, 20112.0f, 20113.0f, 20114.0f, 20115.0f, 20116.0f, 20117.0f, 20118.0f, 20119.0f, 20120.0f, 20121.0f, 20122.0f, 20123.0f, 20124.0f, 20125.0f, 20126.0f, 20127.0f, 20128.0f, 20129.0f, 20130.0f, 20131.0f, 20132.0f, 20133.0f, 20134.0f, 20135.0f, 20136.0f, 20137.0f, 20138.0f, 20139.0f, 20140.0f, 20141.0f, 20142.0f, 20143.0f, 20144.0f, 20145.0f, 20146.0f, 20147.0f, 20148.0f, 20149.0f, 20150.0f, 20151.0f, 20152.0f, 20153.0f, 20154.0f, 20155.0f, 20156.0f, 20157.0f, 20158.0f, 20159.0f, 20160.0f, 20161.0f, 20162.0f, 20163.0f, 20164.0f, 20165.0f, 20166.0f, 20167.0f, 20168.0f, 20169.0f, 20170.0f, 20171.0f, 20172.0f, 20173.0f, 20174.0f, 20175.0f, 20176.0f, 20177.0f, 20178.0f, 20179.0f, 20180.0f, 20181.0f, 20182.0f, 20183.0f, 20184.0f, 20185.0f, 20186.0f, 20187.0f, 20188.0f, 20189.0f, 20190.0f, 20191.0f, 20192.0f, 20193.0f, 20194.0f, 20195.0f, 20196.0f, 20197.0f, 20198.0f, 20199.0f, 20200.0f, 20201.0f, 20202.0f, 20203.0f, 20204.0f, 20205.0f, 20206.0f, 20207.0f, 20208.0f, 20209.0f, 20210.0f, 20211.0f, 20212.0f, 20213.0f, 20214.0f, 20215.0f, 20216.0f, 20217.0f, 20218.0f, 20219.0f, 20220.0f, 20221.0f, 20222.0f, 20223.0f, 20224.0f, 20225.0f, 20226.0f, 20227.0f, 20228.0f, 20229.0f, 20230.0f, 20231.0f, 20232.0f, 20233.0f, 20234.0f, 20235.0f, 20236.0f, 20237.0f, 20238.0f, 20239.0f, 20240.0f, 20241.0f, 20242.0f, 20243.0f, 20244.0f, 20245.0f, 20246.0f, 20247.0f, 20248.0f, 20249.0f, 20250.0f, 20251.0f, 20252.0f, 20253.0f, 20254.0f, 20255.0f, 20256.0f, 20257.0f, 20258.0f, 20259.0f, 20260.0f, 20261.0f, 20262.0f, 20263.0f, 20264.0f, 20265.0f, 20266.0f, 20267.0f, 20268.0f, 20269.0f, 20270.0f, 20271.0f, 20272.0f, 20273.0f, 20274.0f, 20275.0f, 20276.0f, 20277.0f, 20278.0f, 20279.0f, 20280.0f, 20281.0f, 20282.0f, 20283.0f, 20284.0f, 20285.0f, 20286.0f, 20287.0f, 20288.0f, 20289.0f, 20290.0f, 20291.0f, 20292.0f, 20293.0f, 20294.0f, 20295.0f, 20296.0f, 20297.0f, 20298.0f, 20299.0f, 
20300.0f, 20301.0f, 20302.0f, 20303.0f, 20304.0f, 20305.0f, 20306.0f, 20307.0f, 20308.0f, 20309.0f, 20310.0f, 20311.0f, 20312.0f, 20313.0f, 20314.0f, 20315.0f, 20316.0f, 20317.0f, 20318.0f, 20319.0f, 20320.0f, 20321.0f, 20322.0f, 20323.0f, 20324.0f, 20325.0f, 20326.0f, 20327.0f, 20328.0f, 20329.0f, 20330.0f, 20331.0f, 20332.0f, 20333.0f, 20334.0f, 20335.0f, 20336.0f, 20337.0f, 20338.0f, 20339.0f, 20340.0f, 20341.0f, 20342.0f, 20343.0f, 20344.0f, 20345.0f, 20346.0f, 20347.0f, 20348.0f, 20349.0f, 20350.0f, 20351.0f, 20352.0f, 20353.0f, 20354.0f, 20355.0f, 20356.0f, 20357.0f, 20358.0f, 20359.0f, 20360.0f, 20361.0f, 20362.0f, 20363.0f, 20364.0f, 20365.0f, 20366.0f, 20367.0f, 20368.0f, 20369.0f, 20370.0f, 20371.0f, 20372.0f, 20373.0f, 20374.0f, 20375.0f, 20376.0f, 20377.0f, 20378.0f, 20379.0f, 20380.0f, 20381.0f, 20382.0f, 20383.0f, 20384.0f, 20385.0f, 20386.0f, 20387.0f, 20388.0f, 20389.0f, 20390.0f, 20391.0f, 20392.0f, 20393.0f, 20394.0f, 20395.0f, 20396.0f, 20397.0f, 20398.0f, 20399.0f, 20400.0f, 20401.0f, 20402.0f, 20403.0f, 20404.0f, 20405.0f, 20406.0f, 20407.0f, 20408.0f, 20409.0f, 20410.0f, 20411.0f, 20412.0f, 20413.0f, 20414.0f, 20415.0f, 20416.0f, 20417.0f, 20418.0f, 20419.0f, 20420.0f, 20421.0f, 20422.0f, 20423.0f, 20424.0f, 20425.0f, 20426.0f, 20427.0f, 20428.0f, 20429.0f, 20430.0f, 20431.0f, 20432.0f, 20433.0f, 20434.0f, 20435.0f, 20436.0f, 20437.0f, 20438.0f, 20439.0f, 20440.0f, 20441.0f, 20442.0f, 20443.0f, 20444.0f, 20445.0f, 20446.0f, 20447.0f, 20448.0f, 20449.0f, 20450.0f, 20451.0f, 20452.0f, 20453.0f, 20454.0f, 20455.0f, 20456.0f, 20457.0f, 20458.0f, 20459.0f, 20460.0f, 20461.0f, 20462.0f, 20463.0f, 20464.0f, 20465.0f, 20466.0f, 20467.0f, 20468.0f, 20469.0f, 20470.0f, 20471.0f, 20472.0f, 20473.0f, 20474.0f, 20475.0f, 20476.0f, 20477.0f, 20478.0f, 20479.0f, 20480.0f, 20481.0f, 20482.0f, 20483.0f, 20484.0f, 20485.0f, 20486.0f, 20487.0f, 20488.0f, 20489.0f, 20490.0f, 20491.0f, 20492.0f, 20493.0f, 20494.0f, 20495.0f, 20496.0f, 20497.0f, 20498.0f, 20499.0f, 
20500.0f, 20501.0f, 20502.0f, 20503.0f, 20504.0f, 20505.0f, 20506.0f, 20507.0f, 20508.0f, 20509.0f, 20510.0f, 20511.0f, 20512.0f, 20513.0f, 20514.0f, 20515.0f, 20516.0f, 20517.0f, 20518.0f, 20519.0f, 20520.0f, 20521.0f, 20522.0f, 20523.0f, 20524.0f, 20525.0f, 20526.0f, 20527.0f, 20528.0f, 20529.0f, 20530.0f, 20531.0f, 20532.0f, 20533.0f, 20534.0f, 20535.0f, 20536.0f, 20537.0f, 20538.0f, 20539.0f, 20540.0f, 20541.0f, 20542.0f, 20543.0f, 20544.0f, 20545.0f, 20546.0f, 20547.0f, 20548.0f, 20549.0f, 20550.0f, 20551.0f, 20552.0f, 20553.0f, 20554.0f, 20555.0f, 20556.0f, 20557.0f, 20558.0f, 20559.0f, 20560.0f, 20561.0f, 20562.0f, 20563.0f, 20564.0f, 20565.0f, 20566.0f, 20567.0f, 20568.0f, 20569.0f, 20570.0f, 20571.0f, 20572.0f, 20573.0f, 20574.0f, 20575.0f, 20576.0f, 20577.0f, 20578.0f, 20579.0f, 20580.0f, 20581.0f, 20582.0f, 20583.0f, 20584.0f, 20585.0f, 20586.0f, 20587.0f, 20588.0f, 20589.0f, 20590.0f, 20591.0f, 20592.0f, 20593.0f, 20594.0f, 20595.0f, 20596.0f, 20597.0f, 20598.0f, 20599.0f, 20600.0f, 20601.0f, 20602.0f, 20603.0f, 20604.0f, 20605.0f, 20606.0f, 20607.0f, 20608.0f, 20609.0f, 20610.0f, 20611.0f, 20612.0f, 20613.0f, 20614.0f, 20615.0f, 20616.0f, 20617.0f, 20618.0f, 20619.0f, 20620.0f, 20621.0f, 20622.0f, 20623.0f, 20624.0f, 20625.0f, 20626.0f, 20627.0f, 20628.0f, 20629.0f, 20630.0f, 20631.0f, 20632.0f, 20633.0f, 20634.0f, 20635.0f, 20636.0f, 20637.0f, 20638.0f, 20639.0f, 20640.0f, 20641.0f, 20642.0f, 20643.0f, 20644.0f, 20645.0f, 20646.0f, 20647.0f, 20648.0f, 20649.0f, 20650.0f, 20651.0f, 20652.0f, 20653.0f, 20654.0f, 20655.0f, 20656.0f, 20657.0f, 20658.0f, 20659.0f, 20660.0f, 20661.0f, 20662.0f, 20663.0f, 20664.0f, 20665.0f, 20666.0f, 20667.0f, 20668.0f, 20669.0f, 20670.0f, 20671.0f, 20672.0f, 20673.0f, 20674.0f, 20675.0f, 20676.0f, 20677.0f, 20678.0f, 20679.0f, 20680.0f, 20681.0f, 20682.0f, 20683.0f, 20684.0f, 20685.0f, 20686.0f, 20687.0f, 20688.0f, 20689.0f, 20690.0f, 20691.0f, 20692.0f, 20693.0f, 20694.0f, 20695.0f, 20696.0f, 20697.0f, 20698.0f, 20699.0f, 
20700.0f, 20701.0f, 20702.0f, 20703.0f, 20704.0f, 20705.0f, 20706.0f, 20707.0f, 20708.0f, 20709.0f, 20710.0f, 20711.0f, 20712.0f, 20713.0f, 20714.0f, 20715.0f, 20716.0f, 20717.0f, 20718.0f, 20719.0f, 20720.0f, 20721.0f, 20722.0f, 20723.0f, 20724.0f, 20725.0f, 20726.0f, 20727.0f, 20728.0f, 20729.0f, 20730.0f, 20731.0f, 20732.0f, 20733.0f, 20734.0f, 20735.0f, 20736.0f, 20737.0f, 20738.0f, 20739.0f, 20740.0f, 20741.0f, 20742.0f, 20743.0f, 20744.0f, 20745.0f, 20746.0f, 20747.0f, 20748.0f, 20749.0f, 20750.0f, 20751.0f, 20752.0f, 20753.0f, 20754.0f, 20755.0f, 20756.0f, 20757.0f, 20758.0f, 20759.0f, 20760.0f, 20761.0f, 20762.0f, 20763.0f, 20764.0f, 20765.0f, 20766.0f, 20767.0f, 20768.0f, 20769.0f, 20770.0f, 20771.0f, 20772.0f, 20773.0f, 20774.0f, 20775.0f, 20776.0f, 20777.0f, 20778.0f, 20779.0f, 20780.0f, 20781.0f, 20782.0f, 20783.0f, 20784.0f, 20785.0f, 20786.0f, 20787.0f, 20788.0f, 20789.0f, 20790.0f, 20791.0f, 20792.0f, 20793.0f, 20794.0f, 20795.0f, 20796.0f, 20797.0f, 20798.0f, 20799.0f, 20800.0f, 20801.0f, 20802.0f, 20803.0f, 20804.0f, 20805.0f, 20806.0f, 20807.0f, 20808.0f, 20809.0f, 20810.0f, 20811.0f, 20812.0f, 20813.0f, 20814.0f, 20815.0f, 20816.0f, 20817.0f, 20818.0f, 20819.0f, 20820.0f, 20821.0f, 20822.0f, 20823.0f, 20824.0f, 20825.0f, 20826.0f, 20827.0f, 20828.0f, 20829.0f, 20830.0f, 20831.0f, 20832.0f, 20833.0f, 20834.0f, 20835.0f, 20836.0f, 20837.0f, 20838.0f, 20839.0f, 20840.0f, 20841.0f, 20842.0f, 20843.0f, 20844.0f, 20845.0f, 20846.0f, 20847.0f, 20848.0f, 20849.0f, 20850.0f, 20851.0f, 20852.0f, 20853.0f, 20854.0f, 20855.0f, 20856.0f, 20857.0f, 20858.0f, 20859.0f, 20860.0f, 20861.0f, 20862.0f, 20863.0f, 20864.0f, 20865.0f, 20866.0f, 20867.0f, 20868.0f, 20869.0f, 20870.0f, 20871.0f, 20872.0f, 20873.0f, 20874.0f, 20875.0f, 20876.0f, 20877.0f, 20878.0f, 20879.0f, 20880.0f, 20881.0f, 20882.0f, 20883.0f, 20884.0f, 20885.0f, 20886.0f, 20887.0f, 20888.0f, 20889.0f, 20890.0f, 20891.0f, 20892.0f, 20893.0f, 20894.0f, 20895.0f, 20896.0f, 20897.0f, 20898.0f, 20899.0f, 
20900.0f, 20901.0f, 20902.0f, 20903.0f, 20904.0f, 20905.0f, 20906.0f, 20907.0f, 20908.0f, 20909.0f, 20910.0f, 20911.0f, 20912.0f, 20913.0f, 20914.0f, 20915.0f, 20916.0f, 20917.0f, 20918.0f, 20919.0f, 20920.0f, 20921.0f, 20922.0f, 20923.0f, 20924.0f, 20925.0f, 20926.0f, 20927.0f, 20928.0f, 20929.0f, 20930.0f, 20931.0f, 20932.0f, 20933.0f, 20934.0f, 20935.0f, 20936.0f, 20937.0f, 20938.0f, 20939.0f, 20940.0f, 20941.0f, 20942.0f, 20943.0f, 20944.0f, 20945.0f, 20946.0f, 20947.0f, 20948.0f, 20949.0f, 20950.0f, 20951.0f, 20952.0f, 20953.0f, 20954.0f, 20955.0f, 20956.0f, 20957.0f, 20958.0f, 20959.0f, 20960.0f, 20961.0f, 20962.0f, 20963.0f, 20964.0f, 20965.0f, 20966.0f, 20967.0f, 20968.0f, 20969.0f, 20970.0f, 20971.0f, 20972.0f, 20973.0f, 20974.0f, 20975.0f, 20976.0f, 20977.0f, 20978.0f, 20979.0f, 20980.0f, 20981.0f, 20982.0f, 20983.0f, 20984.0f, 20985.0f, 20986.0f, 20987.0f, 20988.0f, 20989.0f, 20990.0f, 20991.0f, 20992.0f, 20993.0f, 20994.0f, 20995.0f, 20996.0f, 20997.0f, 20998.0f, 20999.0f, 21000.0f, 21001.0f, 21002.0f, 21003.0f, 21004.0f, 21005.0f, 21006.0f, 21007.0f, 21008.0f, 21009.0f, 21010.0f, 21011.0f, 21012.0f, 21013.0f, 21014.0f, 21015.0f, 21016.0f, 21017.0f, 21018.0f, 21019.0f, 21020.0f, 21021.0f, 21022.0f, 21023.0f, 21024.0f, 21025.0f, 21026.0f, 21027.0f, 21028.0f, 21029.0f, 21030.0f, 21031.0f, 21032.0f, 21033.0f, 21034.0f, 21035.0f, 21036.0f, 21037.0f, 21038.0f, 21039.0f, 21040.0f, 21041.0f, 21042.0f, 21043.0f, 21044.0f, 21045.0f, 21046.0f, 21047.0f, 21048.0f, 21049.0f, 21050.0f, 21051.0f, 21052.0f, 21053.0f, 21054.0f, 21055.0f, 21056.0f, 21057.0f, 21058.0f, 21059.0f, 21060.0f, 21061.0f, 21062.0f, 21063.0f, 21064.0f, 21065.0f, 21066.0f, 21067.0f, 21068.0f, 21069.0f, 21070.0f, 21071.0f, 21072.0f, 21073.0f, 21074.0f, 21075.0f, 21076.0f, 21077.0f, 21078.0f, 21079.0f, 21080.0f, 21081.0f, 21082.0f, 21083.0f, 21084.0f, 21085.0f, 21086.0f, 21087.0f, 21088.0f, 21089.0f, 21090.0f, 21091.0f, 21092.0f, 21093.0f, 21094.0f, 21095.0f, 21096.0f, 21097.0f, 21098.0f, 21099.0f, 
21100.0f, 21101.0f, 21102.0f, 21103.0f, 21104.0f, 21105.0f, 21106.0f, 21107.0f, 21108.0f, 21109.0f, 21110.0f, 21111.0f, 21112.0f, 21113.0f, 21114.0f, 21115.0f, 21116.0f, 21117.0f, 21118.0f, 21119.0f, 21120.0f, 21121.0f, 21122.0f, 21123.0f, 21124.0f, 21125.0f, 21126.0f, 21127.0f, 21128.0f, 21129.0f, 21130.0f, 21131.0f, 21132.0f, 21133.0f, 21134.0f, 21135.0f, 21136.0f, 21137.0f, 21138.0f, 21139.0f, 21140.0f, 21141.0f, 21142.0f, 21143.0f, 21144.0f, 21145.0f, 21146.0f, 21147.0f, 21148.0f, 21149.0f, 21150.0f, 21151.0f, 21152.0f, 21153.0f, 21154.0f, 21155.0f, 21156.0f, 21157.0f, 21158.0f, 21159.0f}}},
@@ -25,7 +26,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, 31.0f, 32.0f, 33.0f, 34.0f, 35.0f, 36.0f, 37.0f, 38.0f, 39.0f, 40.0f, 41.0f, 42.0f, 43.0f, 44.0f, 45.0f, 46.0f, 47.0f, 48.0f, 49.0f, 50.0f, 51.0f, 52.0f, 53.0f, 54.0f, 55.0f, 56.0f, 57.0f, 58.0f, 59.0f, 60.0f, 61.0f, 62.0f, 63.0f, 64.0f, 65.0f, 66.0f, 67.0f, 68.0f, 69.0f, 70.0f, 71.0f, 72.0f, 73.0f, 74.0f, 75.0f, 76.0f, 77.0f, 78.0f, 79.0f, 80.0f, 81.0f, 82.0f, 83.0f, 84.0f, 85.0f, 86.0f, 87.0f, 88.0f, 89.0f, 90.0f, 91.0f, 92.0f, 93.0f, 94.0f, 95.0f, 96.0f, 97.0f, 98.0f, 99.0f, 100.0f, 101.0f, 102.0f, 103.0f, 104.0f, 105.0f, 106.0f, 107.0f, 108.0f, 109.0f, 110.0f, 111.0f, 112.0f, 113.0f, 114.0f, 115.0f, 116.0f, 117.0f, 118.0f, 119.0f, 120.0f, 121.0f, 122.0f, 123.0f, 124.0f, 125.0f, 126.0f, 127.0f, 128.0f, 129.0f, 130.0f, 131.0f, 132.0f, 133.0f, 134.0f, 135.0f, 136.0f, 137.0f, 138.0f, 139.0f, 140.0f, 141.0f, 142.0f, 143.0f, 144.0f, 145.0f, 146.0f, 147.0f, 148.0f, 149.0f, 150.0f, 151.0f, 152.0f, 153.0f, 154.0f, 155.0f, 156.0f, 157.0f, 158.0f, 159.0f, 160.0f, 161.0f, 162.0f, 163.0f, 164.0f, 165.0f, 166.0f, 167.0f, 168.0f, 169.0f, 170.0f, 171.0f, 172.0f, 173.0f, 174.0f, 175.0f, 176.0f, 177.0f, 178.0f, 179.0f, 180.0f, 181.0f, 182.0f, 183.0f, 184.0f, 185.0f, 186.0f, 187.0f, 188.0f, 189.0f, 190.0f, 191.0f, 192.0f, 193.0f, 194.0f, 195.0f, 196.0f, 197.0f, 198.0f, 199.0f, 200.0f, 201.0f, 202.0f, 203.0f, 204.0f, 205.0f, 206.0f, 207.0f, 208.0f, 209.0f, 210.0f, 211.0f, 212.0f, 213.0f, 214.0f, 215.0f, 216.0f, 217.0f, 218.0f, 219.0f, 220.0f, 221.0f, 222.0f, 223.0f, 224.0f, 225.0f, 226.0f, 227.0f, 228.0f, 229.0f, 230.0f, 231.0f, 232.0f, 233.0f, 234.0f, 235.0f, 236.0f, 237.0f, 238.0f, 239.0f, 240.0f, 241.0f, 242.0f, 243.0f, 244.0f, 245.0f, 246.0f, 247.0f, 248.0f, 249.0f, 250.0f, 251.0f, 252.0f, 253.0f, 254.0f, 255.0f, 256.0f, 257.0f, 258.0f, 259.0f, 260.0f, 261.0f, 
262.0f, 263.0f, 264.0f, 265.0f, 266.0f, 267.0f, 268.0f, 269.0f, 270.0f, 271.0f, 272.0f, 273.0f, 274.0f, 275.0f, 276.0f, 277.0f, 278.0f, 279.0f, 280.0f, 281.0f, 282.0f, 283.0f, 284.0f, 285.0f, 286.0f, 287.0f, 288.0f, 289.0f, 290.0f, 291.0f, 292.0f, 293.0f, 294.0f, 295.0f, 296.0f, 297.0f, 298.0f, 299.0f, 300.0f, 301.0f, 302.0f, 303.0f, 304.0f, 305.0f, 306.0f, 307.0f, 308.0f, 309.0f, 310.0f, 311.0f, 312.0f, 313.0f, 314.0f, 315.0f, 316.0f, 317.0f, 318.0f, 319.0f, 320.0f, 321.0f, 322.0f, 323.0f, 324.0f, 325.0f, 326.0f, 327.0f, 328.0f, 329.0f, 330.0f, 331.0f, 332.0f, 333.0f, 334.0f, 335.0f, 336.0f, 337.0f, 338.0f, 339.0f, 340.0f, 341.0f, 342.0f, 343.0f, 344.0f, 345.0f, 346.0f, 347.0f, 348.0f, 349.0f, 350.0f, 351.0f, 352.0f, 353.0f, 354.0f, 355.0f, 356.0f, 357.0f, 358.0f, 359.0f, 360.0f, 361.0f, 362.0f, 363.0f, 364.0f, 365.0f, 366.0f, 367.0f, 368.0f, 369.0f, 370.0f, 371.0f, 372.0f, 373.0f, 374.0f, 375.0f, 376.0f, 377.0f, 378.0f, 379.0f, 380.0f, 381.0f, 382.0f, 383.0f, 384.0f, 385.0f, 386.0f, 387.0f, 388.0f, 389.0f, 390.0f, 391.0f, 392.0f, 393.0f, 394.0f, 395.0f, 396.0f, 397.0f, 398.0f, 399.0f, 400.0f, 401.0f, 402.0f, 403.0f, 404.0f, 405.0f, 406.0f, 407.0f, 408.0f, 409.0f, 410.0f, 411.0f, 412.0f, 413.0f, 414.0f, 415.0f, 416.0f, 417.0f, 418.0f, 419.0f, 420.0f, 421.0f, 422.0f, 423.0f, 424.0f, 425.0f, 426.0f, 427.0f, 428.0f, 429.0f, 430.0f, 431.0f, 432.0f, 433.0f, 434.0f, 435.0f, 436.0f, 437.0f, 438.0f, 439.0f, 440.0f, 441.0f, 442.0f, 443.0f, 444.0f, 445.0f, 446.0f, 447.0f, 448.0f, 449.0f, 450.0f, 451.0f, 452.0f, 453.0f, 454.0f, 455.0f, 456.0f, 457.0f, 458.0f, 459.0f, 460.0f, 461.0f, 462.0f, 463.0f, 464.0f, 465.0f, 466.0f, 467.0f, 468.0f, 469.0f, 470.0f, 471.0f, 472.0f, 473.0f, 474.0f, 475.0f, 476.0f, 477.0f, 478.0f, 479.0f, 480.0f, 481.0f, 482.0f, 483.0f, 484.0f, 485.0f, 486.0f, 487.0f, 488.0f, 489.0f, 490.0f, 491.0f, 492.0f, 493.0f, 494.0f, 495.0f, 496.0f, 497.0f, 498.0f, 499.0f, 500.0f, 501.0f, 502.0f, 503.0f, 504.0f, 505.0f, 506.0f, 507.0f, 508.0f, 509.0f, 510.0f, 511.0f, 
512.0f, 513.0f, 514.0f, 515.0f, 516.0f, 517.0f, 518.0f, 519.0f, 520.0f, 521.0f, 522.0f, 523.0f, 524.0f, 525.0f, 526.0f, 527.0f, 528.0f, 529.0f, 530.0f, 531.0f, 532.0f, 533.0f, 534.0f, 535.0f, 536.0f, 537.0f, 538.0f, 539.0f, 540.0f, 541.0f, 542.0f, 543.0f, 544.0f, 545.0f, 546.0f, 547.0f, 548.0f, 549.0f, 550.0f, 551.0f, 552.0f, 553.0f, 554.0f, 555.0f, 556.0f, 557.0f, 558.0f, 559.0f, 560.0f, 561.0f, 562.0f, 563.0f, 564.0f, 565.0f, 566.0f, 567.0f, 568.0f, 569.0f, 570.0f, 571.0f, 572.0f, 573.0f, 574.0f, 575.0f, 576.0f, 577.0f, 578.0f, 579.0f, 580.0f, 581.0f, 582.0f, 583.0f, 584.0f, 585.0f, 586.0f, 587.0f, 588.0f, 589.0f, 590.0f, 591.0f, 592.0f, 593.0f, 594.0f, 595.0f, 596.0f, 597.0f, 598.0f, 599.0f, 600.0f, 601.0f, 602.0f, 603.0f, 604.0f, 605.0f, 606.0f, 607.0f, 608.0f, 609.0f, 610.0f, 611.0f, 612.0f, 613.0f, 614.0f, 615.0f, 616.0f, 617.0f, 618.0f, 619.0f, 620.0f, 621.0f, 622.0f, 623.0f, 624.0f, 625.0f, 626.0f, 627.0f, 628.0f, 629.0f, 630.0f, 631.0f, 632.0f, 633.0f, 634.0f, 635.0f, 636.0f, 637.0f, 638.0f, 639.0f, 640.0f, 641.0f, 642.0f, 643.0f, 644.0f, 645.0f, 646.0f, 647.0f, 648.0f, 649.0f, 650.0f, 651.0f, 652.0f, 653.0f, 654.0f, 655.0f, 656.0f, 657.0f, 658.0f, 659.0f, 660.0f, 661.0f, 662.0f, 663.0f, 664.0f, 665.0f, 666.0f, 667.0f, 668.0f, 669.0f, 670.0f, 671.0f, 672.0f, 673.0f, 674.0f, 675.0f, 676.0f, 677.0f, 678.0f, 679.0f, 680.0f, 681.0f, 682.0f, 683.0f, 684.0f, 685.0f, 686.0f, 687.0f, 688.0f, 689.0f, 690.0f, 691.0f, 692.0f, 693.0f, 694.0f, 695.0f, 696.0f, 697.0f, 698.0f, 699.0f, 700.0f, 701.0f, 702.0f, 703.0f, 704.0f, 705.0f, 706.0f, 707.0f, 708.0f, 709.0f, 710.0f, 711.0f, 712.0f, 713.0f, 714.0f, 715.0f, 716.0f, 717.0f, 718.0f, 719.0f, 720.0f, 721.0f, 722.0f, 723.0f, 724.0f, 725.0f, 726.0f, 727.0f, 728.0f, 729.0f, 730.0f, 731.0f, 732.0f, 733.0f, 734.0f, 735.0f, 736.0f, 737.0f, 738.0f, 739.0f, 740.0f, 741.0f, 742.0f, 743.0f, 744.0f, 745.0f, 746.0f, 747.0f, 748.0f, 749.0f, 750.0f, 751.0f, 752.0f, 753.0f, 754.0f, 755.0f, 756.0f, 757.0f, 758.0f, 759.0f, 760.0f, 761.0f, 
762.0f, 763.0f, 764.0f, 765.0f, 766.0f, 767.0f, 768.0f, 769.0f, 770.0f, 771.0f, 772.0f, 773.0f, 774.0f, 775.0f, 776.0f, 777.0f, 778.0f, 779.0f, 780.0f, 781.0f, 782.0f, 783.0f, 784.0f, 785.0f, 786.0f, 787.0f, 788.0f, 789.0f, 790.0f, 791.0f, 792.0f, 793.0f, 794.0f, 795.0f, 796.0f, 797.0f, 798.0f, 799.0f, 800.0f, 801.0f, 802.0f, 803.0f, 804.0f, 805.0f, 806.0f, 807.0f, 808.0f, 809.0f, 810.0f, 811.0f, 812.0f, 813.0f, 814.0f, 815.0f, 816.0f, 817.0f, 818.0f, 819.0f, 820.0f, 821.0f, 822.0f, 823.0f, 824.0f, 825.0f, 826.0f, 827.0f, 828.0f, 829.0f, 830.0f, 831.0f, 832.0f, 833.0f, 834.0f, 835.0f, 836.0f, 837.0f, 838.0f, 839.0f, 840.0f, 841.0f, 842.0f, 843.0f, 844.0f, 845.0f, 846.0f, 847.0f, 848.0f, 849.0f, 850.0f, 851.0f, 852.0f, 853.0f, 854.0f, 855.0f, 856.0f, 857.0f, 858.0f, 859.0f, 860.0f, 861.0f, 862.0f, 863.0f, 864.0f, 865.0f, 866.0f, 867.0f, 868.0f, 869.0f, 870.0f, 871.0f, 872.0f, 873.0f, 874.0f, 875.0f, 876.0f, 877.0f, 878.0f, 879.0f, 880.0f, 881.0f, 882.0f, 883.0f, 884.0f, 885.0f, 886.0f, 887.0f, 888.0f, 889.0f, 890.0f, 891.0f, 892.0f, 893.0f, 894.0f, 895.0f, 896.0f, 897.0f, 898.0f, 899.0f, 900.0f, 901.0f, 902.0f, 903.0f, 904.0f, 905.0f, 906.0f, 907.0f, 908.0f, 909.0f, 910.0f, 911.0f, 912.0f, 913.0f, 914.0f, 915.0f, 916.0f, 917.0f, 918.0f, 919.0f, 920.0f, 921.0f, 922.0f, 923.0f, 924.0f, 925.0f, 926.0f, 927.0f, 928.0f, 929.0f, 930.0f, 931.0f, 932.0f, 933.0f, 934.0f, 935.0f, 936.0f, 937.0f, 938.0f, 939.0f, 940.0f, 941.0f, 942.0f, 943.0f, 944.0f, 945.0f, 946.0f, 947.0f, 948.0f, 949.0f, 950.0f, 951.0f, 952.0f, 953.0f, 954.0f, 955.0f, 956.0f, 957.0f, 958.0f, 959.0f, 960.0f, 961.0f, 962.0f, 963.0f, 964.0f, 965.0f, 966.0f, 967.0f, 968.0f, 969.0f, 970.0f, 971.0f, 972.0f, 973.0f, 974.0f, 975.0f, 976.0f, 977.0f, 978.0f, 979.0f, 980.0f, 981.0f, 982.0f, 983.0f, 984.0f, 985.0f, 986.0f, 987.0f, 988.0f, 989.0f, 990.0f, 991.0f, 992.0f, 993.0f, 994.0f, 995.0f, 996.0f, 997.0f, 998.0f, 999.0f, 1000.0f, 1001.0f, 1002.0f, 1003.0f, 1004.0f, 1005.0f, 1006.0f, 1007.0f, 1008.0f, 1009.0f, 
1010.0f, 1011.0f, 1012.0f, 1013.0f, 1014.0f, 1015.0f, 1016.0f, 1017.0f, 1018.0f, 1019.0f, 1020.0f, 1021.0f, 1022.0f, 1023.0f, 1024.0f, 1025.0f, 1026.0f, 1027.0f, 1028.0f, 1029.0f, 1030.0f, 1031.0f, 1032.0f, 1033.0f, 1034.0f, 1035.0f, 1036.0f, 1037.0f, 1038.0f, 1039.0f, 1040.0f, 1041.0f, 1042.0f, 1043.0f, 1044.0f, 1045.0f, 1046.0f, 1047.0f, 1048.0f, 1049.0f, 1050.0f, 1051.0f, 1052.0f, 1053.0f, 1054.0f, 1055.0f, 1056.0f, 1057.0f, 1058.0f, 1059.0f, 1060.0f, 1061.0f, 1062.0f, 1063.0f, 1064.0f, 1065.0f, 1066.0f, 1067.0f, 1068.0f, 1069.0f, 1070.0f, 1071.0f, 1072.0f, 1073.0f, 1074.0f, 1075.0f, 1076.0f, 1077.0f, 1078.0f, 1079.0f, 1080.0f, 1081.0f, 1082.0f, 1083.0f, 1084.0f, 1085.0f, 1086.0f, 1087.0f, 1088.0f, 1089.0f, 1090.0f, 1091.0f, 1092.0f, 1093.0f, 1094.0f, 1095.0f, 1096.0f, 1097.0f, 1098.0f, 1099.0f, 1100.0f, 1101.0f, 1102.0f, 1103.0f, 1104.0f, 1105.0f, 1106.0f, 1107.0f, 1108.0f, 1109.0f, 1110.0f, 1111.0f, 1112.0f, 1113.0f, 1114.0f, 1115.0f, 1116.0f, 1117.0f, 1118.0f, 1119.0f, 1120.0f, 1121.0f, 1122.0f, 1123.0f, 1124.0f, 1125.0f, 1126.0f, 1127.0f, 1128.0f, 1129.0f, 1130.0f, 1131.0f, 1132.0f, 1133.0f, 1134.0f, 1135.0f, 1136.0f, 1137.0f, 1138.0f, 1139.0f, 1140.0f, 1141.0f, 1142.0f, 1143.0f, 1144.0f, 1145.0f, 1146.0f, 1147.0f, 1148.0f, 1149.0f, 1150.0f, 1151.0f, 1152.0f, 1153.0f, 1154.0f, 1155.0f, 1156.0f, 1157.0f, 1158.0f, 1159.0f, 1160.0f, 1161.0f, 1162.0f, 1163.0f, 1164.0f, 1165.0f, 1166.0f, 1167.0f, 1168.0f, 1169.0f, 1170.0f, 1171.0f, 1172.0f, 1173.0f, 1174.0f, 1175.0f, 1176.0f, 1177.0f, 1178.0f, 1179.0f, 1180.0f, 1181.0f, 1182.0f, 1183.0f, 1184.0f, 1185.0f, 1186.0f, 1187.0f, 1188.0f, 1189.0f, 1190.0f, 1191.0f, 1192.0f, 1193.0f, 1194.0f, 1195.0f, 1196.0f, 1197.0f, 1198.0f, 1199.0f, 1200.0f, 1201.0f, 1202.0f, 1203.0f, 1204.0f, 1205.0f, 1206.0f, 1207.0f, 1208.0f, 1209.0f, 1210.0f, 1211.0f, 1212.0f, 1213.0f, 1214.0f, 1215.0f, 1216.0f, 1217.0f, 1218.0f, 1219.0f, 1220.0f, 1221.0f, 1222.0f, 1223.0f, 1224.0f, 1225.0f, 1226.0f, 1227.0f, 1228.0f, 1229.0f, 1230.0f, 1231.0f, 
1232.0f, 1233.0f, 1234.0f, 1235.0f, 1236.0f, 1237.0f, 1238.0f, 1239.0f, 1240.0f, 1241.0f, 1242.0f, 1243.0f, 1244.0f, 1245.0f, 1246.0f, 1247.0f, 1248.0f, 1249.0f, 1250.0f, 1251.0f, 1252.0f, 1253.0f, 1254.0f, 1255.0f, 1256.0f, 1257.0f, 1258.0f, 1259.0f, 1260.0f, 1261.0f, 1262.0f, 1263.0f, 1264.0f, 1265.0f, 1266.0f, 1267.0f, 1268.0f, 1269.0f, 1270.0f, 1271.0f, 1272.0f, 1273.0f, 1274.0f, 1275.0f, 1276.0f, 1277.0f, 1278.0f, 1279.0f, 1280.0f, 1281.0f, 1282.0f, 1283.0f, 1284.0f, 1285.0f, 1286.0f, 1287.0f, 1288.0f, 1289.0f, 1290.0f, 1291.0f, 1292.0f, 1293.0f, 1294.0f, 1295.0f, 1296.0f, 1297.0f, 1298.0f, 1299.0f, 1300.0f, 1301.0f, 1302.0f, 1303.0f, 1304.0f, 1305.0f, 1306.0f, 1307.0f, 1308.0f, 1309.0f, 1310.0f, 1311.0f, 1312.0f, 1313.0f, 1314.0f, 1315.0f, 1316.0f, 1317.0f, 1318.0f, 1319.0f, 1320.0f, 1321.0f, 1322.0f, 1323.0f, 1324.0f, 1325.0f, 1326.0f, 1327.0f, 1328.0f, 1329.0f, 1330.0f, 1331.0f, 1332.0f, 1333.0f, 1334.0f, 1335.0f, 1336.0f, 1337.0f, 1338.0f, 1339.0f, 1340.0f, 1341.0f, 1342.0f, 1343.0f, 1344.0f, 1345.0f, 1346.0f, 1347.0f, 1348.0f, 1349.0f, 1350.0f, 1351.0f, 1352.0f, 1353.0f, 1354.0f, 1355.0f, 1356.0f, 1357.0f, 1358.0f, 1359.0f, 1360.0f, 1361.0f, 1362.0f, 1363.0f, 1364.0f, 1365.0f, 1366.0f, 1367.0f, 1368.0f, 1369.0f, 1370.0f, 1371.0f, 1372.0f, 1373.0f, 1374.0f, 1375.0f, 1376.0f, 1377.0f, 1378.0f, 1379.0f, 1380.0f, 1381.0f, 1382.0f, 1383.0f, 1384.0f, 1385.0f, 1386.0f, 1387.0f, 1388.0f, 1389.0f, 1390.0f, 1391.0f, 1392.0f, 1393.0f, 1394.0f, 1395.0f, 1396.0f, 1397.0f, 1398.0f, 1399.0f, 1400.0f, 1401.0f, 1402.0f, 1403.0f, 1404.0f, 1405.0f, 1406.0f, 1407.0f, 1408.0f, 1409.0f, 1410.0f, 1411.0f, 1412.0f, 1413.0f, 1414.0f, 1415.0f, 1416.0f, 1417.0f, 1418.0f, 1419.0f, 1420.0f, 1421.0f, 1422.0f, 1423.0f, 1424.0f, 1425.0f, 1426.0f, 1427.0f, 1428.0f, 1429.0f, 1430.0f, 1431.0f, 1432.0f, 1433.0f, 1434.0f, 1435.0f, 1436.0f, 1437.0f, 1438.0f, 1439.0f, 1440.0f, 1441.0f, 1442.0f, 1443.0f, 1444.0f, 1445.0f, 1446.0f, 1447.0f, 1448.0f, 1449.0f, 1450.0f, 1451.0f, 1452.0f, 1453.0f, 
1454.0f, 1455.0f, 1456.0f, 1457.0f, 1458.0f, 1459.0f, 1460.0f, 1461.0f, 1462.0f, 1463.0f, 1464.0f, 1465.0f, 1466.0f, 1467.0f, 1468.0f, 1469.0f, 1470.0f, 1471.0f, 1472.0f, 1473.0f, 1474.0f, 1475.0f, 1476.0f, 1477.0f, 1478.0f, 1479.0f, 1480.0f, 1481.0f, 1482.0f, 1483.0f, 1484.0f, 1485.0f, 1486.0f, 1487.0f, 1488.0f, 1489.0f, 1490.0f, 1491.0f, 1492.0f, 1493.0f, 1494.0f, 1495.0f, 1496.0f, 1497.0f, 1498.0f, 1499.0f, 1500.0f, 1501.0f, 1502.0f, 1503.0f, 1504.0f, 1505.0f, 1506.0f, 1507.0f, 1508.0f, 1509.0f, 1510.0f, 1511.0f, 1512.0f, 1513.0f, 1514.0f, 1515.0f, 1516.0f, 1517.0f, 1518.0f, 1519.0f, 1520.0f, 1521.0f, 1522.0f, 1523.0f, 1524.0f, 1525.0f, 1526.0f, 1527.0f, 1528.0f, 1529.0f, 1530.0f, 1531.0f, 1532.0f, 1533.0f, 1534.0f, 1535.0f, 1536.0f, 1537.0f, 1538.0f, 1539.0f, 1540.0f, 1541.0f, 1542.0f, 1543.0f, 1544.0f, 1545.0f, 1546.0f, 1547.0f, 1548.0f, 1549.0f, 1550.0f, 1551.0f, 1552.0f, 1553.0f, 1554.0f, 1555.0f, 1556.0f, 1557.0f, 1558.0f, 1559.0f, 1560.0f, 1561.0f, 1562.0f, 1563.0f, 1564.0f, 1565.0f, 1566.0f, 1567.0f, 1568.0f, 1569.0f, 1570.0f, 1571.0f, 1572.0f, 1573.0f, 1574.0f, 1575.0f, 1576.0f, 1577.0f, 1578.0f, 1579.0f, 1580.0f, 1581.0f, 1582.0f, 1583.0f, 1584.0f, 1585.0f, 1586.0f, 1587.0f, 1588.0f, 1589.0f, 1590.0f, 1591.0f, 1592.0f, 1593.0f, 1594.0f, 1595.0f, 1596.0f, 1597.0f, 1598.0f, 1599.0f, 1600.0f, 1601.0f, 1602.0f, 1603.0f, 1604.0f, 1605.0f, 1606.0f, 1607.0f, 1608.0f, 1609.0f, 1610.0f, 1611.0f, 1612.0f, 1613.0f, 1614.0f, 1615.0f, 1616.0f, 1617.0f, 1618.0f, 1619.0f, 1620.0f, 1621.0f, 1622.0f, 1623.0f, 1624.0f, 1625.0f, 1626.0f, 1627.0f, 1628.0f, 1629.0f, 1630.0f, 1631.0f, 1632.0f, 1633.0f, 1634.0f, 1635.0f, 1636.0f, 1637.0f, 1638.0f, 1639.0f, 1640.0f, 1641.0f, 1642.0f, 1643.0f, 1644.0f, 1645.0f, 1646.0f, 1647.0f, 1648.0f, 1649.0f, 1650.0f, 1651.0f, 1652.0f, 1653.0f, 1654.0f, 1655.0f, 1656.0f, 1657.0f, 1658.0f, 1659.0f, 1660.0f, 1661.0f, 1662.0f, 1663.0f, 1664.0f, 1665.0f, 1666.0f, 1667.0f, 1668.0f, 1669.0f, 1670.0f, 1671.0f, 1672.0f, 1673.0f, 1674.0f, 1675.0f, 
1676.0f, 1677.0f, 1678.0f, 1679.0f, 1680.0f, 1681.0f, 1682.0f, 1683.0f, 1684.0f, 1685.0f, 1686.0f, 1687.0f, 1688.0f, 1689.0f, 1690.0f, 1691.0f, 1692.0f, 1693.0f, 1694.0f, 1695.0f, 1696.0f, 1697.0f, 1698.0f, 1699.0f, 1700.0f, 1701.0f, 1702.0f, 1703.0f, 1704.0f, 1705.0f, 1706.0f, 1707.0f, 1708.0f, 1709.0f, 1710.0f, 1711.0f, 1712.0f, 1713.0f, 1714.0f, 1715.0f, 1716.0f, 1717.0f, 1718.0f, 1719.0f, 1720.0f, 1721.0f, 1722.0f, 1723.0f, 1724.0f, 1725.0f, 1726.0f, 1727.0f, 1728.0f, 1729.0f, 1730.0f, 1731.0f, 1732.0f, 1733.0f, 1734.0f, 1735.0f, 1736.0f, 1737.0f, 1738.0f, 1739.0f, 1740.0f, 1741.0f, 1742.0f, 1743.0f, 1744.0f, 1745.0f, 1746.0f, 1747.0f, 1748.0f, 1749.0f, 1750.0f, 1751.0f, 1752.0f, 1753.0f, 1754.0f, 1755.0f, 1756.0f, 1757.0f, 1758.0f, 1759.0f, 1760.0f, 1761.0f, 1762.0f, 1763.0f, 1764.0f, 1765.0f, 1766.0f, 1767.0f, 1768.0f, 1769.0f, 1770.0f, 1771.0f, 1772.0f, 1773.0f, 1774.0f, 1775.0f, 1776.0f, 1777.0f, 1778.0f, 1779.0f, 1780.0f, 1781.0f, 1782.0f, 1783.0f, 1784.0f, 1785.0f, 1786.0f, 1787.0f, 1788.0f, 1789.0f, 1790.0f, 1791.0f, 1792.0f, 1793.0f, 1794.0f, 1795.0f, 1796.0f, 1797.0f, 1798.0f, 1799.0f, 1800.0f, 1801.0f, 1802.0f, 1803.0f, 1804.0f, 1805.0f, 1806.0f, 1807.0f, 1808.0f, 1809.0f, 1810.0f, 1811.0f, 1812.0f, 1813.0f, 1814.0f, 1815.0f, 1816.0f, 1817.0f, 1818.0f, 1819.0f, 1820.0f, 1821.0f, 1822.0f, 1823.0f, 1824.0f, 1825.0f, 1826.0f, 1827.0f, 1828.0f, 1829.0f, 1830.0f, 1831.0f, 1832.0f, 1833.0f, 1834.0f, 1835.0f, 1836.0f, 1837.0f, 1838.0f, 1839.0f, 1840.0f, 1841.0f, 1842.0f, 1843.0f, 1844.0f, 1845.0f, 1846.0f, 1847.0f, 1848.0f, 1849.0f, 1850.0f, 1851.0f, 1852.0f, 1853.0f, 1854.0f, 1855.0f, 1856.0f, 1857.0f, 1858.0f, 1859.0f, 1860.0f, 1861.0f, 1862.0f, 1863.0f, 1864.0f, 1865.0f, 1866.0f, 1867.0f, 1868.0f, 1869.0f, 1870.0f, 1871.0f, 1872.0f, 1873.0f, 1874.0f, 1875.0f, 1876.0f, 1877.0f, 1878.0f, 1879.0f, 1880.0f, 1881.0f, 1882.0f, 1883.0f, 1884.0f, 1885.0f, 1886.0f, 1887.0f, 1888.0f, 1889.0f, 1890.0f, 1891.0f, 1892.0f, 1893.0f, 1894.0f, 1895.0f, 1896.0f, 1897.0f, 
1898.0f, 1899.0f, 1900.0f, 1901.0f, 1902.0f, 1903.0f, 1904.0f, 1905.0f, 1906.0f, 1907.0f, 1908.0f, 1909.0f, 1910.0f, 1911.0f, 1912.0f, 1913.0f, 1914.0f, 1915.0f, 1916.0f, 1917.0f, 1918.0f, 1919.0f, 1920.0f, 1921.0f, 1922.0f, 1923.0f, 1924.0f, 1925.0f, 1926.0f, 1927.0f, 1928.0f, 1929.0f, 1930.0f, 1931.0f, 1932.0f, 1933.0f, 1934.0f, 1935.0f, 1936.0f, 1937.0f, 1938.0f, 1939.0f, 1940.0f, 1941.0f, 1942.0f, 1943.0f, 1944.0f, 1945.0f, 1946.0f, 1947.0f, 1948.0f, 1949.0f, 1950.0f, 1951.0f, 1952.0f, 1953.0f, 1954.0f, 1955.0f, 1956.0f, 1957.0f, 1958.0f, 1959.0f, 1960.0f, 1961.0f, 1962.0f, 1963.0f, 1964.0f, 1965.0f, 1966.0f, 1967.0f, 1968.0f, 1969.0f, 1970.0f, 1971.0f, 1972.0f, 1973.0f, 1974.0f, 1975.0f, 1976.0f, 1977.0f, 1978.0f, 1979.0f, 1980.0f, 1981.0f, 1982.0f, 1983.0f, 1984.0f, 1985.0f, 1986.0f, 1987.0f, 1988.0f, 1989.0f, 1990.0f, 1991.0f, 1992.0f, 1993.0f, 1994.0f, 1995.0f, 1996.0f, 1997.0f, 1998.0f, 1999.0f, 2000.0f, 2001.0f, 2002.0f, 2003.0f, 2004.0f, 2005.0f, 2006.0f, 2007.0f, 2008.0f, 2009.0f, 2010.0f, 2011.0f, 2012.0f, 2013.0f, 2014.0f, 2015.0f, 2016.0f, 2017.0f, 2018.0f, 2019.0f, 2020.0f, 2021.0f, 2022.0f, 2023.0f, 2024.0f, 2025.0f, 2026.0f, 2027.0f, 2028.0f, 2029.0f, 2030.0f, 2031.0f, 2032.0f, 2033.0f, 2034.0f, 2035.0f, 2036.0f, 2037.0f, 2038.0f, 2039.0f, 2040.0f, 2041.0f, 2042.0f, 2043.0f, 2044.0f, 2045.0f, 2046.0f, 2047.0f, 2048.0f, 2049.0f, 2050.0f, 2051.0f, 2052.0f, 2053.0f, 2054.0f, 2055.0f, 2056.0f, 2057.0f, 2058.0f, 2059.0f, 2060.0f, 2061.0f, 2062.0f, 2063.0f, 2064.0f, 2065.0f, 2066.0f, 2067.0f, 2068.0f, 2069.0f, 2070.0f, 2071.0f, 2072.0f, 2073.0f, 2074.0f, 2075.0f, 2076.0f, 2077.0f, 2078.0f, 2079.0f, 2080.0f, 2081.0f, 2082.0f, 2083.0f, 2084.0f, 2085.0f, 2086.0f, 2087.0f, 2088.0f, 2089.0f, 2090.0f, 2091.0f, 2092.0f, 2093.0f, 2094.0f, 2095.0f, 2096.0f, 2097.0f, 2098.0f, 2099.0f, 2100.0f, 2101.0f, 2102.0f, 2103.0f, 2104.0f, 2105.0f, 2106.0f, 2107.0f, 2108.0f, 2109.0f, 2110.0f, 2111.0f, 2112.0f, 2113.0f, 2114.0f, 2115.0f, 2116.0f, 2117.0f, 2118.0f, 2119.0f, 
2120.0f, 2121.0f, 2122.0f, 2123.0f, 2124.0f, 2125.0f, 2126.0f, 2127.0f, 2128.0f, 2129.0f, 2130.0f, 2131.0f, 2132.0f, 2133.0f, 2134.0f, 2135.0f, 2136.0f, 2137.0f, 2138.0f, 2139.0f, 2140.0f, 2141.0f, 2142.0f, 2143.0f, 2144.0f, 2145.0f, 2146.0f, 2147.0f, 2148.0f, 2149.0f, 2150.0f, 2151.0f, 2152.0f, 2153.0f, 2154.0f, 2155.0f, 2156.0f, 2157.0f, 2158.0f, 2159.0f, 2160.0f, 2161.0f, 2162.0f, 2163.0f, 2164.0f, 2165.0f, 2166.0f, 2167.0f, 2168.0f, 2169.0f, 2170.0f, 2171.0f, 2172.0f, 2173.0f, 2174.0f, 2175.0f, 2176.0f, 2177.0f, 2178.0f, 2179.0f, 2180.0f, 2181.0f, 2182.0f, 2183.0f, 2184.0f, 2185.0f, 2186.0f, 2187.0f, 2188.0f, 2189.0f, 2190.0f, 2191.0f, 2192.0f, 2193.0f, 2194.0f, 2195.0f, 2196.0f, 2197.0f, 2198.0f, 2199.0f, 2200.0f, 2201.0f, 2202.0f, 2203.0f, 2204.0f, 2205.0f, 2206.0f, 2207.0f, 2208.0f, 2209.0f, 2210.0f, 2211.0f, 2212.0f, 2213.0f, 2214.0f, 2215.0f, 2216.0f, 2217.0f, 2218.0f, 2219.0f, 2220.0f, 2221.0f, 2222.0f, 2223.0f, 2224.0f, 2225.0f, 2226.0f, 2227.0f, 2228.0f, 2229.0f, 2230.0f, 2231.0f, 2232.0f, 2233.0f, 2234.0f, 2235.0f, 2236.0f, 2237.0f, 2238.0f, 2239.0f, 2240.0f, 2241.0f, 2242.0f, 2243.0f, 2244.0f, 2245.0f, 2246.0f, 2247.0f, 2248.0f, 2249.0f, 2250.0f, 2251.0f, 2252.0f, 2253.0f, 2254.0f, 2255.0f, 2256.0f, 2257.0f, 2258.0f, 2259.0f, 2260.0f, 2261.0f, 2262.0f, 2263.0f, 2264.0f, 2265.0f, 2266.0f, 2267.0f, 2268.0f, 2269.0f, 2270.0f, 2271.0f, 2272.0f, 2273.0f, 2274.0f, 2275.0f, 2276.0f, 2277.0f, 2278.0f, 2279.0f, 2280.0f, 2281.0f, 2282.0f, 2283.0f, 2284.0f, 2285.0f, 2286.0f, 2287.0f, 2288.0f, 2289.0f, 2290.0f, 2291.0f, 2292.0f, 2293.0f, 2294.0f, 2295.0f, 2296.0f, 2297.0f, 2298.0f, 2299.0f, 2300.0f, 2301.0f, 2302.0f, 2303.0f, 2304.0f, 2305.0f, 2306.0f, 2307.0f, 2308.0f, 2309.0f, 2310.0f, 2311.0f, 2312.0f, 2313.0f, 2314.0f, 2315.0f, 2316.0f, 2317.0f, 2318.0f, 2319.0f, 2320.0f, 2321.0f, 2322.0f, 2323.0f, 2324.0f, 2325.0f, 2326.0f, 2327.0f, 2328.0f, 2329.0f, 2330.0f, 2331.0f, 2332.0f, 2333.0f, 2334.0f, 2335.0f, 2336.0f, 2337.0f, 2338.0f, 2339.0f, 2340.0f, 2341.0f, 
2342.0f, 2343.0f, 2344.0f, 2345.0f, 2346.0f, 2347.0f, 2348.0f, 2349.0f, 2350.0f, 2351.0f, 2352.0f, 2353.0f, 2354.0f, 2355.0f, 2356.0f, 2357.0f, 2358.0f, 2359.0f, 2360.0f, 2361.0f, 2362.0f, 2363.0f, 2364.0f, 2365.0f, 2366.0f, 2367.0f, 2368.0f, 2369.0f, 2370.0f, 2371.0f, 2372.0f, 2373.0f, 2374.0f, 2375.0f, 2376.0f, 2377.0f, 2378.0f, 2379.0f, 2380.0f, 2381.0f, 2382.0f, 2383.0f, 2384.0f, 2385.0f, 2386.0f, 2387.0f, 2388.0f, 2389.0f, 2390.0f, 2391.0f, 2392.0f, 2393.0f, 2394.0f, 2395.0f, 2396.0f, 2397.0f, 2398.0f, 2399.0f, 2400.0f, 2401.0f, 2402.0f, 2403.0f, 2404.0f, 2405.0f, 2406.0f, 2407.0f, 2408.0f, 2409.0f, 2410.0f, 2411.0f, 2412.0f, 2413.0f, 2414.0f, 2415.0f, 2416.0f, 2417.0f, 2418.0f, 2419.0f, 2420.0f, 2421.0f, 2422.0f, 2423.0f, 2424.0f, 2425.0f, 2426.0f, 2427.0f, 2428.0f, 2429.0f, 2430.0f, 2431.0f, 2432.0f, 2433.0f, 2434.0f, 2435.0f, 2436.0f, 2437.0f, 2438.0f, 2439.0f, 2440.0f, 2441.0f, 2442.0f, 2443.0f, 2444.0f, 2445.0f, 2446.0f, 2447.0f, 2448.0f, 2449.0f, 2450.0f, 2451.0f, 2452.0f, 2453.0f, 2454.0f, 2455.0f, 2456.0f, 2457.0f, 2458.0f, 2459.0f, 2460.0f, 2461.0f, 2462.0f, 2463.0f, 2464.0f, 2465.0f, 2466.0f, 2467.0f, 2468.0f, 2469.0f, 2470.0f, 2471.0f, 2472.0f, 2473.0f, 2474.0f, 2475.0f, 2476.0f, 2477.0f, 2478.0f, 2479.0f, 2480.0f, 2481.0f, 2482.0f, 2483.0f, 2484.0f, 2485.0f, 2486.0f, 2487.0f, 2488.0f, 2489.0f, 2490.0f, 2491.0f, 2492.0f, 2493.0f, 2494.0f, 2495.0f, 2496.0f, 2497.0f, 2498.0f, 2499.0f, 2500.0f, 2501.0f, 2502.0f, 2503.0f, 2504.0f, 2505.0f, 2506.0f, 2507.0f, 2508.0f, 2509.0f, 2510.0f, 2511.0f, 2512.0f, 2513.0f, 2514.0f, 2515.0f, 2516.0f, 2517.0f, 2518.0f, 2519.0f, 2520.0f, 2521.0f, 2522.0f, 2523.0f, 2524.0f, 2525.0f, 2526.0f, 2527.0f, 2528.0f, 2529.0f, 2530.0f, 2531.0f, 2532.0f, 2533.0f, 2534.0f, 2535.0f, 2536.0f, 2537.0f, 2538.0f, 2539.0f, 2540.0f, 2541.0f, 2542.0f, 2543.0f, 2544.0f, 2545.0f, 2546.0f, 2547.0f, 2548.0f, 2549.0f, 2550.0f, 2551.0f, 2552.0f, 2553.0f, 2554.0f, 2555.0f, 2556.0f, 2557.0f, 2558.0f, 2559.0f, 2560.0f, 2561.0f, 2562.0f, 2563.0f, 
2564.0f, 2565.0f, 2566.0f, 2567.0f, 2568.0f, 2569.0f, 2570.0f, 2571.0f, 2572.0f, 2573.0f, 2574.0f, 2575.0f, 2576.0f, 2577.0f, 2578.0f, 2579.0f, 2580.0f, 2581.0f, 2582.0f, 2583.0f, 2584.0f, 2585.0f, 2586.0f, 2587.0f, 2588.0f, 2589.0f, 2590.0f, 2591.0f, 2592.0f, 2593.0f, 2594.0f, 2595.0f, 2596.0f, 2597.0f, 2598.0f, 2599.0f, 2600.0f, 2601.0f, 2602.0f, 2603.0f, 2604.0f, 2605.0f, 2606.0f, 2607.0f, 2608.0f, 2609.0f, 2610.0f, 2611.0f, 2612.0f, 2613.0f, 2614.0f, 2615.0f, 2616.0f, 2617.0f, 2618.0f, 2619.0f, 2620.0f, 2621.0f, 2622.0f, 2623.0f, 2624.0f, 2625.0f, 2626.0f, 2627.0f, 2628.0f, 2629.0f, 2630.0f, 2631.0f, 2632.0f, 2633.0f, 2634.0f, 2635.0f, 2636.0f, 2637.0f, 2638.0f, 2639.0f, 2640.0f, 2641.0f, 2642.0f, 2643.0f, 2644.0f, 2645.0f, 2646.0f, 2647.0f, 2648.0f, 2649.0f, 2650.0f, 2651.0f, 2652.0f, 2653.0f, 2654.0f, 2655.0f, 2656.0f, 2657.0f, 2658.0f, 2659.0f, 2660.0f, 2661.0f, 2662.0f, 2663.0f, 2664.0f, 2665.0f, 2666.0f, 2667.0f, 2668.0f, 2669.0f, 2670.0f, 2671.0f, 2672.0f, 2673.0f, 2674.0f, 2675.0f, 2676.0f, 2677.0f, 2678.0f, 2679.0f, 2680.0f, 2681.0f, 2682.0f, 2683.0f, 2684.0f, 2685.0f, 2686.0f, 2687.0f, 2688.0f, 2689.0f, 2690.0f, 2691.0f, 2692.0f, 2693.0f, 2694.0f, 2695.0f, 2696.0f, 2697.0f, 2698.0f, 2699.0f, 2700.0f, 2701.0f, 2702.0f, 2703.0f, 2704.0f, 2705.0f, 2706.0f, 2707.0f, 2708.0f, 2709.0f, 2710.0f, 2711.0f, 2712.0f, 2713.0f, 2714.0f, 2715.0f, 2716.0f, 2717.0f, 2718.0f, 2719.0f, 2720.0f, 2721.0f, 2722.0f, 2723.0f, 2724.0f, 2725.0f, 2726.0f, 2727.0f, 2728.0f, 2729.0f, 2730.0f, 2731.0f, 2732.0f, 2733.0f, 2734.0f, 2735.0f, 2736.0f, 2737.0f, 2738.0f, 2739.0f, 2740.0f, 2741.0f, 2742.0f, 2743.0f, 2744.0f, 2745.0f, 2746.0f, 2747.0f, 2748.0f, 2749.0f, 2750.0f, 2751.0f, 2752.0f, 2753.0f, 2754.0f, 2755.0f, 2756.0f, 2757.0f, 2758.0f, 2759.0f, 2760.0f, 2761.0f, 2762.0f, 2763.0f, 2764.0f, 2765.0f, 2766.0f, 2767.0f, 2768.0f, 2769.0f, 2770.0f, 2771.0f, 2772.0f, 2773.0f, 2774.0f, 2775.0f, 2776.0f, 2777.0f, 2778.0f, 2779.0f, 2780.0f, 2781.0f, 2782.0f, 2783.0f, 2784.0f, 2785.0f, 
2786.0f, 2787.0f, 2788.0f, 2789.0f, 2790.0f, 2791.0f, 2792.0f, 2793.0f, 2794.0f, 2795.0f, 2796.0f, 2797.0f, 2798.0f, 2799.0f, 2800.0f, 2801.0f, 2802.0f, 2803.0f, 2804.0f, 2805.0f, 2806.0f, 2807.0f, 2808.0f, 2809.0f, 2810.0f, 2811.0f, 2812.0f, 2813.0f, 2814.0f, 2815.0f, 2816.0f, 2817.0f, 2818.0f, 2819.0f, 2820.0f, 2821.0f, 2822.0f, 2823.0f, 2824.0f, 2825.0f, 2826.0f, 2827.0f, 2828.0f, 2829.0f, 2830.0f, 2831.0f, 2832.0f, 2833.0f, 2834.0f, 2835.0f, 2836.0f, 2837.0f, 2838.0f, 2839.0f, 2840.0f, 2841.0f, 2842.0f, 2843.0f, 2844.0f, 2845.0f, 2846.0f, 2847.0f, 2848.0f, 2849.0f, 2850.0f, 2851.0f, 2852.0f, 2853.0f, 2854.0f, 2855.0f, 2856.0f, 2857.0f, 2858.0f, 2859.0f, 2860.0f, 2861.0f, 2862.0f, 2863.0f, 2864.0f, 2865.0f, 2866.0f, 2867.0f, 2868.0f, 2869.0f, 2870.0f, 2871.0f, 2872.0f, 2873.0f, 2874.0f, 2875.0f, 2876.0f, 2877.0f, 2878.0f, 2879.0f, 2880.0f, 2881.0f, 2882.0f, 2883.0f, 2884.0f, 2885.0f, 2886.0f, 2887.0f, 2888.0f, 2889.0f, 2890.0f, 2891.0f, 2892.0f, 2893.0f, 2894.0f, 2895.0f, 2896.0f, 2897.0f, 2898.0f, 2899.0f, 2900.0f, 2901.0f, 2902.0f, 2903.0f, 2904.0f, 2905.0f, 2906.0f, 2907.0f, 2908.0f, 2909.0f, 2910.0f, 2911.0f, 2912.0f, 2913.0f, 2914.0f, 2915.0f, 2916.0f, 2917.0f, 2918.0f, 2919.0f, 2920.0f, 2921.0f, 2922.0f, 2923.0f, 2924.0f, 2925.0f, 2926.0f, 2927.0f, 2928.0f, 2929.0f, 2930.0f, 2931.0f, 2932.0f, 2933.0f, 2934.0f, 2935.0f, 2936.0f, 2937.0f, 2938.0f, 2939.0f, 2940.0f, 2941.0f, 2942.0f, 2943.0f, 2944.0f, 2945.0f, 2946.0f, 2947.0f, 2948.0f, 2949.0f, 2950.0f, 2951.0f, 2952.0f, 2953.0f, 2954.0f, 2955.0f, 2956.0f, 2957.0f, 2958.0f, 2959.0f, 2960.0f, 2961.0f, 2962.0f, 2963.0f, 2964.0f, 2965.0f, 2966.0f, 2967.0f, 2968.0f, 2969.0f, 2970.0f, 2971.0f, 2972.0f, 2973.0f, 2974.0f, 2975.0f, 2976.0f, 2977.0f, 2978.0f, 2979.0f, 2980.0f, 2981.0f, 2982.0f, 2983.0f, 2984.0f, 2985.0f, 2986.0f, 2987.0f, 2988.0f, 2989.0f, 2990.0f, 2991.0f, 2992.0f, 2993.0f, 2994.0f, 2995.0f, 2996.0f, 2997.0f, 2998.0f, 2999.0f, 3000.0f, 3001.0f, 3002.0f, 3003.0f, 3004.0f, 3005.0f, 3006.0f, 3007.0f, 
3008.0f, 3009.0f, 3010.0f, 3011.0f, 3012.0f, 3013.0f, 3014.0f, 3015.0f, 3016.0f, 3017.0f, 3018.0f, 3019.0f, 3020.0f, 3021.0f, 3022.0f, 3023.0f, 3024.0f, 3025.0f, 3026.0f, 3027.0f, 3028.0f, 3029.0f, 3030.0f, 3031.0f, 3032.0f, 3033.0f, 3034.0f, 3035.0f, 3036.0f, 3037.0f, 3038.0f, 3039.0f, 3040.0f, 3041.0f, 3042.0f, 3043.0f, 3044.0f, 3045.0f, 3046.0f, 3047.0f, 3048.0f, 3049.0f, 3050.0f, 3051.0f, 3052.0f, 3053.0f, 3054.0f, 3055.0f, 3056.0f, 3057.0f, 3058.0f, 3059.0f, 3060.0f, 3061.0f, 3062.0f, 3063.0f, 3064.0f, 3065.0f, 3066.0f, 3067.0f, 3068.0f, 3069.0f, 3070.0f, 3071.0f, 3072.0f, 3073.0f, 3074.0f, 3075.0f, 3076.0f, 3077.0f, 3078.0f, 3079.0f, 3080.0f, 3081.0f, 3082.0f, 3083.0f, 3084.0f, 3085.0f, 3086.0f, 3087.0f, 3088.0f, 3089.0f, 3090.0f, 3091.0f, 3092.0f, 3093.0f, 3094.0f, 3095.0f, 3096.0f, 3097.0f, 3098.0f, 3099.0f, 3100.0f, 3101.0f, 3102.0f, 3103.0f, 3104.0f, 3105.0f, 3106.0f, 3107.0f, 3108.0f, 3109.0f, 3110.0f, 3111.0f, 3112.0f, 3113.0f, 3114.0f, 3115.0f, 3116.0f, 3117.0f, 3118.0f, 3119.0f, 3120.0f, 3121.0f, 3122.0f, 3123.0f, 3124.0f, 3125.0f, 3126.0f, 3127.0f, 3128.0f, 3129.0f, 3130.0f, 3131.0f, 3132.0f, 3133.0f, 3134.0f, 3135.0f, 3136.0f, 3137.0f, 3138.0f, 3139.0f, 3140.0f, 3141.0f, 3142.0f, 3143.0f, 3144.0f, 3145.0f, 3146.0f, 3147.0f, 3148.0f, 3149.0f, 3150.0f, 3151.0f, 3152.0f, 3153.0f, 3154.0f, 3155.0f, 3156.0f, 3157.0f, 3158.0f, 3159.0f, 3160.0f, 3161.0f, 3162.0f, 3163.0f, 3164.0f, 3165.0f, 3166.0f, 3167.0f, 3168.0f, 3169.0f, 3170.0f, 3171.0f, 3172.0f, 3173.0f, 3174.0f, 3175.0f, 3176.0f, 3177.0f, 3178.0f, 3179.0f, 3180.0f, 3181.0f, 3182.0f, 3183.0f, 3184.0f, 3185.0f, 3186.0f, 3187.0f, 3188.0f, 3189.0f, 3190.0f, 3191.0f, 3192.0f, 3193.0f, 3194.0f, 3195.0f, 3196.0f, 3197.0f, 3198.0f, 3199.0f, 3200.0f, 3201.0f, 3202.0f, 3203.0f, 3204.0f, 3205.0f, 3206.0f, 3207.0f, 3208.0f, 3209.0f, 3210.0f, 3211.0f, 3212.0f, 3213.0f, 3214.0f, 3215.0f, 3216.0f, 3217.0f, 3218.0f, 3219.0f, 3220.0f, 3221.0f, 3222.0f, 3223.0f, 3224.0f, 3225.0f, 3226.0f, 3227.0f, 3228.0f, 3229.0f, 
3230.0f, 3231.0f, 3232.0f, 3233.0f, 3234.0f, 3235.0f, 3236.0f, 3237.0f, 3238.0f, 3239.0f, 3240.0f, 3241.0f, 3242.0f, 3243.0f, 3244.0f, 3245.0f, 3246.0f, 3247.0f, 3248.0f, 3249.0f, 3250.0f, 3251.0f, 3252.0f, 3253.0f, 3254.0f, 3255.0f, 3256.0f, 3257.0f, 3258.0f, 3259.0f, 3260.0f, 3261.0f, 3262.0f, 3263.0f, 3264.0f, 3265.0f, 3266.0f, 3267.0f, 3268.0f, 3269.0f, 3270.0f, 3271.0f, 3272.0f, 3273.0f, 3274.0f, 3275.0f, 3276.0f, 3277.0f, 3278.0f, 3279.0f, 3280.0f, 3281.0f, 3282.0f, 3283.0f, 3284.0f, 3285.0f, 3286.0f, 3287.0f, 3288.0f, 3289.0f, 3290.0f, 3291.0f, 3292.0f, 3293.0f, 3294.0f, 3295.0f, 3296.0f, 3297.0f, 3298.0f, 3299.0f, 3300.0f, 3301.0f, 3302.0f, 3303.0f, 3304.0f, 3305.0f, 3306.0f, 3307.0f, 3308.0f, 3309.0f, 3310.0f, 3311.0f, 3312.0f, 3313.0f, 3314.0f, 3315.0f, 3316.0f, 3317.0f, 3318.0f, 3319.0f, 3320.0f, 3321.0f, 3322.0f, 3323.0f, 3324.0f, 3325.0f, 3326.0f, 3327.0f, 3328.0f, 3329.0f, 3330.0f, 3331.0f, 3332.0f, 3333.0f, 3334.0f, 3335.0f, 3336.0f, 3337.0f, 3338.0f, 3339.0f, 3340.0f, 3341.0f, 3342.0f, 3343.0f, 3344.0f, 3345.0f, 3346.0f, 3347.0f, 3348.0f, 3349.0f, 3350.0f, 3351.0f, 3352.0f, 3353.0f, 3354.0f, 3355.0f, 3356.0f, 3357.0f, 3358.0f, 3359.0f, 3360.0f, 3361.0f, 3362.0f, 3363.0f, 3364.0f, 3365.0f, 3366.0f, 3367.0f, 3368.0f, 3369.0f, 3370.0f, 3371.0f, 3372.0f, 3373.0f, 3374.0f, 3375.0f, 3376.0f, 3377.0f, 3378.0f, 3379.0f, 3380.0f, 3381.0f, 3382.0f, 3383.0f, 3384.0f, 3385.0f, 3386.0f, 3387.0f, 3388.0f, 3389.0f, 3390.0f, 3391.0f, 3392.0f, 3393.0f, 3394.0f, 3395.0f, 3396.0f, 3397.0f, 3398.0f, 3399.0f, 3400.0f, 3401.0f, 3402.0f, 3403.0f, 3404.0f, 3405.0f, 3406.0f, 3407.0f, 3408.0f, 3409.0f, 3410.0f, 3411.0f, 3412.0f, 3413.0f, 3414.0f, 3415.0f, 3416.0f, 3417.0f, 3418.0f, 3419.0f, 3420.0f, 3421.0f, 3422.0f, 3423.0f, 3424.0f, 3425.0f, 3426.0f, 3427.0f, 3428.0f, 3429.0f, 3430.0f, 3431.0f, 3432.0f, 3433.0f, 3434.0f, 3435.0f, 3436.0f, 3437.0f, 3438.0f, 3439.0f, 3440.0f, 3441.0f, 3442.0f, 3443.0f, 3444.0f, 3445.0f, 3446.0f, 3447.0f, 3448.0f, 3449.0f, 3450.0f, 3451.0f, 
3452.0f, 3453.0f, 3454.0f, 3455.0f, 3456.0f, 3457.0f, 3458.0f, 3459.0f, 3460.0f, 3461.0f, 3462.0f, 3463.0f, 3464.0f, 3465.0f, 3466.0f, 3467.0f, 3468.0f, 3469.0f, 3470.0f, 3471.0f, 3472.0f, 3473.0f, 3474.0f, 3475.0f, 3476.0f, 3477.0f, 3478.0f, 3479.0f, 3480.0f, 3481.0f, 3482.0f, 3483.0f, 3484.0f, 3485.0f, 3486.0f, 3487.0f, 3488.0f, 3489.0f, 3490.0f, 3491.0f, 3492.0f, 3493.0f, 3494.0f, 3495.0f, 3496.0f, 3497.0f, 3498.0f, 3499.0f, 3500.0f, 3501.0f, 3502.0f, 3503.0f, 3504.0f, 3505.0f, 3506.0f, 3507.0f, 3508.0f, 3509.0f, 3510.0f, 3511.0f, 3512.0f, 3513.0f, 3514.0f, 3515.0f, 3516.0f, 3517.0f, 3518.0f, 3519.0f, 3520.0f, 3521.0f, 3522.0f, 3523.0f, 3524.0f, 3525.0f, 3526.0f, 3527.0f, 3528.0f, 3529.0f, 3530.0f, 3531.0f, 3532.0f, 3533.0f, 3534.0f, 3535.0f, 3536.0f, 3537.0f, 3538.0f, 3539.0f, 3540.0f, 3541.0f, 3542.0f, 3543.0f, 3544.0f, 3545.0f, 3546.0f, 3547.0f, 3548.0f, 3549.0f, 3550.0f, 3551.0f, 3552.0f, 3553.0f, 3554.0f, 3555.0f, 3556.0f, 3557.0f, 3558.0f, 3559.0f, 3560.0f, 3561.0f, 3562.0f, 3563.0f, 3564.0f, 3565.0f, 3566.0f, 3567.0f, 3568.0f, 3569.0f, 3570.0f, 3571.0f, 3572.0f, 3573.0f, 3574.0f, 3575.0f, 3576.0f, 3577.0f, 3578.0f, 3579.0f, 3580.0f, 3581.0f, 3582.0f, 3583.0f, 3584.0f, 3585.0f, 3586.0f, 3587.0f, 3588.0f, 3589.0f, 3590.0f, 3591.0f, 3592.0f, 3593.0f, 3594.0f, 3595.0f, 3596.0f, 3597.0f, 3598.0f, 3599.0f, 3600.0f, 3601.0f, 3602.0f, 3603.0f, 3604.0f, 3605.0f, 3606.0f, 3607.0f, 3608.0f, 3609.0f, 3610.0f, 3611.0f, 3612.0f, 3613.0f, 3614.0f, 3615.0f, 3616.0f, 3617.0f, 3618.0f, 3619.0f, 3620.0f, 3621.0f, 3622.0f, 3623.0f, 3624.0f, 3625.0f, 3626.0f, 3627.0f, 3628.0f, 3629.0f, 3630.0f, 3631.0f, 3632.0f, 3633.0f, 3634.0f, 3635.0f, 3636.0f, 3637.0f, 3638.0f, 3639.0f, 3640.0f, 3641.0f, 3642.0f, 3643.0f, 3644.0f, 3645.0f, 3646.0f, 3647.0f, 3648.0f, 3649.0f, 3650.0f, 3651.0f, 3652.0f, 3653.0f, 3654.0f, 3655.0f, 3656.0f, 3657.0f, 3658.0f, 3659.0f, 3660.0f, 3661.0f, 3662.0f, 3663.0f, 3664.0f, 3665.0f, 3666.0f, 3667.0f, 3668.0f, 3669.0f, 3670.0f, 3671.0f, 3672.0f, 3673.0f, 
3674.0f, 3675.0f, 3676.0f, 3677.0f, 3678.0f, 3679.0f, 3680.0f, 3681.0f, 3682.0f, 3683.0f, 3684.0f, 3685.0f, 3686.0f, 3687.0f, 3688.0f, 3689.0f, 3690.0f, 3691.0f, 3692.0f, 3693.0f, 3694.0f, 3695.0f, 3696.0f, 3697.0f, 3698.0f, 3699.0f, 3700.0f, 3701.0f, 3702.0f, 3703.0f, 3704.0f, 3705.0f, 3706.0f, 3707.0f, 3708.0f, 3709.0f, 3710.0f, 3711.0f, 3712.0f, 3713.0f, 3714.0f, 3715.0f, 3716.0f, 3717.0f, 3718.0f, 3719.0f, 3720.0f, 3721.0f, 3722.0f, 3723.0f, 3724.0f, 3725.0f, 3726.0f, 3727.0f, 3728.0f, 3729.0f, 3730.0f, 3731.0f, 3732.0f, 3733.0f, 3734.0f, 3735.0f, 3736.0f, 3737.0f, 3738.0f, 3739.0f, 3740.0f, 3741.0f, 3742.0f, 3743.0f, 3744.0f, 3745.0f, 3746.0f, 3747.0f, 3748.0f, 3749.0f, 3750.0f, 3751.0f, 3752.0f, 3753.0f, 3754.0f, 3755.0f, 3756.0f, 3757.0f, 3758.0f, 3759.0f, 3760.0f, 3761.0f, 3762.0f, 3763.0f, 3764.0f, 3765.0f, 3766.0f, 3767.0f, 3768.0f, 3769.0f, 3770.0f, 3771.0f, 3772.0f, 3773.0f, 3774.0f, 3775.0f, 3776.0f, 3777.0f, 3778.0f, 3779.0f, 3780.0f, 3781.0f, 3782.0f, 3783.0f, 3784.0f, 3785.0f, 3786.0f, 3787.0f, 3788.0f, 3789.0f, 3790.0f, 3791.0f, 3792.0f, 3793.0f, 3794.0f, 3795.0f, 3796.0f, 3797.0f, 3798.0f, 3799.0f, 3800.0f, 3801.0f, 3802.0f, 3803.0f, 3804.0f, 3805.0f, 3806.0f, 3807.0f, 3808.0f, 3809.0f, 3810.0f, 3811.0f, 3812.0f, 3813.0f, 3814.0f, 3815.0f, 3816.0f, 3817.0f, 3818.0f, 3819.0f, 3820.0f, 3821.0f, 3822.0f, 3823.0f, 3824.0f, 3825.0f, 3826.0f, 3827.0f, 3828.0f, 3829.0f, 3830.0f, 3831.0f, 3832.0f, 3833.0f, 3834.0f, 3835.0f, 3836.0f, 3837.0f, 3838.0f, 3839.0f, 3840.0f, 3841.0f, 3842.0f, 3843.0f, 3844.0f, 3845.0f, 3846.0f, 3847.0f, 3848.0f, 3849.0f, 3850.0f, 3851.0f, 3852.0f, 3853.0f, 3854.0f, 3855.0f, 3856.0f, 3857.0f, 3858.0f, 3859.0f, 3860.0f, 3861.0f, 3862.0f, 3863.0f, 3864.0f, 3865.0f, 3866.0f, 3867.0f, 3868.0f, 3869.0f, 3870.0f, 3871.0f, 3872.0f, 3873.0f, 3874.0f, 3875.0f, 3876.0f, 3877.0f, 3878.0f, 3879.0f, 3880.0f, 3881.0f, 3882.0f, 3883.0f, 3884.0f, 3885.0f, 3886.0f, 3887.0f, 3888.0f, 3889.0f, 3890.0f, 3891.0f, 3892.0f, 3893.0f, 3894.0f, 3895.0f, 
3896.0f, 3897.0f, 3898.0f, 3899.0f, 3900.0f, 3901.0f, 3902.0f, 3903.0f, 3904.0f, 3905.0f, 3906.0f, 3907.0f, 3908.0f, 3909.0f, 3910.0f, 3911.0f, 3912.0f, 3913.0f, 3914.0f, 3915.0f, 3916.0f, 3917.0f, 3918.0f, 3919.0f, 3920.0f, 3921.0f, 3922.0f, 3923.0f, 3924.0f, 3925.0f, 3926.0f, 3927.0f, 3928.0f, 3929.0f, 3930.0f, 3931.0f, 3932.0f, 3933.0f, 3934.0f, 3935.0f, 3936.0f, 3937.0f, 3938.0f, 3939.0f, 3940.0f, 3941.0f, 3942.0f, 3943.0f, 3944.0f, 3945.0f, 3946.0f, 3947.0f, 3948.0f, 3949.0f, 3950.0f, 3951.0f, 3952.0f, 3953.0f, 3954.0f, 3955.0f, 3956.0f, 3957.0f, 3958.0f, 3959.0f, 3960.0f, 3961.0f, 3962.0f, 3963.0f, 3964.0f, 3965.0f, 3966.0f, 3967.0f, 3968.0f, 3969.0f, 3970.0f, 3971.0f, 3972.0f, 3973.0f, 3974.0f, 3975.0f, 3976.0f, 3977.0f, 3978.0f, 3979.0f, 3980.0f, 3981.0f, 3982.0f, 3983.0f, 3984.0f, 3985.0f, 3986.0f, 3987.0f, 3988.0f, 3989.0f, 3990.0f, 3991.0f, 3992.0f, 3993.0f, 3994.0f, 3995.0f, 3996.0f, 3997.0f, 3998.0f, 3999.0f, 4000.0f, 4001.0f, 4002.0f, 4003.0f, 4004.0f, 4005.0f, 4006.0f, 4007.0f, 4008.0f, 4009.0f, 4010.0f, 4011.0f, 4012.0f, 4013.0f, 4014.0f, 4015.0f, 4016.0f, 4017.0f, 4018.0f, 4019.0f, 4020.0f, 4021.0f, 4022.0f, 4023.0f, 4024.0f, 4025.0f, 4026.0f, 4027.0f, 4028.0f, 4029.0f, 4030.0f, 4031.0f, 4032.0f, 4033.0f, 4034.0f, 4035.0f, 4036.0f, 4037.0f, 4038.0f, 4039.0f, 4040.0f, 4041.0f, 4042.0f, 4043.0f, 4044.0f, 4045.0f, 4046.0f, 4047.0f, 4048.0f, 4049.0f, 4050.0f, 4051.0f, 4052.0f, 4053.0f, 4054.0f, 4055.0f, 4056.0f, 4057.0f, 4058.0f, 4059.0f, 4060.0f, 4061.0f, 4062.0f, 4063.0f, 4064.0f, 4065.0f, 4066.0f, 4067.0f, 4068.0f, 4069.0f, 4070.0f, 4071.0f, 4072.0f, 4073.0f, 4074.0f, 4075.0f, 4076.0f, 4077.0f, 4078.0f, 4079.0f, 4080.0f, 4081.0f, 4082.0f, 4083.0f, 4084.0f, 4085.0f, 4086.0f, 4087.0f, 4088.0f, 4089.0f, 4090.0f, 4091.0f, 4092.0f, 4093.0f, 4094.0f, 4095.0f, 4096.0f, 4097.0f, 4098.0f, 4099.0f, 4100.0f, 4101.0f, 4102.0f, 4103.0f, 4104.0f, 4105.0f, 4106.0f, 4107.0f, 4108.0f, 4109.0f, 4110.0f, 4111.0f, 4112.0f, 4113.0f, 4114.0f, 4115.0f, 4116.0f, 4117.0f, 
4118.0f, 4119.0f, 4120.0f, 4121.0f, 4122.0f, 4123.0f, 4124.0f, 4125.0f, 4126.0f, 4127.0f, 4128.0f, 4129.0f, 4130.0f, 4131.0f, 4132.0f, 4133.0f, 4134.0f, 4135.0f, 4136.0f, 4137.0f, 4138.0f, 4139.0f, 4140.0f, 4141.0f, 4142.0f, 4143.0f, 4144.0f, 4145.0f, 4146.0f, 4147.0f, 4148.0f, 4149.0f, 4150.0f, 4151.0f, 4152.0f, 4153.0f, 4154.0f, 4155.0f, 4156.0f, 4157.0f, 4158.0f, 4159.0f, 4160.0f, 4161.0f, 4162.0f, 4163.0f, 4164.0f, 4165.0f, 4166.0f, 4167.0f, 4168.0f, 4169.0f, 4170.0f, 4171.0f, 4172.0f, 4173.0f, 4174.0f, 4175.0f, 4176.0f, 4177.0f, 4178.0f, 4179.0f, 4180.0f, 4181.0f, 4182.0f, 4183.0f, 4184.0f, 4185.0f, 4186.0f, 4187.0f, 4188.0f, 4189.0f, 4190.0f, 4191.0f, 4192.0f, 4193.0f, 4194.0f, 4195.0f, 4196.0f, 4197.0f, 4198.0f, 4199.0f, 4200.0f, 4201.0f, 4202.0f, 4203.0f, 4204.0f, 4205.0f, 4206.0f, 4207.0f, 4208.0f, 4209.0f, 4210.0f, 4211.0f, 4212.0f, 4213.0f, 4214.0f, 4215.0f, 4216.0f, 4217.0f, 4218.0f, 4219.0f, 4220.0f, 4221.0f, 4222.0f, 4223.0f, 4224.0f, 4225.0f, 4226.0f, 4227.0f, 4228.0f, 4229.0f, 4230.0f, 4231.0f, 4232.0f, 4233.0f, 4234.0f, 4235.0f, 4236.0f, 4237.0f, 4238.0f, 4239.0f, 4240.0f, 4241.0f, 4242.0f, 4243.0f, 4244.0f, 4245.0f, 4246.0f, 4247.0f, 4248.0f, 4249.0f, 4250.0f, 4251.0f, 4252.0f, 4253.0f, 4254.0f, 4255.0f, 4256.0f, 4257.0f, 4258.0f, 4259.0f, 4260.0f, 4261.0f, 4262.0f, 4263.0f, 4264.0f, 4265.0f, 4266.0f, 4267.0f, 4268.0f, 4269.0f, 4270.0f, 4271.0f, 4272.0f, 4273.0f, 4274.0f, 4275.0f, 4276.0f, 4277.0f, 4278.0f, 4279.0f, 4280.0f, 4281.0f, 4282.0f, 4283.0f, 4284.0f, 4285.0f, 4286.0f, 4287.0f, 4288.0f, 4289.0f, 4290.0f, 4291.0f, 4292.0f, 4293.0f, 4294.0f, 4295.0f, 4296.0f, 4297.0f, 4298.0f, 4299.0f, 4300.0f, 4301.0f, 4302.0f, 4303.0f, 4304.0f, 4305.0f, 4306.0f, 4307.0f, 4308.0f, 4309.0f, 4310.0f, 4311.0f, 4312.0f, 4313.0f, 4314.0f, 4315.0f, 4316.0f, 4317.0f, 4318.0f, 4319.0f, 4320.0f, 4321.0f, 4322.0f, 4323.0f, 4324.0f, 4325.0f, 4326.0f, 4327.0f, 4328.0f, 4329.0f, 4330.0f, 4331.0f, 4332.0f, 4333.0f, 4334.0f, 4335.0f, 4336.0f, 4337.0f, 4338.0f, 4339.0f, 
4340.0f, 4341.0f, 4342.0f, 4343.0f, 4344.0f, 4345.0f, 4346.0f, 4347.0f, 4348.0f, 4349.0f, 4350.0f, 4351.0f, 4352.0f, 4353.0f, 4354.0f, 4355.0f, 4356.0f, 4357.0f, 4358.0f, 4359.0f, 4360.0f, 4361.0f, 4362.0f, 4363.0f, 4364.0f, 4365.0f, 4366.0f, 4367.0f, 4368.0f, 4369.0f, 4370.0f, 4371.0f, 4372.0f, 4373.0f, 4374.0f, 4375.0f, 4376.0f, 4377.0f, 4378.0f, 4379.0f, 4380.0f, 4381.0f, 4382.0f, 4383.0f, 4384.0f, 4385.0f, 4386.0f, 4387.0f, 4388.0f, 4389.0f, 4390.0f, 4391.0f, 4392.0f, 4393.0f, 4394.0f, 4395.0f, 4396.0f, 4397.0f, 4398.0f, 4399.0f, 4400.0f, 4401.0f, 4402.0f, 4403.0f, 4404.0f, 4405.0f, 4406.0f, 4407.0f, 4408.0f, 4409.0f, 4410.0f, 4411.0f, 4412.0f, 4413.0f, 4414.0f, 4415.0f, 4416.0f, 4417.0f, 4418.0f, 4419.0f, 4420.0f, 4421.0f, 4422.0f, 4423.0f, 4424.0f, 4425.0f, 4426.0f, 4427.0f, 4428.0f, 4429.0f, 4430.0f, 4431.0f, 4432.0f, 4433.0f, 4434.0f, 4435.0f, 4436.0f, 4437.0f, 4438.0f, 4439.0f, 4440.0f, 4441.0f, 4442.0f, 4443.0f, 4444.0f, 4445.0f, 4446.0f, 4447.0f, 4448.0f, 4449.0f, 4450.0f, 4451.0f, 4452.0f, 4453.0f, 4454.0f, 4455.0f, 4456.0f, 4457.0f, 4458.0f, 4459.0f, 4460.0f, 4461.0f, 4462.0f, 4463.0f, 4464.0f, 4465.0f, 4466.0f, 4467.0f, 4468.0f, 4469.0f, 4470.0f, 4471.0f, 4472.0f, 4473.0f, 4474.0f, 4475.0f, 4476.0f, 4477.0f, 4478.0f, 4479.0f, 4480.0f, 4481.0f, 4482.0f, 4483.0f, 4484.0f, 4485.0f, 4486.0f, 4487.0f, 4488.0f, 4489.0f, 4490.0f, 4491.0f, 4492.0f, 4493.0f, 4494.0f, 4495.0f, 4496.0f, 4497.0f, 4498.0f, 4499.0f, 4500.0f, 4501.0f, 4502.0f, 4503.0f, 4504.0f, 4505.0f, 4506.0f, 4507.0f, 4508.0f, 4509.0f, 4510.0f, 4511.0f, 4512.0f, 4513.0f, 4514.0f, 4515.0f, 4516.0f, 4517.0f, 4518.0f, 4519.0f, 4520.0f, 4521.0f, 4522.0f, 4523.0f, 4524.0f, 4525.0f, 4526.0f, 4527.0f, 4528.0f, 4529.0f, 4530.0f, 4531.0f, 4532.0f, 4533.0f, 4534.0f, 4535.0f, 4536.0f, 4537.0f, 4538.0f, 4539.0f, 4540.0f, 4541.0f, 4542.0f, 4543.0f, 4544.0f, 4545.0f, 4546.0f, 4547.0f, 4548.0f, 4549.0f, 4550.0f, 4551.0f, 4552.0f, 4553.0f, 4554.0f, 4555.0f, 4556.0f, 4557.0f, 4558.0f, 4559.0f, 4560.0f, 4561.0f, 
4562.0f, 4563.0f, 4564.0f, 4565.0f, 4566.0f, 4567.0f, 4568.0f, 4569.0f, 4570.0f, 4571.0f, 4572.0f, 4573.0f, 4574.0f, 4575.0f, 4576.0f, 4577.0f, 4578.0f, 4579.0f, 4580.0f, 4581.0f, 4582.0f, 4583.0f, 4584.0f, 4585.0f, 4586.0f, 4587.0f, 4588.0f, 4589.0f, 4590.0f, 4591.0f, 4592.0f, 4593.0f, 4594.0f, 4595.0f, 4596.0f, 4597.0f, 4598.0f, 4599.0f, 4600.0f, 4601.0f, 4602.0f, 4603.0f, 4604.0f, 4605.0f, 4606.0f, 4607.0f, 4608.0f, 4609.0f, 4610.0f, 4611.0f, 4612.0f, 4613.0f, 4614.0f, 4615.0f, 4616.0f, 4617.0f, 4618.0f, 4619.0f, 4620.0f, 4621.0f, 4622.0f, 4623.0f, 4624.0f, 4625.0f, 4626.0f, 4627.0f, 4628.0f, 4629.0f, 4630.0f, 4631.0f, 4632.0f, 4633.0f, 4634.0f, 4635.0f, 4636.0f, 4637.0f, 4638.0f, 4639.0f, 4640.0f, 4641.0f, 4642.0f, 4643.0f, 4644.0f, 4645.0f, 4646.0f, 4647.0f, 4648.0f, 4649.0f, 4650.0f, 4651.0f, 4652.0f, 4653.0f, 4654.0f, 4655.0f, 4656.0f, 4657.0f, 4658.0f, 4659.0f, 4660.0f, 4661.0f, 4662.0f, 4663.0f, 4664.0f, 4665.0f, 4666.0f, 4667.0f, 4668.0f, 4669.0f, 4670.0f, 4671.0f, 4672.0f, 4673.0f, 4674.0f, 4675.0f, 4676.0f, 4677.0f, 4678.0f, 4679.0f, 4680.0f, 4681.0f, 4682.0f, 4683.0f, 4684.0f, 4685.0f, 4686.0f, 4687.0f, 4688.0f, 4689.0f, 4690.0f, 4691.0f, 4692.0f, 4693.0f, 4694.0f, 4695.0f, 4696.0f, 4697.0f, 4698.0f, 4699.0f, 4700.0f, 4701.0f, 4702.0f, 4703.0f, 4704.0f, 4705.0f, 4706.0f, 4707.0f, 4708.0f, 4709.0f, 4710.0f, 4711.0f, 4712.0f, 4713.0f, 4714.0f, 4715.0f, 4716.0f, 4717.0f, 4718.0f, 4719.0f, 4720.0f, 4721.0f, 4722.0f, 4723.0f, 4724.0f, 4725.0f, 4726.0f, 4727.0f, 4728.0f, 4729.0f, 4730.0f, 4731.0f, 4732.0f, 4733.0f, 4734.0f, 4735.0f, 4736.0f, 4737.0f, 4738.0f, 4739.0f, 4740.0f, 4741.0f, 4742.0f, 4743.0f, 4744.0f, 4745.0f, 4746.0f, 4747.0f, 4748.0f, 4749.0f, 4750.0f, 4751.0f, 4752.0f, 4753.0f, 4754.0f, 4755.0f, 4756.0f, 4757.0f, 4758.0f, 4759.0f, 4760.0f, 4761.0f, 4762.0f, 4763.0f, 4764.0f, 4765.0f, 4766.0f, 4767.0f, 4768.0f, 4769.0f, 4770.0f, 4771.0f, 4772.0f, 4773.0f, 4774.0f, 4775.0f, 4776.0f, 4777.0f, 4778.0f, 4779.0f, 4780.0f, 4781.0f, 4782.0f, 4783.0f, 
4784.0f, 4785.0f, 4786.0f, 4787.0f, 4788.0f, 4789.0f, 4790.0f, 4791.0f, 4792.0f, 4793.0f, 4794.0f, 4795.0f, 4796.0f, 4797.0f, 4798.0f, 4799.0f, 4800.0f, 4801.0f, 4802.0f, 4803.0f, 4804.0f, 4805.0f, 4806.0f, 4807.0f, 4808.0f, 4809.0f, 4810.0f, 4811.0f, 4812.0f, 4813.0f, 4814.0f, 4815.0f, 4816.0f, 4817.0f, 4818.0f, 4819.0f, 4820.0f, 4821.0f, 4822.0f, 4823.0f, 4824.0f, 4825.0f, 4826.0f, 4827.0f, 4828.0f, 4829.0f, 4830.0f, 4831.0f, 4832.0f, 4833.0f, 4834.0f, 4835.0f, 4836.0f, 4837.0f, 4838.0f, 4839.0f, 4840.0f, 4841.0f, 4842.0f, 4843.0f, 4844.0f, 4845.0f, 4846.0f, 4847.0f, 4848.0f, 4849.0f, 4850.0f, 4851.0f, 4852.0f, 4853.0f, 4854.0f, 4855.0f, 4856.0f, 4857.0f, 4858.0f, 4859.0f, 4860.0f, 4861.0f, 4862.0f, 4863.0f, 4864.0f, 4865.0f, 4866.0f, 4867.0f, 4868.0f, 4869.0f, 4870.0f, 4871.0f, 4872.0f, 4873.0f, 4874.0f, 4875.0f, 4876.0f, 4877.0f, 4878.0f, 4879.0f, 4880.0f, 4881.0f, 4882.0f, 4883.0f, 4884.0f, 4885.0f, 4886.0f, 4887.0f, 4888.0f, 4889.0f, 4890.0f, 4891.0f, 4892.0f, 4893.0f, 4894.0f, 4895.0f, 4896.0f, 4897.0f, 4898.0f, 4899.0f, 4900.0f, 4901.0f, 4902.0f, 4903.0f, 4904.0f, 4905.0f, 4906.0f, 4907.0f, 4908.0f, 4909.0f, 4910.0f, 4911.0f, 4912.0f, 4913.0f, 4914.0f, 4915.0f, 4916.0f, 4917.0f, 4918.0f, 4919.0f, 4920.0f, 4921.0f, 4922.0f, 4923.0f, 4924.0f, 4925.0f, 4926.0f, 4927.0f, 4928.0f, 4929.0f, 4930.0f, 4931.0f, 4932.0f, 4933.0f, 4934.0f, 4935.0f, 4936.0f, 4937.0f, 4938.0f, 4939.0f, 4940.0f, 4941.0f, 4942.0f, 4943.0f, 4944.0f, 4945.0f, 4946.0f, 4947.0f, 4948.0f, 4949.0f, 4950.0f, 4951.0f, 4952.0f, 4953.0f, 4954.0f, 4955.0f, 4956.0f, 4957.0f, 4958.0f, 4959.0f, 4960.0f, 4961.0f, 4962.0f, 4963.0f, 4964.0f, 4965.0f, 4966.0f, 4967.0f, 4968.0f, 4969.0f, 4970.0f, 4971.0f, 4972.0f, 4973.0f, 4974.0f, 4975.0f, 4976.0f, 4977.0f, 4978.0f, 4979.0f, 4980.0f, 4981.0f, 4982.0f, 4983.0f, 4984.0f, 4985.0f, 4986.0f, 4987.0f, 4988.0f, 4989.0f, 4990.0f, 4991.0f, 4992.0f, 4993.0f, 4994.0f, 4995.0f, 4996.0f, 4997.0f, 4998.0f, 4999.0f, 5000.0f, 5001.0f, 5002.0f, 5003.0f, 5004.0f, 5005.0f, 
5006.0f, 5007.0f, 5008.0f, 5009.0f, 5010.0f, 5011.0f, 5012.0f, 5013.0f, 5014.0f, 5015.0f, 5016.0f, 5017.0f, 5018.0f, 5019.0f, 5020.0f, 5021.0f, 5022.0f, 5023.0f, 5024.0f, 5025.0f, 5026.0f, 5027.0f, 5028.0f, 5029.0f, 5030.0f, 5031.0f, 5032.0f, 5033.0f, 5034.0f, 5035.0f, 5036.0f, 5037.0f, 5038.0f, 5039.0f, 5040.0f, 5041.0f, 5042.0f, 5043.0f, 5044.0f, 5045.0f, 5046.0f, 5047.0f, 5048.0f, 5049.0f, 5050.0f, 5051.0f, 5052.0f, 5053.0f, 5054.0f, 5055.0f, 5056.0f, 5057.0f, 5058.0f, 5059.0f, 5060.0f, 5061.0f, 5062.0f, 5063.0f, 5064.0f, 5065.0f, 5066.0f, 5067.0f, 5068.0f, 5069.0f, 5070.0f, 5071.0f, 5072.0f, 5073.0f, 5074.0f, 5075.0f, 5076.0f, 5077.0f, 5078.0f, 5079.0f, 5080.0f, 5081.0f, 5082.0f, 5083.0f, 5084.0f, 5085.0f, 5086.0f, 5087.0f, 5088.0f, 5089.0f, 5090.0f, 5091.0f, 5092.0f, 5093.0f, 5094.0f, 5095.0f, 5096.0f, 5097.0f, 5098.0f, 5099.0f, 5100.0f, 5101.0f, 5102.0f, 5103.0f, 5104.0f, 5105.0f, 5106.0f, 5107.0f, 5108.0f, 5109.0f, 5110.0f, 5111.0f, 5112.0f, 5113.0f, 5114.0f, 5115.0f, 5116.0f, 5117.0f, 5118.0f, 5119.0f, 5120.0f, 5121.0f, 5122.0f, 5123.0f, 5124.0f, 5125.0f, 5126.0f, 5127.0f, 5128.0f, 5129.0f, 5130.0f, 5131.0f, 5132.0f, 5133.0f, 5134.0f, 5135.0f, 5136.0f, 5137.0f, 5138.0f, 5139.0f, 5140.0f, 5141.0f, 5142.0f, 5143.0f, 5144.0f, 5145.0f, 5146.0f, 5147.0f, 5148.0f, 5149.0f, 5150.0f, 5151.0f, 5152.0f, 5153.0f, 5154.0f, 5155.0f, 5156.0f, 5157.0f, 5158.0f, 5159.0f, 5160.0f, 5161.0f, 5162.0f, 5163.0f, 5164.0f, 5165.0f, 5166.0f, 5167.0f, 5168.0f, 5169.0f, 5170.0f, 5171.0f, 5172.0f, 5173.0f, 5174.0f, 5175.0f, 5176.0f, 5177.0f, 5178.0f, 5179.0f, 5180.0f, 5181.0f, 5182.0f, 5183.0f, 5184.0f, 5185.0f, 5186.0f, 5187.0f, 5188.0f, 5189.0f, 5190.0f, 5191.0f, 5192.0f, 5193.0f, 5194.0f, 5195.0f, 5196.0f, 5197.0f, 5198.0f, 5199.0f, 5200.0f, 5201.0f, 5202.0f, 5203.0f, 5204.0f, 5205.0f, 5206.0f, 5207.0f, 5208.0f, 5209.0f, 5210.0f, 5211.0f, 5212.0f, 5213.0f, 5214.0f, 5215.0f, 5216.0f, 5217.0f, 5218.0f, 5219.0f, 5220.0f, 5221.0f, 5222.0f, 5223.0f, 5224.0f, 5225.0f, 5226.0f, 5227.0f, 
5228.0f, 5229.0f, 5230.0f, 5231.0f, 5232.0f, 5233.0f, 5234.0f, 5235.0f, 5236.0f, 5237.0f, 5238.0f, 5239.0f, 5240.0f, 5241.0f, 5242.0f, 5243.0f, 5244.0f, 5245.0f, 5246.0f, 5247.0f, 5248.0f, 5249.0f, 5250.0f, 5251.0f, 5252.0f, 5253.0f, 5254.0f, 5255.0f, 5256.0f, 5257.0f, 5258.0f, 5259.0f, 5260.0f, 5261.0f, 5262.0f, 5263.0f, 5264.0f, 5265.0f, 5266.0f, 5267.0f, 5268.0f, 5269.0f, 5270.0f, 5271.0f, 5272.0f, 5273.0f, 5274.0f, 5275.0f, 5276.0f, 5277.0f, 5278.0f, 5279.0f, 5280.0f, 5281.0f, 5282.0f, 5283.0f, 5284.0f, 5285.0f, 5286.0f, 5287.0f, 5288.0f, 5289.0f, 5290.0f, 5291.0f, 5292.0f, 5293.0f, 5294.0f, 5295.0f, 5296.0f, 5297.0f, 5298.0f, 5299.0f, 5300.0f, 5301.0f, 5302.0f, 5303.0f, 5304.0f, 5305.0f, 5306.0f, 5307.0f, 5308.0f, 5309.0f, 5310.0f, 5311.0f, 5312.0f, 5313.0f, 5314.0f, 5315.0f, 5316.0f, 5317.0f, 5318.0f, 5319.0f, 5320.0f, 5321.0f, 5322.0f, 5323.0f, 5324.0f, 5325.0f, 5326.0f, 5327.0f, 5328.0f, 5329.0f, 5330.0f, 5331.0f, 5332.0f, 5333.0f, 5334.0f, 5335.0f, 5336.0f, 5337.0f, 5338.0f, 5339.0f, 5340.0f, 5341.0f, 5342.0f, 5343.0f, 5344.0f, 5345.0f, 5346.0f, 5347.0f, 5348.0f, 5349.0f, 5350.0f, 5351.0f, 5352.0f, 5353.0f, 5354.0f, 5355.0f, 5356.0f, 5357.0f, 5358.0f, 5359.0f, 5360.0f, 5361.0f, 5362.0f, 5363.0f, 5364.0f, 5365.0f, 5366.0f, 5367.0f, 5368.0f, 5369.0f, 5370.0f, 5371.0f, 5372.0f, 5373.0f, 5374.0f, 5375.0f, 5376.0f, 5377.0f, 5378.0f, 5379.0f, 5380.0f, 5381.0f, 5382.0f, 5383.0f, 5384.0f, 5385.0f, 5386.0f, 5387.0f, 5388.0f, 5389.0f, 5390.0f, 5391.0f, 5392.0f, 5393.0f, 5394.0f, 5395.0f, 5396.0f, 5397.0f, 5398.0f, 5399.0f, 5400.0f, 5401.0f, 5402.0f, 5403.0f, 5404.0f, 5405.0f, 5406.0f, 5407.0f, 5408.0f, 5409.0f, 5410.0f, 5411.0f, 5412.0f, 5413.0f, 5414.0f, 5415.0f, 5416.0f, 5417.0f, 5418.0f, 5419.0f, 5420.0f, 5421.0f, 5422.0f, 5423.0f, 5424.0f, 5425.0f, 5426.0f, 5427.0f, 5428.0f, 5429.0f, 5430.0f, 5431.0f, 5432.0f, 5433.0f, 5434.0f, 5435.0f, 5436.0f, 5437.0f, 5438.0f, 5439.0f, 5440.0f, 5441.0f, 5442.0f, 5443.0f, 5444.0f, 5445.0f, 5446.0f, 5447.0f, 5448.0f, 5449.0f, 
5450.0f, 5451.0f, 5452.0f, 5453.0f, 5454.0f, 5455.0f, 5456.0f, 5457.0f, 5458.0f, 5459.0f, 5460.0f, 5461.0f, 5462.0f, 5463.0f, 5464.0f, 5465.0f, 5466.0f, 5467.0f, 5468.0f, 5469.0f, 5470.0f, 5471.0f, 5472.0f, 5473.0f, 5474.0f, 5475.0f, 5476.0f, 5477.0f, 5478.0f, 5479.0f, 5480.0f, 5481.0f, 5482.0f, 5483.0f, 5484.0f, 5485.0f, 5486.0f, 5487.0f, 5488.0f, 5489.0f, 5490.0f, 5491.0f, 5492.0f, 5493.0f, 5494.0f, 5495.0f, 5496.0f, 5497.0f, 5498.0f, 5499.0f, 5500.0f, 5501.0f, 5502.0f, 5503.0f, 5504.0f, 5505.0f, 5506.0f, 5507.0f, 5508.0f, 5509.0f, 5510.0f, 5511.0f, 5512.0f, 5513.0f, 5514.0f, 5515.0f, 5516.0f, 5517.0f, 5518.0f, 5519.0f, 5520.0f, 5521.0f, 5522.0f, 5523.0f, 5524.0f, 5525.0f, 5526.0f, 5527.0f, 5528.0f, 5529.0f, 5530.0f, 5531.0f, 5532.0f, 5533.0f, 5534.0f, 5535.0f, 5536.0f, 5537.0f, 5538.0f, 5539.0f, 5540.0f, 5541.0f, 5542.0f, 5543.0f, 5544.0f, 5545.0f, 5546.0f, 5547.0f, 5548.0f, 5549.0f, 5550.0f, 5551.0f, 5552.0f, 5553.0f, 5554.0f, 5555.0f, 5556.0f, 5557.0f, 5558.0f, 5559.0f, 5560.0f, 5561.0f, 5562.0f, 5563.0f, 5564.0f, 5565.0f, 5566.0f, 5567.0f, 5568.0f, 5569.0f, 5570.0f, 5571.0f, 5572.0f, 5573.0f, 5574.0f, 5575.0f, 5576.0f, 5577.0f, 5578.0f, 5579.0f, 5580.0f, 5581.0f, 5582.0f, 5583.0f, 5584.0f, 5585.0f, 5586.0f, 5587.0f, 5588.0f, 5589.0f, 5590.0f, 5591.0f, 5592.0f, 5593.0f, 5594.0f, 5595.0f, 5596.0f, 5597.0f, 5598.0f, 5599.0f, 5600.0f, 5601.0f, 5602.0f, 5603.0f, 5604.0f, 5605.0f, 5606.0f, 5607.0f, 5608.0f, 5609.0f, 5610.0f, 5611.0f, 5612.0f, 5613.0f, 5614.0f, 5615.0f, 5616.0f, 5617.0f, 5618.0f, 5619.0f, 5620.0f, 5621.0f, 5622.0f, 5623.0f, 5624.0f, 5625.0f, 5626.0f, 5627.0f, 5628.0f, 5629.0f, 5630.0f, 5631.0f, 5632.0f, 5633.0f, 5634.0f, 5635.0f, 5636.0f, 5637.0f, 5638.0f, 5639.0f, 5640.0f, 5641.0f, 5642.0f, 5643.0f, 5644.0f, 5645.0f, 5646.0f, 5647.0f, 5648.0f, 5649.0f, 5650.0f, 5651.0f, 5652.0f, 5653.0f, 5654.0f, 5655.0f, 5656.0f, 5657.0f, 5658.0f, 5659.0f, 5660.0f, 5661.0f, 5662.0f, 5663.0f, 5664.0f, 5665.0f, 5666.0f, 5667.0f, 5668.0f, 5669.0f, 5670.0f, 5671.0f, 
5672.0f, 5673.0f, 5674.0f, 5675.0f, 5676.0f, 5677.0f, 5678.0f, 5679.0f, 5680.0f, 5681.0f, 5682.0f, 5683.0f, 5684.0f, 5685.0f, 5686.0f, 5687.0f, 5688.0f, 5689.0f, 5690.0f, 5691.0f, 5692.0f, 5693.0f, 5694.0f, 5695.0f, 5696.0f, 5697.0f, 5698.0f, 5699.0f, 5700.0f, 5701.0f, 5702.0f, 5703.0f, 5704.0f, 5705.0f, 5706.0f, 5707.0f, 5708.0f, 5709.0f, 5710.0f, 5711.0f, 5712.0f, 5713.0f, 5714.0f, 5715.0f, 5716.0f, 5717.0f, 5718.0f, 5719.0f, 5720.0f, 5721.0f, 5722.0f, 5723.0f, 5724.0f, 5725.0f, 5726.0f, 5727.0f, 5728.0f, 5729.0f, 5730.0f, 5731.0f, 5732.0f, 5733.0f, 5734.0f, 5735.0f, 5736.0f, 5737.0f, 5738.0f, 5739.0f, 5740.0f, 5741.0f, 5742.0f, 5743.0f, 5744.0f, 5745.0f, 5746.0f, 5747.0f, 5748.0f, 5749.0f, 5750.0f, 5751.0f, 5752.0f, 5753.0f, 5754.0f, 5755.0f, 5756.0f, 5757.0f, 5758.0f, 5759.0f, 5760.0f, 5761.0f, 5762.0f, 5763.0f, 5764.0f, 5765.0f, 5766.0f, 5767.0f, 5768.0f, 5769.0f, 5770.0f, 5771.0f, 5772.0f, 5773.0f, 5774.0f, 5775.0f, 5776.0f, 5777.0f, 5778.0f, 5779.0f, 5780.0f, 5781.0f, 5782.0f, 5783.0f, 5784.0f, 5785.0f, 5786.0f, 5787.0f, 5788.0f, 5789.0f, 5790.0f, 5791.0f, 5792.0f, 5793.0f, 5794.0f, 5795.0f, 5796.0f, 5797.0f, 5798.0f, 5799.0f, 5800.0f, 5801.0f, 5802.0f, 5803.0f, 5804.0f, 5805.0f, 5806.0f, 5807.0f, 5808.0f, 5809.0f, 5810.0f, 5811.0f, 5812.0f, 5813.0f, 5814.0f, 5815.0f, 5816.0f, 5817.0f, 5818.0f, 5819.0f, 5820.0f, 5821.0f, 5822.0f, 5823.0f, 5824.0f, 5825.0f, 5826.0f, 5827.0f, 5828.0f, 5829.0f, 5830.0f, 5831.0f, 5832.0f, 5833.0f, 5834.0f, 5835.0f, 5836.0f, 5837.0f, 5838.0f, 5839.0f, 5840.0f, 5841.0f, 5842.0f, 5843.0f, 5844.0f, 5845.0f, 5846.0f, 5847.0f, 5848.0f, 5849.0f, 5850.0f, 5851.0f, 5852.0f, 5853.0f, 5854.0f, 5855.0f, 5856.0f, 5857.0f, 5858.0f, 5859.0f, 5860.0f, 5861.0f, 5862.0f, 5863.0f, 5864.0f, 5865.0f, 5866.0f, 5867.0f, 5868.0f, 5869.0f, 5870.0f, 5871.0f, 5872.0f, 5873.0f, 5874.0f, 5875.0f, 5876.0f, 5877.0f, 5878.0f, 5879.0f, 5880.0f, 5881.0f, 5882.0f, 5883.0f, 5884.0f, 5885.0f, 5886.0f, 5887.0f, 5888.0f, 5889.0f, 5890.0f, 5891.0f, 5892.0f, 5893.0f, 
5894.0f, 5895.0f, 5896.0f, 5897.0f, 5898.0f, 5899.0f, 5900.0f, 5901.0f, 5902.0f, 5903.0f, 5904.0f, 5905.0f, 5906.0f, 5907.0f, 5908.0f, 5909.0f, 5910.0f, 5911.0f, 5912.0f, 5913.0f, 5914.0f, 5915.0f, 5916.0f, 5917.0f, 5918.0f, 5919.0f, 5920.0f, 5921.0f, 5922.0f, 5923.0f, 5924.0f, 5925.0f, 5926.0f, 5927.0f, 5928.0f, 5929.0f, 5930.0f, 5931.0f, 5932.0f, 5933.0f, 5934.0f, 5935.0f, 5936.0f, 5937.0f, 5938.0f, 5939.0f, 5940.0f, 5941.0f, 5942.0f, 5943.0f, 5944.0f, 5945.0f, 5946.0f, 5947.0f, 5948.0f, 5949.0f, 5950.0f, 5951.0f, 5952.0f, 5953.0f, 5954.0f, 5955.0f, 5956.0f, 5957.0f, 5958.0f, 5959.0f, 5960.0f, 5961.0f, 5962.0f, 5963.0f, 5964.0f, 5965.0f, 5966.0f, 5967.0f, 5968.0f, 5969.0f, 5970.0f, 5971.0f, 5972.0f, 5973.0f, 5974.0f, 5975.0f, 5976.0f, 5977.0f, 5978.0f, 5979.0f, 5980.0f, 5981.0f, 5982.0f, 5983.0f, 5984.0f, 5985.0f, 5986.0f, 5987.0f, 5988.0f, 5989.0f, 5990.0f, 5991.0f, 5992.0f, 5993.0f, 5994.0f, 5995.0f, 5996.0f, 5997.0f, 5998.0f, 5999.0f, 6000.0f, 6001.0f, 6002.0f, 6003.0f, 6004.0f, 6005.0f, 6006.0f, 6007.0f, 6008.0f, 6009.0f, 6010.0f, 6011.0f, 6012.0f, 6013.0f, 6014.0f, 6015.0f, 6016.0f, 6017.0f, 6018.0f, 6019.0f, 6020.0f, 6021.0f, 6022.0f, 6023.0f, 6024.0f, 6025.0f, 6026.0f, 6027.0f, 6028.0f, 6029.0f, 6030.0f, 6031.0f, 6032.0f, 6033.0f, 6034.0f, 6035.0f, 6036.0f, 6037.0f, 6038.0f, 6039.0f, 6040.0f, 6041.0f, 6042.0f, 6043.0f, 6044.0f, 6045.0f, 6046.0f, 6047.0f, 6048.0f, 6049.0f, 6050.0f, 6051.0f, 6052.0f, 6053.0f, 6054.0f, 6055.0f, 6056.0f, 6057.0f, 6058.0f, 6059.0f, 6060.0f, 6061.0f, 6062.0f, 6063.0f, 6064.0f, 6065.0f, 6066.0f, 6067.0f, 6068.0f, 6069.0f, 6070.0f, 6071.0f, 6072.0f, 6073.0f, 6074.0f, 6075.0f, 6076.0f, 6077.0f, 6078.0f, 6079.0f, 6080.0f, 6081.0f, 6082.0f, 6083.0f, 6084.0f, 6085.0f, 6086.0f, 6087.0f, 6088.0f, 6089.0f, 6090.0f, 6091.0f, 6092.0f, 6093.0f, 6094.0f, 6095.0f, 6096.0f, 6097.0f, 6098.0f, 6099.0f, 6100.0f, 6101.0f, 6102.0f, 6103.0f, 6104.0f, 6105.0f, 6106.0f, 6107.0f, 6108.0f, 6109.0f, 6110.0f, 6111.0f, 6112.0f, 6113.0f, 6114.0f, 6115.0f, 
6116.0f, 6117.0f, 6118.0f, 6119.0f, 6120.0f, 6121.0f, 6122.0f, 6123.0f, 6124.0f, 6125.0f, 6126.0f, 6127.0f, 6128.0f, 6129.0f, 6130.0f, 6131.0f, 6132.0f, 6133.0f, 6134.0f, 6135.0f, 6136.0f, 6137.0f, 6138.0f, 6139.0f, 6140.0f, 6141.0f, 6142.0f, 6143.0f, 6144.0f, 6145.0f, 6146.0f, 6147.0f, 6148.0f, 6149.0f, 6150.0f, 6151.0f, 6152.0f, 6153.0f, 6154.0f, 6155.0f, 6156.0f, 6157.0f, 6158.0f, 6159.0f, 6160.0f, 6161.0f, 6162.0f, 6163.0f, 6164.0f, 6165.0f, 6166.0f, 6167.0f, 6168.0f, 6169.0f, 6170.0f, 6171.0f, 6172.0f, 6173.0f, 6174.0f, 6175.0f, 6176.0f, 6177.0f, 6178.0f, 6179.0f, 6180.0f, 6181.0f, 6182.0f, 6183.0f, 6184.0f, 6185.0f, 6186.0f, 6187.0f, 6188.0f, 6189.0f, 6190.0f, 6191.0f, 6192.0f, 6193.0f, 6194.0f, 6195.0f, 6196.0f, 6197.0f, 6198.0f, 6199.0f, 6200.0f, 6201.0f, 6202.0f, 6203.0f, 6204.0f, 6205.0f, 6206.0f, 6207.0f, 6208.0f, 6209.0f, 6210.0f, 6211.0f, 6212.0f, 6213.0f, 6214.0f, 6215.0f, 6216.0f, 6217.0f, 6218.0f, 6219.0f, 6220.0f, 6221.0f, 6222.0f, 6223.0f, 6224.0f, 6225.0f, 6226.0f, 6227.0f, 6228.0f, 6229.0f, 6230.0f, 6231.0f, 6232.0f, 6233.0f, 6234.0f, 6235.0f, 6236.0f, 6237.0f, 6238.0f, 6239.0f, 6240.0f, 6241.0f, 6242.0f, 6243.0f, 6244.0f, 6245.0f, 6246.0f, 6247.0f, 6248.0f, 6249.0f, 6250.0f, 6251.0f, 6252.0f, 6253.0f, 6254.0f, 6255.0f, 6256.0f, 6257.0f, 6258.0f, 6259.0f, 6260.0f, 6261.0f, 6262.0f, 6263.0f, 6264.0f, 6265.0f, 6266.0f, 6267.0f, 6268.0f, 6269.0f, 6270.0f, 6271.0f, 6272.0f, 6273.0f, 6274.0f, 6275.0f, 6276.0f, 6277.0f, 6278.0f, 6279.0f, 6280.0f, 6281.0f, 6282.0f, 6283.0f, 6284.0f, 6285.0f, 6286.0f, 6287.0f, 6288.0f, 6289.0f, 6290.0f, 6291.0f, 6292.0f, 6293.0f, 6294.0f, 6295.0f, 6296.0f, 6297.0f, 6298.0f, 6299.0f, 6300.0f, 6301.0f, 6302.0f, 6303.0f, 6304.0f, 6305.0f, 6306.0f, 6307.0f, 6308.0f, 6309.0f, 6310.0f, 6311.0f, 6312.0f, 6313.0f, 6314.0f, 6315.0f, 6316.0f, 6317.0f, 6318.0f, 6319.0f, 6320.0f, 6321.0f, 6322.0f, 6323.0f, 6324.0f, 6325.0f, 6326.0f, 6327.0f, 6328.0f, 6329.0f, 6330.0f, 6331.0f, 6332.0f, 6333.0f, 6334.0f, 6335.0f, 6336.0f, 6337.0f, 
6338.0f, 6339.0f, 6340.0f, 6341.0f, 6342.0f, 6343.0f, 6344.0f, 6345.0f, 6346.0f, 6347.0f, 6348.0f, 6349.0f, 6350.0f, 6351.0f, 6352.0f, 6353.0f, 6354.0f, 6355.0f, 6356.0f, 6357.0f, 6358.0f, 6359.0f, 6360.0f, 6361.0f, 6362.0f, 6363.0f, 6364.0f, 6365.0f, 6366.0f, 6367.0f, 6368.0f, 6369.0f, 6370.0f, 6371.0f, 6372.0f, 6373.0f, 6374.0f, 6375.0f, 6376.0f, 6377.0f, 6378.0f, 6379.0f, 6380.0f, 6381.0f, 6382.0f, 6383.0f, 6384.0f, 6385.0f, 6386.0f, 6387.0f, 6388.0f, 6389.0f, 6390.0f, 6391.0f, 6392.0f, 6393.0f, 6394.0f, 6395.0f, 6396.0f, 6397.0f, 6398.0f, 6399.0f, 6400.0f, 6401.0f, 6402.0f, 6403.0f, 6404.0f, 6405.0f, 6406.0f, 6407.0f, 6408.0f, 6409.0f, 6410.0f, 6411.0f, 6412.0f, 6413.0f, 6414.0f, 6415.0f, 6416.0f, 6417.0f, 6418.0f, 6419.0f, 6420.0f, 6421.0f, 6422.0f, 6423.0f, 6424.0f, 6425.0f, 6426.0f, 6427.0f, 6428.0f, 6429.0f, 6430.0f, 6431.0f, 6432.0f, 6433.0f, 6434.0f, 6435.0f, 6436.0f, 6437.0f, 6438.0f, 6439.0f, 6440.0f, 6441.0f, 6442.0f, 6443.0f, 6444.0f, 6445.0f, 6446.0f, 6447.0f, 6448.0f, 6449.0f, 6450.0f, 6451.0f, 6452.0f, 6453.0f, 6454.0f, 6455.0f, 6456.0f, 6457.0f, 6458.0f, 6459.0f, 6460.0f, 6461.0f, 6462.0f, 6463.0f, 6464.0f, 6465.0f, 6466.0f, 6467.0f, 6468.0f, 6469.0f, 6470.0f, 6471.0f, 6472.0f, 6473.0f, 6474.0f, 6475.0f, 6476.0f, 6477.0f, 6478.0f, 6479.0f, 6480.0f, 6481.0f, 6482.0f, 6483.0f, 6484.0f, 6485.0f, 6486.0f, 6487.0f, 6488.0f, 6489.0f, 6490.0f, 6491.0f, 6492.0f, 6493.0f, 6494.0f, 6495.0f, 6496.0f, 6497.0f, 6498.0f, 6499.0f, 6500.0f, 6501.0f, 6502.0f, 6503.0f, 6504.0f, 6505.0f, 6506.0f, 6507.0f, 6508.0f, 6509.0f, 6510.0f, 6511.0f, 6512.0f, 6513.0f, 6514.0f, 6515.0f, 6516.0f, 6517.0f, 6518.0f, 6519.0f, 6520.0f, 6521.0f, 6522.0f, 6523.0f, 6524.0f, 6525.0f, 6526.0f, 6527.0f, 6528.0f, 6529.0f, 6530.0f, 6531.0f, 6532.0f, 6533.0f, 6534.0f, 6535.0f, 6536.0f, 6537.0f, 6538.0f, 6539.0f, 6540.0f, 6541.0f, 6542.0f, 6543.0f, 6544.0f, 6545.0f, 6546.0f, 6547.0f, 6548.0f, 6549.0f, 6550.0f, 6551.0f, 6552.0f, 6553.0f, 6554.0f, 6555.0f, 6556.0f, 6557.0f, 6558.0f, 6559.0f, 
6560.0f, 6561.0f, 6562.0f, 6563.0f, 6564.0f, 6565.0f, 6566.0f, 6567.0f, 6568.0f, 6569.0f, 6570.0f, 6571.0f, 6572.0f, 6573.0f, 6574.0f, 6575.0f, 6576.0f, 6577.0f, 6578.0f, 6579.0f, 6580.0f, 6581.0f, 6582.0f, 6583.0f, 6584.0f, 6585.0f, 6586.0f, 6587.0f, 6588.0f, 6589.0f, 6590.0f, 6591.0f, 6592.0f, 6593.0f, 6594.0f, 6595.0f, 6596.0f, 6597.0f, 6598.0f, 6599.0f, 6600.0f, 6601.0f, 6602.0f, 6603.0f, 6604.0f, 6605.0f, 6606.0f, 6607.0f, 6608.0f, 6609.0f, 6610.0f, 6611.0f, 6612.0f, 6613.0f, 6614.0f, 6615.0f, 6616.0f, 6617.0f, 6618.0f, 6619.0f, 6620.0f, 6621.0f, 6622.0f, 6623.0f, 6624.0f, 6625.0f, 6626.0f, 6627.0f, 6628.0f, 6629.0f, 6630.0f, 6631.0f, 6632.0f, 6633.0f, 6634.0f, 6635.0f, 6636.0f, 6637.0f, 6638.0f, 6639.0f, 6640.0f, 6641.0f, 6642.0f, 6643.0f, 6644.0f, 6645.0f, 6646.0f, 6647.0f, 6648.0f, 6649.0f, 6650.0f, 6651.0f, 6652.0f, 6653.0f, 6654.0f, 6655.0f, 6656.0f, 6657.0f, 6658.0f, 6659.0f, 6660.0f, 6661.0f, 6662.0f, 6663.0f, 6664.0f, 6665.0f, 6666.0f, 6667.0f, 6668.0f, 6669.0f, 6670.0f, 6671.0f, 6672.0f, 6673.0f, 6674.0f, 6675.0f, 6676.0f, 6677.0f, 6678.0f, 6679.0f, 6680.0f, 6681.0f, 6682.0f, 6683.0f, 6684.0f, 6685.0f, 6686.0f, 6687.0f, 6688.0f, 6689.0f, 6690.0f, 6691.0f, 6692.0f, 6693.0f, 6694.0f, 6695.0f, 6696.0f, 6697.0f, 6698.0f, 6699.0f, 6700.0f, 6701.0f, 6702.0f, 6703.0f, 6704.0f, 6705.0f, 6706.0f, 6707.0f, 6708.0f, 6709.0f, 6710.0f, 6711.0f, 6712.0f, 6713.0f, 6714.0f, 6715.0f, 6716.0f, 6717.0f, 6718.0f, 6719.0f, 6720.0f, 6721.0f, 6722.0f, 6723.0f, 6724.0f, 6725.0f, 6726.0f, 6727.0f, 6728.0f, 6729.0f, 6730.0f, 6731.0f, 6732.0f, 6733.0f, 6734.0f, 6735.0f, 6736.0f, 6737.0f, 6738.0f, 6739.0f, 6740.0f, 6741.0f, 6742.0f, 6743.0f, 6744.0f, 6745.0f, 6746.0f, 6747.0f, 6748.0f, 6749.0f, 6750.0f, 6751.0f, 6752.0f, 6753.0f, 6754.0f, 6755.0f, 6756.0f, 6757.0f, 6758.0f, 6759.0f, 6760.0f, 6761.0f, 6762.0f, 6763.0f, 6764.0f, 6765.0f, 6766.0f, 6767.0f, 6768.0f, 6769.0f, 6770.0f, 6771.0f, 6772.0f, 6773.0f, 6774.0f, 6775.0f, 6776.0f, 6777.0f, 6778.0f, 6779.0f, 6780.0f, 6781.0f, 
6782.0f, 6783.0f, 6784.0f, 6785.0f, 6786.0f, 6787.0f, 6788.0f, 6789.0f, 6790.0f, 6791.0f, 6792.0f, 6793.0f, 6794.0f, 6795.0f, 6796.0f, 6797.0f, 6798.0f, 6799.0f, 6800.0f, 6801.0f, 6802.0f, 6803.0f, 6804.0f, 6805.0f, 6806.0f, 6807.0f, 6808.0f, 6809.0f, 6810.0f, 6811.0f, 6812.0f, 6813.0f, 6814.0f, 6815.0f, 6816.0f, 6817.0f, 6818.0f, 6819.0f, 6820.0f, 6821.0f, 6822.0f, 6823.0f, 6824.0f, 6825.0f, 6826.0f, 6827.0f, 6828.0f, 6829.0f, 6830.0f, 6831.0f, 6832.0f, 6833.0f, 6834.0f, 6835.0f, 6836.0f, 6837.0f, 6838.0f, 6839.0f, 6840.0f, 6841.0f, 6842.0f, 6843.0f, 6844.0f, 6845.0f, 6846.0f, 6847.0f, 6848.0f, 6849.0f, 6850.0f, 6851.0f, 6852.0f, 6853.0f, 6854.0f, 6855.0f, 6856.0f, 6857.0f, 6858.0f, 6859.0f, 6860.0f, 6861.0f, 6862.0f, 6863.0f, 6864.0f, 6865.0f, 6866.0f, 6867.0f, 6868.0f, 6869.0f, 6870.0f, 6871.0f, 6872.0f, 6873.0f, 6874.0f, 6875.0f, 6876.0f, 6877.0f, 6878.0f, 6879.0f, 6880.0f, 6881.0f, 6882.0f, 6883.0f, 6884.0f, 6885.0f, 6886.0f, 6887.0f, 6888.0f, 6889.0f, 6890.0f, 6891.0f, 6892.0f, 6893.0f, 6894.0f, 6895.0f, 6896.0f, 6897.0f, 6898.0f, 6899.0f, 6900.0f, 6901.0f, 6902.0f, 6903.0f, 6904.0f, 6905.0f, 6906.0f, 6907.0f, 6908.0f, 6909.0f, 6910.0f, 6911.0f, 6912.0f, 6913.0f, 6914.0f, 6915.0f, 6916.0f, 6917.0f, 6918.0f, 6919.0f, 6920.0f, 6921.0f, 6922.0f, 6923.0f, 6924.0f, 6925.0f, 6926.0f, 6927.0f, 6928.0f, 6929.0f, 6930.0f, 6931.0f, 6932.0f, 6933.0f, 6934.0f, 6935.0f, 6936.0f, 6937.0f, 6938.0f, 6939.0f, 6940.0f, 6941.0f, 6942.0f, 6943.0f, 6944.0f, 6945.0f, 6946.0f, 6947.0f, 6948.0f, 6949.0f, 6950.0f, 6951.0f, 6952.0f, 6953.0f, 6954.0f, 6955.0f, 6956.0f, 6957.0f, 6958.0f, 6959.0f, 6960.0f, 6961.0f, 6962.0f, 6963.0f, 6964.0f, 6965.0f, 6966.0f, 6967.0f, 6968.0f, 6969.0f, 6970.0f, 6971.0f, 6972.0f, 6973.0f, 6974.0f, 6975.0f, 6976.0f, 6977.0f, 6978.0f, 6979.0f, 6980.0f, 6981.0f, 6982.0f, 6983.0f, 6984.0f, 6985.0f, 6986.0f, 6987.0f, 6988.0f, 6989.0f, 6990.0f, 6991.0f, 6992.0f, 6993.0f, 6994.0f, 6995.0f, 6996.0f, 6997.0f, 6998.0f, 6999.0f, 7000.0f, 7001.0f, 7002.0f, 7003.0f, 
7004.0f, 7005.0f, 7006.0f, 7007.0f, 7008.0f, 7009.0f, 7010.0f, 7011.0f, 7012.0f, 7013.0f, 7014.0f, 7015.0f, 7016.0f, 7017.0f, 7018.0f, 7019.0f, 7020.0f, 7021.0f, 7022.0f, 7023.0f, 7024.0f, 7025.0f, 7026.0f, 7027.0f, 7028.0f, 7029.0f, 7030.0f, 7031.0f, 7032.0f, 7033.0f, 7034.0f, 7035.0f, 7036.0f, 7037.0f, 7038.0f, 7039.0f, 7040.0f, 7041.0f, 7042.0f, 7043.0f, 7044.0f, 7045.0f, 7046.0f, 7047.0f, 7048.0f, 7049.0f, 7050.0f, 7051.0f, 7052.0f, 7053.0f, 7054.0f, 7055.0f, 7056.0f, 7057.0f, 7058.0f, 7059.0f, 7060.0f, 7061.0f, 7062.0f, 7063.0f, 7064.0f, 7065.0f, 7066.0f, 7067.0f, 7068.0f, 7069.0f, 7070.0f, 7071.0f, 7072.0f, 7073.0f, 7074.0f, 7075.0f, 7076.0f, 7077.0f, 7078.0f, 7079.0f, 7080.0f, 7081.0f, 7082.0f, 7083.0f, 7084.0f, 7085.0f, 7086.0f, 7087.0f, 7088.0f, 7089.0f, 7090.0f, 7091.0f, 7092.0f, 7093.0f, 7094.0f, 7095.0f, 7096.0f, 7097.0f, 7098.0f, 7099.0f, 7100.0f, 7101.0f, 7102.0f, 7103.0f, 7104.0f, 7105.0f, 7106.0f, 7107.0f, 7108.0f, 7109.0f, 7110.0f, 7111.0f, 7112.0f, 7113.0f, 7114.0f, 7115.0f, 7116.0f, 7117.0f, 7118.0f, 7119.0f, 7120.0f, 7121.0f, 7122.0f, 7123.0f, 7124.0f, 7125.0f, 7126.0f, 7127.0f, 7128.0f, 7129.0f, 7130.0f, 7131.0f, 7132.0f, 7133.0f, 7134.0f, 7135.0f, 7136.0f, 7137.0f, 7138.0f, 7139.0f, 7140.0f, 7141.0f, 7142.0f, 7143.0f, 7144.0f, 7145.0f, 7146.0f, 7147.0f, 7148.0f, 7149.0f, 7150.0f, 7151.0f, 7152.0f, 7153.0f, 7154.0f, 7155.0f, 7156.0f, 7157.0f, 7158.0f, 7159.0f, 7160.0f, 7161.0f, 7162.0f, 7163.0f, 7164.0f, 7165.0f, 7166.0f, 7167.0f, 7168.0f, 7169.0f, 7170.0f, 7171.0f, 7172.0f, 7173.0f, 7174.0f, 7175.0f, 7176.0f, 7177.0f, 7178.0f, 7179.0f, 7180.0f, 7181.0f, 7182.0f, 7183.0f, 7184.0f, 7185.0f, 7186.0f, 7187.0f, 7188.0f, 7189.0f, 7190.0f, 7191.0f, 7192.0f, 7193.0f, 7194.0f, 7195.0f, 7196.0f, 7197.0f, 7198.0f, 7199.0f, 7200.0f, 7201.0f, 7202.0f, 7203.0f, 7204.0f, 7205.0f, 7206.0f, 7207.0f, 7208.0f, 7209.0f, 7210.0f, 7211.0f, 7212.0f, 7213.0f, 7214.0f, 7215.0f, 7216.0f, 7217.0f, 7218.0f, 7219.0f, 7220.0f, 7221.0f, 7222.0f, 7223.0f, 7224.0f, 7225.0f, 
7226.0f, 7227.0f, 7228.0f, 7229.0f, 7230.0f, 7231.0f, 7232.0f, 7233.0f, 7234.0f, 7235.0f, 7236.0f, 7237.0f, 7238.0f, 7239.0f, 7240.0f, 7241.0f, 7242.0f, 7243.0f, 7244.0f, 7245.0f, 7246.0f, 7247.0f, 7248.0f, 7249.0f, 7250.0f, 7251.0f, 7252.0f, 7253.0f, 7254.0f, 7255.0f, 7256.0f, 7257.0f, 7258.0f, 7259.0f, 7260.0f, 7261.0f, 7262.0f, 7263.0f, 7264.0f, 7265.0f, 7266.0f, 7267.0f, 7268.0f, 7269.0f, 7270.0f, 7271.0f, 7272.0f, 7273.0f, 7274.0f, 7275.0f, 7276.0f, 7277.0f, 7278.0f, 7279.0f, 7280.0f, 7281.0f, 7282.0f, 7283.0f, 7284.0f, 7285.0f, 7286.0f, 7287.0f, 7288.0f, 7289.0f, 7290.0f, 7291.0f, 7292.0f, 7293.0f, 7294.0f, 7295.0f, 7296.0f, 7297.0f, 7298.0f, 7299.0f, 7300.0f, 7301.0f, 7302.0f, 7303.0f, 7304.0f, 7305.0f, 7306.0f, 7307.0f, 7308.0f, 7309.0f, 7310.0f, 7311.0f, 7312.0f, 7313.0f, 7314.0f, 7315.0f, 7316.0f, 7317.0f, 7318.0f, 7319.0f, 7320.0f, 7321.0f, 7322.0f, 7323.0f, 7324.0f, 7325.0f, 7326.0f, 7327.0f, 7328.0f, 7329.0f, 7330.0f, 7331.0f, 7332.0f, 7333.0f, 7334.0f, 7335.0f, 7336.0f, 7337.0f, 7338.0f, 7339.0f, 7340.0f, 7341.0f, 7342.0f, 7343.0f, 7344.0f, 7345.0f, 7346.0f, 7347.0f, 7348.0f, 7349.0f, 7350.0f, 7351.0f, 7352.0f, 7353.0f, 7354.0f, 7355.0f, 7356.0f, 7357.0f, 7358.0f, 7359.0f, 7360.0f, 7361.0f, 7362.0f, 7363.0f, 7364.0f, 7365.0f, 7366.0f, 7367.0f, 7368.0f, 7369.0f, 7370.0f, 7371.0f, 7372.0f, 7373.0f, 7374.0f, 7375.0f, 7376.0f, 7377.0f, 7378.0f, 7379.0f, 7380.0f, 7381.0f, 7382.0f, 7383.0f, 7384.0f, 7385.0f, 7386.0f, 7387.0f, 7388.0f, 7389.0f, 7390.0f, 7391.0f, 7392.0f, 7393.0f, 7394.0f, 7395.0f, 7396.0f, 7397.0f, 7398.0f, 7399.0f, 7400.0f, 7401.0f, 7402.0f, 7403.0f, 7404.0f, 7405.0f, 7406.0f, 7407.0f, 7408.0f, 7409.0f, 7410.0f, 7411.0f, 7412.0f, 7413.0f, 7414.0f, 7415.0f, 7416.0f, 7417.0f, 7418.0f, 7419.0f, 7420.0f, 7421.0f, 7422.0f, 7423.0f, 7424.0f, 7425.0f, 7426.0f, 7427.0f, 7428.0f, 7429.0f, 7430.0f, 7431.0f, 7432.0f, 7433.0f, 7434.0f, 7435.0f, 7436.0f, 7437.0f, 7438.0f, 7439.0f, 7440.0f, 7441.0f, 7442.0f, 7443.0f, 7444.0f, 7445.0f, 7446.0f, 7447.0f, 
7448.0f, 7449.0f, 7450.0f, 7451.0f, 7452.0f, 7453.0f, 7454.0f, 7455.0f, 7456.0f, 7457.0f, 7458.0f, 7459.0f, 7460.0f, 7461.0f, 7462.0f, 7463.0f, 7464.0f, 7465.0f, 7466.0f, 7467.0f, 7468.0f, 7469.0f, 7470.0f, 7471.0f, 7472.0f, 7473.0f, 7474.0f, 7475.0f, 7476.0f, 7477.0f, 7478.0f, 7479.0f, 7480.0f, 7481.0f, 7482.0f, 7483.0f, 7484.0f, 7485.0f, 7486.0f, 7487.0f, 7488.0f, 7489.0f, 7490.0f, 7491.0f, 7492.0f, 7493.0f, 7494.0f, 7495.0f, 7496.0f, 7497.0f, 7498.0f, 7499.0f, 7500.0f, 7501.0f, 7502.0f, 7503.0f, 7504.0f, 7505.0f, 7506.0f, 7507.0f, 7508.0f, 7509.0f, 7510.0f, 7511.0f, 7512.0f, 7513.0f, 7514.0f, 7515.0f, 7516.0f, 7517.0f, 7518.0f, 7519.0f, 7520.0f, 7521.0f, 7522.0f, 7523.0f, 7524.0f, 7525.0f, 7526.0f, 7527.0f, 7528.0f, 7529.0f, 7530.0f, 7531.0f, 7532.0f, 7533.0f, 7534.0f, 7535.0f, 7536.0f, 7537.0f, 7538.0f, 7539.0f, 7540.0f, 7541.0f, 7542.0f, 7543.0f, 7544.0f, 7545.0f, 7546.0f, 7547.0f, 7548.0f, 7549.0f, 7550.0f, 7551.0f, 7552.0f, 7553.0f, 7554.0f, 7555.0f, 7556.0f, 7557.0f, 7558.0f, 7559.0f, 7560.0f, 7561.0f, 7562.0f, 7563.0f, 7564.0f, 7565.0f, 7566.0f, 7567.0f, 7568.0f, 7569.0f, 7570.0f, 7571.0f, 7572.0f, 7573.0f, 7574.0f, 7575.0f, 7576.0f, 7577.0f, 7578.0f, 7579.0f, 7580.0f, 7581.0f, 7582.0f, 7583.0f, 7584.0f, 7585.0f, 7586.0f, 7587.0f, 7588.0f, 7589.0f, 7590.0f, 7591.0f, 7592.0f, 7593.0f, 7594.0f, 7595.0f, 7596.0f, 7597.0f, 7598.0f, 7599.0f, 7600.0f, 7601.0f, 7602.0f, 7603.0f, 7604.0f, 7605.0f, 7606.0f, 7607.0f, 7608.0f, 7609.0f, 7610.0f, 7611.0f, 7612.0f, 7613.0f, 7614.0f, 7615.0f, 7616.0f, 7617.0f, 7618.0f, 7619.0f, 7620.0f, 7621.0f, 7622.0f, 7623.0f, 7624.0f, 7625.0f, 7626.0f, 7627.0f, 7628.0f, 7629.0f, 7630.0f, 7631.0f, 7632.0f, 7633.0f, 7634.0f, 7635.0f, 7636.0f, 7637.0f, 7638.0f, 7639.0f, 7640.0f, 7641.0f, 7642.0f, 7643.0f, 7644.0f, 7645.0f, 7646.0f, 7647.0f, 7648.0f, 7649.0f, 7650.0f, 7651.0f, 7652.0f, 7653.0f, 7654.0f, 7655.0f, 7656.0f, 7657.0f, 7658.0f, 7659.0f, 7660.0f, 7661.0f, 7662.0f, 7663.0f, 7664.0f, 7665.0f, 7666.0f, 7667.0f, 7668.0f, 7669.0f, 
7670.0f, 7671.0f, 7672.0f, 7673.0f, 7674.0f, 7675.0f, 7676.0f, 7677.0f, 7678.0f, 7679.0f, 7680.0f, 7681.0f, 7682.0f, 7683.0f, 7684.0f, 7685.0f, 7686.0f, 7687.0f, 7688.0f, 7689.0f, 7690.0f, 7691.0f, 7692.0f, 7693.0f, 7694.0f, 7695.0f, 7696.0f, 7697.0f, 7698.0f, 7699.0f, 7700.0f, 7701.0f, 7702.0f, 7703.0f, 7704.0f, 7705.0f, 7706.0f, 7707.0f, 7708.0f, 7709.0f, 7710.0f, 7711.0f, 7712.0f, 7713.0f, 7714.0f, 7715.0f, 7716.0f, 7717.0f, 7718.0f, 7719.0f, 7720.0f, 7721.0f, 7722.0f, 7723.0f, 7724.0f, 7725.0f, 7726.0f, 7727.0f, 7728.0f, 7729.0f, 7730.0f, 7731.0f, 7732.0f, 7733.0f, 7734.0f, 7735.0f, 7736.0f, 7737.0f, 7738.0f, 7739.0f, 7740.0f, 7741.0f, 7742.0f, 7743.0f, 7744.0f, 7745.0f, 7746.0f, 7747.0f, 7748.0f, 7749.0f, 7750.0f, 7751.0f, 7752.0f, 7753.0f, 7754.0f, 7755.0f, 7756.0f, 7757.0f, 7758.0f, 7759.0f, 7760.0f, 7761.0f, 7762.0f, 7763.0f, 7764.0f, 7765.0f, 7766.0f, 7767.0f, 7768.0f, 7769.0f, 7770.0f, 7771.0f, 7772.0f, 7773.0f, 7774.0f, 7775.0f, 7776.0f, 7777.0f, 7778.0f, 7779.0f, 7780.0f, 7781.0f, 7782.0f, 7783.0f, 7784.0f, 7785.0f, 7786.0f, 7787.0f, 7788.0f, 7789.0f, 7790.0f, 7791.0f, 7792.0f, 7793.0f, 7794.0f, 7795.0f, 7796.0f, 7797.0f, 7798.0f, 7799.0f, 7800.0f, 7801.0f, 7802.0f, 7803.0f, 7804.0f, 7805.0f, 7806.0f, 7807.0f, 7808.0f, 7809.0f, 7810.0f, 7811.0f, 7812.0f, 7813.0f, 7814.0f, 7815.0f, 7816.0f, 7817.0f, 7818.0f, 7819.0f, 7820.0f, 7821.0f, 7822.0f, 7823.0f, 7824.0f, 7825.0f, 7826.0f, 7827.0f, 7828.0f, 7829.0f, 7830.0f, 7831.0f, 7832.0f, 7833.0f, 7834.0f, 7835.0f, 7836.0f, 7837.0f, 7838.0f, 7839.0f, 7840.0f, 7841.0f, 7842.0f, 7843.0f, 7844.0f, 7845.0f, 7846.0f, 7847.0f, 7848.0f, 7849.0f, 7850.0f, 7851.0f, 7852.0f, 7853.0f, 7854.0f, 7855.0f, 7856.0f, 7857.0f, 7858.0f, 7859.0f, 7860.0f, 7861.0f, 7862.0f, 7863.0f, 7864.0f, 7865.0f, 7866.0f, 7867.0f, 7868.0f, 7869.0f, 7870.0f, 7871.0f, 7872.0f, 7873.0f, 7874.0f, 7875.0f, 7876.0f, 7877.0f, 7878.0f, 7879.0f, 7880.0f, 7881.0f, 7882.0f, 7883.0f, 7884.0f, 7885.0f, 7886.0f, 7887.0f, 7888.0f, 7889.0f, 7890.0f, 7891.0f, 
7892.0f, 7893.0f, 7894.0f, 7895.0f, 7896.0f, 7897.0f, 7898.0f, 7899.0f, 7900.0f, 7901.0f, 7902.0f, 7903.0f, 7904.0f, 7905.0f, 7906.0f, 7907.0f, 7908.0f, 7909.0f, 7910.0f, 7911.0f, 7912.0f, 7913.0f, 7914.0f, 7915.0f, 7916.0f, 7917.0f, 7918.0f, 7919.0f, 7920.0f, 7921.0f, 7922.0f, 7923.0f, 7924.0f, 7925.0f, 7926.0f, 7927.0f, 7928.0f, 7929.0f, 7930.0f, 7931.0f, 7932.0f, 7933.0f, 7934.0f, 7935.0f, 7936.0f, 7937.0f, 7938.0f, 7939.0f, 7940.0f, 7941.0f, 7942.0f, 7943.0f, 7944.0f, 7945.0f, 7946.0f, 7947.0f, 7948.0f, 7949.0f, 7950.0f, 7951.0f, 7952.0f, 7953.0f, 7954.0f, 7955.0f, 7956.0f, 7957.0f, 7958.0f, 7959.0f, 7960.0f, 7961.0f, 7962.0f, 7963.0f, 7964.0f, 7965.0f, 7966.0f, 7967.0f, 7968.0f, 7969.0f, 7970.0f, 7971.0f, 7972.0f, 7973.0f, 7974.0f, 7975.0f, 7976.0f, 7977.0f, 7978.0f, 7979.0f, 7980.0f, 7981.0f, 7982.0f, 7983.0f, 7984.0f, 7985.0f, 7986.0f, 7987.0f, 7988.0f, 7989.0f, 7990.0f, 7991.0f, 7992.0f, 7993.0f, 7994.0f, 7995.0f, 7996.0f, 7997.0f, 7998.0f, 7999.0f, 8000.0f, 8001.0f, 8002.0f, 8003.0f, 8004.0f, 8005.0f, 8006.0f, 8007.0f, 8008.0f, 8009.0f, 8010.0f, 8011.0f, 8012.0f, 8013.0f, 8014.0f, 8015.0f, 8016.0f, 8017.0f, 8018.0f, 8019.0f, 8020.0f, 8021.0f, 8022.0f, 8023.0f, 8024.0f, 8025.0f, 8026.0f, 8027.0f, 8028.0f, 8029.0f, 8030.0f, 8031.0f, 8032.0f, 8033.0f, 8034.0f, 8035.0f, 8036.0f, 8037.0f, 8038.0f, 8039.0f, 8040.0f, 8041.0f, 8042.0f, 8043.0f, 8044.0f, 8045.0f, 8046.0f, 8047.0f, 8048.0f, 8049.0f, 8050.0f, 8051.0f, 8052.0f, 8053.0f, 8054.0f, 8055.0f, 8056.0f, 8057.0f, 8058.0f, 8059.0f, 8060.0f, 8061.0f, 8062.0f, 8063.0f, 8064.0f, 8065.0f, 8066.0f, 8067.0f, 8068.0f, 8069.0f, 8070.0f, 8071.0f, 8072.0f, 8073.0f, 8074.0f, 8075.0f, 8076.0f, 8077.0f, 8078.0f, 8079.0f, 8080.0f, 8081.0f, 8082.0f, 8083.0f, 8084.0f, 8085.0f, 8086.0f, 8087.0f, 8088.0f, 8089.0f, 8090.0f, 8091.0f, 8092.0f, 8093.0f, 8094.0f, 8095.0f, 8096.0f, 8097.0f, 8098.0f, 8099.0f, 8100.0f, 8101.0f, 8102.0f, 8103.0f, 8104.0f, 8105.0f, 8106.0f, 8107.0f, 8108.0f, 8109.0f, 8110.0f, 8111.0f, 8112.0f, 8113.0f, 
8114.0f, 8115.0f, 8116.0f, 8117.0f, 8118.0f, 8119.0f, 8120.0f, 8121.0f, 8122.0f, 8123.0f, 8124.0f, 8125.0f, 8126.0f, 8127.0f, 8128.0f, 8129.0f, 8130.0f, 8131.0f, 8132.0f, 8133.0f, 8134.0f, 8135.0f, 8136.0f, 8137.0f, 8138.0f, 8139.0f, 8140.0f, 8141.0f, 8142.0f, 8143.0f, 8144.0f, 8145.0f, 8146.0f, 8147.0f, 8148.0f, 8149.0f, 8150.0f, 8151.0f, 8152.0f, 8153.0f, 8154.0f, 8155.0f, 8156.0f, 8157.0f, 8158.0f, 8159.0f, 8160.0f, 8161.0f, 8162.0f, 8163.0f, 8164.0f, 8165.0f, 8166.0f, 8167.0f, 8168.0f, 8169.0f, 8170.0f, 8171.0f, 8172.0f, 8173.0f, 8174.0f, 8175.0f, 8176.0f, 8177.0f, 8178.0f, 8179.0f, 8180.0f, 8181.0f, 8182.0f, 8183.0f, 8184.0f, 8185.0f, 8186.0f, 8187.0f, 8188.0f, 8189.0f, 8190.0f, 8191.0f, 8192.0f, 8193.0f, 8194.0f, 8195.0f, 8196.0f, 8197.0f, 8198.0f, 8199.0f, 8200.0f, 8201.0f, 8202.0f, 8203.0f, 8204.0f, 8205.0f, 8206.0f, 8207.0f, 8208.0f, 8209.0f, 8210.0f, 8211.0f, 8212.0f, 8213.0f, 8214.0f, 8215.0f, 8216.0f, 8217.0f, 8218.0f, 8219.0f, 8220.0f, 8221.0f, 8222.0f, 8223.0f, 8224.0f, 8225.0f, 8226.0f, 8227.0f, 8228.0f, 8229.0f, 8230.0f, 8231.0f, 8232.0f, 8233.0f, 8234.0f, 8235.0f, 8236.0f, 8237.0f, 8238.0f, 8239.0f, 8240.0f, 8241.0f, 8242.0f, 8243.0f, 8244.0f, 8245.0f, 8246.0f, 8247.0f, 8248.0f, 8249.0f, 8250.0f, 8251.0f, 8252.0f, 8253.0f, 8254.0f, 8255.0f, 8256.0f, 8257.0f, 8258.0f, 8259.0f, 8260.0f, 8261.0f, 8262.0f, 8263.0f, 8264.0f, 8265.0f, 8266.0f, 8267.0f, 8268.0f, 8269.0f, 8270.0f, 8271.0f, 8272.0f, 8273.0f, 8274.0f, 8275.0f, 8276.0f, 8277.0f, 8278.0f, 8279.0f, 8280.0f, 8281.0f, 8282.0f, 8283.0f, 8284.0f, 8285.0f, 8286.0f, 8287.0f, 8288.0f, 8289.0f, 8290.0f, 8291.0f, 8292.0f, 8293.0f, 8294.0f, 8295.0f, 8296.0f, 8297.0f, 8298.0f, 8299.0f, 8300.0f, 8301.0f, 8302.0f, 8303.0f, 8304.0f, 8305.0f, 8306.0f, 8307.0f, 8308.0f, 8309.0f, 8310.0f, 8311.0f, 8312.0f, 8313.0f, 8314.0f, 8315.0f, 8316.0f, 8317.0f, 8318.0f, 8319.0f, 8320.0f, 8321.0f, 8322.0f, 8323.0f, 8324.0f, 8325.0f, 8326.0f, 8327.0f, 8328.0f, 8329.0f, 8330.0f, 8331.0f, 8332.0f, 8333.0f, 8334.0f, 8335.0f, 
8336.0f, 8337.0f, 8338.0f, 8339.0f, 8340.0f, 8341.0f, 8342.0f, 8343.0f, 8344.0f, 8345.0f, 8346.0f, 8347.0f, 8348.0f, 8349.0f, 8350.0f, 8351.0f, 8352.0f, 8353.0f, 8354.0f, 8355.0f, 8356.0f, 8357.0f, 8358.0f, 8359.0f, 8360.0f, 8361.0f, 8362.0f, 8363.0f, 8364.0f, 8365.0f, 8366.0f, 8367.0f, 8368.0f, 8369.0f, 8370.0f, 8371.0f, 8372.0f, 8373.0f, 8374.0f, 8375.0f, 8376.0f, 8377.0f, 8378.0f, 8379.0f, 8380.0f, 8381.0f, 8382.0f, 8383.0f, 8384.0f, 8385.0f, 8386.0f, 8387.0f, 8388.0f, 8389.0f, 8390.0f, 8391.0f, 8392.0f, 8393.0f, 8394.0f, 8395.0f, 8396.0f, 8397.0f, 8398.0f, 8399.0f, 8400.0f, 8401.0f, 8402.0f, 8403.0f, 8404.0f, 8405.0f, 8406.0f, 8407.0f, 8408.0f, 8409.0f, 8410.0f, 8411.0f, 8412.0f, 8413.0f, 8414.0f, 8415.0f, 8416.0f, 8417.0f, 8418.0f, 8419.0f, 8420.0f, 8421.0f, 8422.0f, 8423.0f, 8424.0f, 8425.0f, 8426.0f, 8427.0f, 8428.0f, 8429.0f, 8430.0f, 8431.0f, 8432.0f, 8433.0f, 8434.0f, 8435.0f, 8436.0f, 8437.0f, 8438.0f, 8439.0f, 8440.0f, 8441.0f, 8442.0f, 8443.0f, 8444.0f, 8445.0f, 8446.0f, 8447.0f, 8448.0f, 8449.0f, 8450.0f, 8451.0f, 8452.0f, 8453.0f, 8454.0f, 8455.0f, 8456.0f, 8457.0f, 8458.0f, 8459.0f, 8460.0f, 8461.0f, 8462.0f, 8463.0f, 8464.0f, 8465.0f, 8466.0f, 8467.0f, 8468.0f, 8469.0f, 8470.0f, 8471.0f, 8472.0f, 8473.0f, 8474.0f, 8475.0f, 8476.0f, 8477.0f, 8478.0f, 8479.0f, 8480.0f, 8481.0f, 8482.0f, 8483.0f, 8484.0f, 8485.0f, 8486.0f, 8487.0f, 8488.0f, 8489.0f, 8490.0f, 8491.0f, 8492.0f, 8493.0f, 8494.0f, 8495.0f, 8496.0f, 8497.0f, 8498.0f, 8499.0f, 8500.0f, 8501.0f, 8502.0f, 8503.0f, 8504.0f, 8505.0f, 8506.0f, 8507.0f, 8508.0f, 8509.0f, 8510.0f, 8511.0f, 8512.0f, 8513.0f, 8514.0f, 8515.0f, 8516.0f, 8517.0f, 8518.0f, 8519.0f, 8520.0f, 8521.0f, 8522.0f, 8523.0f, 8524.0f, 8525.0f, 8526.0f, 8527.0f, 8528.0f, 8529.0f, 8530.0f, 8531.0f, 8532.0f, 8533.0f, 8534.0f, 8535.0f, 8536.0f, 8537.0f, 8538.0f, 8539.0f, 8540.0f, 8541.0f, 8542.0f, 8543.0f, 8544.0f, 8545.0f, 8546.0f, 8547.0f, 8548.0f, 8549.0f, 8550.0f, 8551.0f, 8552.0f, 8553.0f, 8554.0f, 8555.0f, 8556.0f, 8557.0f, 
8558.0f, 8559.0f, 8560.0f, 8561.0f, 8562.0f, 8563.0f, 8564.0f, 8565.0f, 8566.0f, 8567.0f, 8568.0f, 8569.0f, 8570.0f, 8571.0f, 8572.0f, 8573.0f, 8574.0f, 8575.0f, 8576.0f, 8577.0f, 8578.0f, 8579.0f, 8580.0f, 8581.0f, 8582.0f, 8583.0f, 8584.0f, 8585.0f, 8586.0f, 8587.0f, 8588.0f, 8589.0f, 8590.0f, 8591.0f, 8592.0f, 8593.0f, 8594.0f, 8595.0f, 8596.0f, 8597.0f, 8598.0f, 8599.0f, 8600.0f, 8601.0f, 8602.0f, 8603.0f, 8604.0f, 8605.0f, 8606.0f, 8607.0f, 8608.0f, 8609.0f, 8610.0f, 8611.0f, 8612.0f, 8613.0f, 8614.0f, 8615.0f, 8616.0f, 8617.0f, 8618.0f, 8619.0f, 8620.0f, 8621.0f, 8622.0f, 8623.0f, 8624.0f, 8625.0f, 8626.0f, 8627.0f, 8628.0f, 8629.0f, 8630.0f, 8631.0f, 8632.0f, 8633.0f, 8634.0f, 8635.0f, 8636.0f, 8637.0f, 8638.0f, 8639.0f, 8640.0f, 8641.0f, 8642.0f, 8643.0f, 8644.0f, 8645.0f, 8646.0f, 8647.0f, 8648.0f, 8649.0f, 8650.0f, 8651.0f, 8652.0f, 8653.0f, 8654.0f, 8655.0f, 8656.0f, 8657.0f, 8658.0f, 8659.0f, 8660.0f, 8661.0f, 8662.0f, 8663.0f, 8664.0f, 8665.0f, 8666.0f, 8667.0f, 8668.0f, 8669.0f, 8670.0f, 8671.0f, 8672.0f, 8673.0f, 8674.0f, 8675.0f, 8676.0f, 8677.0f, 8678.0f, 8679.0f, 8680.0f, 8681.0f, 8682.0f, 8683.0f, 8684.0f, 8685.0f, 8686.0f, 8687.0f, 8688.0f, 8689.0f, 8690.0f, 8691.0f, 8692.0f, 8693.0f, 8694.0f, 8695.0f, 8696.0f, 8697.0f, 8698.0f, 8699.0f, 8700.0f, 8701.0f, 8702.0f, 8703.0f, 8704.0f, 8705.0f, 8706.0f, 8707.0f, 8708.0f, 8709.0f, 8710.0f, 8711.0f, 8712.0f, 8713.0f, 8714.0f, 8715.0f, 8716.0f, 8717.0f, 8718.0f, 8719.0f, 8720.0f, 8721.0f, 8722.0f, 8723.0f, 8724.0f, 8725.0f, 8726.0f, 8727.0f, 8728.0f, 8729.0f, 8730.0f, 8731.0f, 8732.0f, 8733.0f, 8734.0f, 8735.0f, 8736.0f, 8737.0f, 8738.0f, 8739.0f, 8740.0f, 8741.0f, 8742.0f, 8743.0f, 8744.0f, 8745.0f, 8746.0f, 8747.0f, 8748.0f, 8749.0f, 8750.0f, 8751.0f, 8752.0f, 8753.0f, 8754.0f, 8755.0f, 8756.0f, 8757.0f, 8758.0f, 8759.0f, 8760.0f, 8761.0f, 8762.0f, 8763.0f, 8764.0f, 8765.0f, 8766.0f, 8767.0f, 8768.0f, 8769.0f, 8770.0f, 8771.0f, 8772.0f, 8773.0f, 8774.0f, 8775.0f, 8776.0f, 8777.0f, 8778.0f, 8779.0f, 
8780.0f, 8781.0f, 8782.0f, 8783.0f, 8784.0f, 8785.0f, 8786.0f, 8787.0f, 8788.0f, 8789.0f, 8790.0f, 8791.0f, 8792.0f, 8793.0f, 8794.0f, 8795.0f, 8796.0f, 8797.0f, 8798.0f, 8799.0f, 8800.0f, 8801.0f, 8802.0f, 8803.0f, 8804.0f, 8805.0f, 8806.0f, 8807.0f, 8808.0f, 8809.0f, 8810.0f, 8811.0f, 8812.0f, 8813.0f, 8814.0f, 8815.0f, 8816.0f, 8817.0f, 8818.0f, 8819.0f, 8820.0f, 8821.0f, 8822.0f, 8823.0f, 8824.0f, 8825.0f, 8826.0f, 8827.0f, 8828.0f, 8829.0f, 8830.0f, 8831.0f, 8832.0f, 8833.0f, 8834.0f, 8835.0f, 8836.0f, 8837.0f, 8838.0f, 8839.0f, 8840.0f, 8841.0f, 8842.0f, 8843.0f, 8844.0f, 8845.0f, 8846.0f, 8847.0f, 8848.0f, 8849.0f, 8850.0f, 8851.0f, 8852.0f, 8853.0f, 8854.0f, 8855.0f, 8856.0f, 8857.0f, 8858.0f, 8859.0f, 8860.0f, 8861.0f, 8862.0f, 8863.0f, 8864.0f, 8865.0f, 8866.0f, 8867.0f, 8868.0f, 8869.0f, 8870.0f, 8871.0f, 8872.0f, 8873.0f, 8874.0f, 8875.0f, 8876.0f, 8877.0f, 8878.0f, 8879.0f, 8880.0f, 8881.0f, 8882.0f, 8883.0f, 8884.0f, 8885.0f, 8886.0f, 8887.0f, 8888.0f, 8889.0f, 8890.0f, 8891.0f, 8892.0f, 8893.0f, 8894.0f, 8895.0f, 8896.0f, 8897.0f, 8898.0f, 8899.0f, 8900.0f, 8901.0f, 8902.0f, 8903.0f, 8904.0f, 8905.0f, 8906.0f, 8907.0f, 8908.0f, 8909.0f, 8910.0f, 8911.0f, 8912.0f, 8913.0f, 8914.0f, 8915.0f, 8916.0f, 8917.0f, 8918.0f, 8919.0f, 8920.0f, 8921.0f, 8922.0f, 8923.0f, 8924.0f, 8925.0f, 8926.0f, 8927.0f, 8928.0f, 8929.0f, 8930.0f, 8931.0f, 8932.0f, 8933.0f, 8934.0f, 8935.0f, 8936.0f, 8937.0f, 8938.0f, 8939.0f, 8940.0f, 8941.0f, 8942.0f, 8943.0f, 8944.0f, 8945.0f, 8946.0f, 8947.0f, 8948.0f, 8949.0f, 8950.0f, 8951.0f, 8952.0f, 8953.0f, 8954.0f, 8955.0f, 8956.0f, 8957.0f, 8958.0f, 8959.0f, 8960.0f, 8961.0f, 8962.0f, 8963.0f, 8964.0f, 8965.0f, 8966.0f, 8967.0f, 8968.0f, 8969.0f, 8970.0f, 8971.0f, 8972.0f, 8973.0f, 8974.0f, 8975.0f, 8976.0f, 8977.0f, 8978.0f, 8979.0f, 8980.0f, 8981.0f, 8982.0f, 8983.0f, 8984.0f, 8985.0f, 8986.0f, 8987.0f, 8988.0f, 8989.0f, 8990.0f, 8991.0f, 8992.0f, 8993.0f, 8994.0f, 8995.0f, 8996.0f, 8997.0f, 8998.0f, 8999.0f, 9000.0f, 9001.0f, 
9002.0f, 9003.0f, 9004.0f, 9005.0f, 9006.0f, 9007.0f, 9008.0f, 9009.0f, 9010.0f, 9011.0f, 9012.0f, 9013.0f, 9014.0f, 9015.0f, 9016.0f, 9017.0f, 9018.0f, 9019.0f, 9020.0f, 9021.0f, 9022.0f, 9023.0f, 9024.0f, 9025.0f, 9026.0f, 9027.0f, 9028.0f, 9029.0f, 9030.0f, 9031.0f, 9032.0f, 9033.0f, 9034.0f, 9035.0f, 9036.0f, 9037.0f, 9038.0f, 9039.0f, 9040.0f, 9041.0f, 9042.0f, 9043.0f, 9044.0f, 9045.0f, 9046.0f, 9047.0f, 9048.0f, 9049.0f, 9050.0f, 9051.0f, 9052.0f, 9053.0f, 9054.0f, 9055.0f, 9056.0f, 9057.0f, 9058.0f, 9059.0f, 9060.0f, 9061.0f, 9062.0f, 9063.0f, 9064.0f, 9065.0f, 9066.0f, 9067.0f, 9068.0f, 9069.0f, 9070.0f, 9071.0f, 9072.0f, 9073.0f, 9074.0f, 9075.0f, 9076.0f, 9077.0f, 9078.0f, 9079.0f, 9080.0f, 9081.0f, 9082.0f, 9083.0f, 9084.0f, 9085.0f, 9086.0f, 9087.0f, 9088.0f, 9089.0f, 9090.0f, 9091.0f, 9092.0f, 9093.0f, 9094.0f, 9095.0f, 9096.0f, 9097.0f, 9098.0f, 9099.0f, 9100.0f, 9101.0f, 9102.0f, 9103.0f, 9104.0f, 9105.0f, 9106.0f, 9107.0f, 9108.0f, 9109.0f, 9110.0f, 9111.0f, 9112.0f, 9113.0f, 9114.0f, 9115.0f, 9116.0f, 9117.0f, 9118.0f, 9119.0f, 9120.0f, 9121.0f, 9122.0f, 9123.0f, 9124.0f, 9125.0f, 9126.0f, 9127.0f, 9128.0f, 9129.0f, 9130.0f, 9131.0f, 9132.0f, 9133.0f, 9134.0f, 9135.0f, 9136.0f, 9137.0f, 9138.0f, 9139.0f, 9140.0f, 9141.0f, 9142.0f, 9143.0f, 9144.0f, 9145.0f, 9146.0f, 9147.0f, 9148.0f, 9149.0f, 9150.0f, 9151.0f, 9152.0f, 9153.0f, 9154.0f, 9155.0f, 9156.0f, 9157.0f, 9158.0f, 9159.0f, 9160.0f, 9161.0f, 9162.0f, 9163.0f, 9164.0f, 9165.0f, 9166.0f, 9167.0f, 9168.0f, 9169.0f, 9170.0f, 9171.0f, 9172.0f, 9173.0f, 9174.0f, 9175.0f, 9176.0f, 9177.0f, 9178.0f, 9179.0f, 9180.0f, 9181.0f, 9182.0f, 9183.0f, 9184.0f, 9185.0f, 9186.0f, 9187.0f, 9188.0f, 9189.0f, 9190.0f, 9191.0f, 9192.0f, 9193.0f, 9194.0f, 9195.0f, 9196.0f, 9197.0f, 9198.0f, 9199.0f, 9200.0f, 9201.0f, 9202.0f, 9203.0f, 9204.0f, 9205.0f, 9206.0f, 9207.0f, 9208.0f, 9209.0f, 9210.0f, 9211.0f, 9212.0f, 9213.0f, 9214.0f, 9215.0f, 9216.0f, 9217.0f, 9218.0f, 9219.0f, 9220.0f, 9221.0f, 9222.0f, 9223.0f, 
9224.0f, 9225.0f, 9226.0f, 9227.0f, 9228.0f, 9229.0f, 9230.0f, 9231.0f, 9232.0f, 9233.0f, 9234.0f, 9235.0f, 9236.0f, 9237.0f, 9238.0f, 9239.0f, 9240.0f, 9241.0f, 9242.0f, 9243.0f, 9244.0f, 9245.0f, 9246.0f, 9247.0f, 9248.0f, 9249.0f, 9250.0f, 9251.0f, 9252.0f, 9253.0f, 9254.0f, 9255.0f, 9256.0f, 9257.0f, 9258.0f, 9259.0f, 9260.0f, 9261.0f, 9262.0f, 9263.0f, 9264.0f, 9265.0f, 9266.0f, 9267.0f, 9268.0f, 9269.0f, 9270.0f, 9271.0f, 9272.0f, 9273.0f, 9274.0f, 9275.0f, 9276.0f, 9277.0f, 9278.0f, 9279.0f, 9280.0f, 9281.0f, 9282.0f, 9283.0f, 9284.0f, 9285.0f, 9286.0f, 9287.0f, 9288.0f, 9289.0f, 9290.0f, 9291.0f, 9292.0f, 9293.0f, 9294.0f, 9295.0f, 9296.0f, 9297.0f, 9298.0f, 9299.0f, 9300.0f, 9301.0f, 9302.0f, 9303.0f, 9304.0f, 9305.0f, 9306.0f, 9307.0f, 9308.0f, 9309.0f, 9310.0f, 9311.0f, 9312.0f, 9313.0f, 9314.0f, 9315.0f, 9316.0f, 9317.0f, 9318.0f, 9319.0f, 9320.0f, 9321.0f, 9322.0f, 9323.0f, 9324.0f, 9325.0f, 9326.0f, 9327.0f, 9328.0f, 9329.0f, 9330.0f, 9331.0f, 9332.0f, 9333.0f, 9334.0f, 9335.0f, 9336.0f, 9337.0f, 9338.0f, 9339.0f, 9340.0f, 9341.0f, 9342.0f, 9343.0f, 9344.0f, 9345.0f, 9346.0f, 9347.0f, 9348.0f, 9349.0f, 9350.0f, 9351.0f, 9352.0f, 9353.0f, 9354.0f, 9355.0f, 9356.0f, 9357.0f, 9358.0f, 9359.0f, 9360.0f, 9361.0f, 9362.0f, 9363.0f, 9364.0f, 9365.0f, 9366.0f, 9367.0f, 9368.0f, 9369.0f, 9370.0f, 9371.0f, 9372.0f, 9373.0f, 9374.0f, 9375.0f, 9376.0f, 9377.0f, 9378.0f, 9379.0f, 9380.0f, 9381.0f, 9382.0f, 9383.0f, 9384.0f, 9385.0f, 9386.0f, 9387.0f, 9388.0f, 9389.0f, 9390.0f, 9391.0f, 9392.0f, 9393.0f, 9394.0f, 9395.0f, 9396.0f, 9397.0f, 9398.0f, 9399.0f, 9400.0f, 9401.0f, 9402.0f, 9403.0f, 9404.0f, 9405.0f, 9406.0f, 9407.0f, 9408.0f, 9409.0f, 9410.0f, 9411.0f, 9412.0f, 9413.0f, 9414.0f, 9415.0f, 9416.0f, 9417.0f, 9418.0f, 9419.0f, 9420.0f, 9421.0f, 9422.0f, 9423.0f, 9424.0f, 9425.0f, 9426.0f, 9427.0f, 9428.0f, 9429.0f, 9430.0f, 9431.0f, 9432.0f, 9433.0f, 9434.0f, 9435.0f, 9436.0f, 9437.0f, 9438.0f, 9439.0f, 9440.0f, 9441.0f, 9442.0f, 9443.0f, 9444.0f, 9445.0f, 
9446.0f, 9447.0f, 9448.0f, 9449.0f, 9450.0f, 9451.0f, 9452.0f, 9453.0f, 9454.0f, 9455.0f, 9456.0f, 9457.0f, 9458.0f, 9459.0f, 9460.0f, 9461.0f, 9462.0f, 9463.0f, 9464.0f, 9465.0f, 9466.0f, 9467.0f, 9468.0f, 9469.0f, 9470.0f, 9471.0f, 9472.0f, 9473.0f, 9474.0f, 9475.0f, 9476.0f, 9477.0f, 9478.0f, 9479.0f, 9480.0f, 9481.0f, 9482.0f, 9483.0f, 9484.0f, 9485.0f, 9486.0f, 9487.0f, 9488.0f, 9489.0f, 9490.0f, 9491.0f, 9492.0f, 9493.0f, 9494.0f, 9495.0f, 9496.0f, 9497.0f, 9498.0f, 9499.0f, 9500.0f, 9501.0f, 9502.0f, 9503.0f, 9504.0f, 9505.0f, 9506.0f, 9507.0f, 9508.0f, 9509.0f, 9510.0f, 9511.0f, 9512.0f, 9513.0f, 9514.0f, 9515.0f, 9516.0f, 9517.0f, 9518.0f, 9519.0f, 9520.0f, 9521.0f, 9522.0f, 9523.0f, 9524.0f, 9525.0f, 9526.0f, 9527.0f, 9528.0f, 9529.0f, 9530.0f, 9531.0f, 9532.0f, 9533.0f, 9534.0f, 9535.0f, 9536.0f, 9537.0f, 9538.0f, 9539.0f, 9540.0f, 9541.0f, 9542.0f, 9543.0f, 9544.0f, 9545.0f, 9546.0f, 9547.0f, 9548.0f, 9549.0f, 9550.0f, 9551.0f, 9552.0f, 9553.0f, 9554.0f, 9555.0f, 9556.0f, 9557.0f, 9558.0f, 9559.0f, 9560.0f, 9561.0f, 9562.0f, 9563.0f, 9564.0f, 9565.0f, 9566.0f, 9567.0f, 9568.0f, 9569.0f, 9570.0f, 9571.0f, 9572.0f, 9573.0f, 9574.0f, 9575.0f, 9576.0f, 9577.0f, 9578.0f, 9579.0f, 9580.0f, 9581.0f, 9582.0f, 9583.0f, 9584.0f, 9585.0f, 9586.0f, 9587.0f, 9588.0f, 9589.0f, 9590.0f, 9591.0f, 9592.0f, 9593.0f, 9594.0f, 9595.0f, 9596.0f, 9597.0f, 9598.0f, 9599.0f, 9600.0f, 9601.0f, 9602.0f, 9603.0f, 9604.0f, 9605.0f, 9606.0f, 9607.0f, 9608.0f, 9609.0f, 9610.0f, 9611.0f, 9612.0f, 9613.0f, 9614.0f, 9615.0f, 9616.0f, 9617.0f, 9618.0f, 9619.0f, 9620.0f, 9621.0f, 9622.0f, 9623.0f, 9624.0f, 9625.0f, 9626.0f, 9627.0f, 9628.0f, 9629.0f, 9630.0f, 9631.0f, 9632.0f, 9633.0f, 9634.0f, 9635.0f, 9636.0f, 9637.0f, 9638.0f, 9639.0f, 9640.0f, 9641.0f, 9642.0f, 9643.0f, 9644.0f, 9645.0f, 9646.0f, 9647.0f, 9648.0f, 9649.0f, 9650.0f, 9651.0f, 9652.0f, 9653.0f, 9654.0f, 9655.0f, 9656.0f, 9657.0f, 9658.0f, 9659.0f, 9660.0f, 9661.0f, 9662.0f, 9663.0f, 9664.0f, 9665.0f, 9666.0f, 9667.0f, 
9668.0f, 9669.0f, 9670.0f, 9671.0f, 9672.0f, 9673.0f, 9674.0f, 9675.0f, 9676.0f, 9677.0f, 9678.0f, 9679.0f, 9680.0f, 9681.0f, 9682.0f, 9683.0f, 9684.0f, 9685.0f, 9686.0f, 9687.0f, 9688.0f, 9689.0f, 9690.0f, 9691.0f, 9692.0f, 9693.0f, 9694.0f, 9695.0f, 9696.0f, 9697.0f, 9698.0f, 9699.0f, 9700.0f, 9701.0f, 9702.0f, 9703.0f, 9704.0f, 9705.0f, 9706.0f, 9707.0f, 9708.0f, 9709.0f, 9710.0f, 9711.0f, 9712.0f, 9713.0f, 9714.0f, 9715.0f, 9716.0f, 9717.0f, 9718.0f, 9719.0f, 9720.0f, 9721.0f, 9722.0f, 9723.0f, 9724.0f, 9725.0f, 9726.0f, 9727.0f, 9728.0f, 9729.0f, 9730.0f, 9731.0f, 9732.0f, 9733.0f, 9734.0f, 9735.0f, 9736.0f, 9737.0f, 9738.0f, 9739.0f, 9740.0f, 9741.0f, 9742.0f, 9743.0f, 9744.0f, 9745.0f, 9746.0f, 9747.0f, 9748.0f, 9749.0f, 9750.0f, 9751.0f, 9752.0f, 9753.0f, 9754.0f, 9755.0f, 9756.0f, 9757.0f, 9758.0f, 9759.0f, 9760.0f, 9761.0f, 9762.0f, 9763.0f, 9764.0f, 9765.0f, 9766.0f, 9767.0f, 9768.0f, 9769.0f, 9770.0f, 9771.0f, 9772.0f, 9773.0f, 9774.0f, 9775.0f, 9776.0f, 9777.0f, 9778.0f, 9779.0f, 9780.0f, 9781.0f, 9782.0f, 9783.0f, 9784.0f, 9785.0f, 9786.0f, 9787.0f, 9788.0f, 9789.0f, 9790.0f, 9791.0f, 9792.0f, 9793.0f, 9794.0f, 9795.0f, 9796.0f, 9797.0f, 9798.0f, 9799.0f, 9800.0f, 9801.0f, 9802.0f, 9803.0f, 9804.0f, 9805.0f, 9806.0f, 9807.0f, 9808.0f, 9809.0f, 9810.0f, 9811.0f, 9812.0f, 9813.0f, 9814.0f, 9815.0f, 9816.0f, 9817.0f, 9818.0f, 9819.0f, 9820.0f, 9821.0f, 9822.0f, 9823.0f, 9824.0f, 9825.0f, 9826.0f, 9827.0f, 9828.0f, 9829.0f, 9830.0f, 9831.0f, 9832.0f, 9833.0f, 9834.0f, 9835.0f, 9836.0f, 9837.0f, 9838.0f, 9839.0f, 9840.0f, 9841.0f, 9842.0f, 9843.0f, 9844.0f, 9845.0f, 9846.0f, 9847.0f, 9848.0f, 9849.0f, 9850.0f, 9851.0f, 9852.0f, 9853.0f, 9854.0f, 9855.0f, 9856.0f, 9857.0f, 9858.0f, 9859.0f, 9860.0f, 9861.0f, 9862.0f, 9863.0f, 9864.0f, 9865.0f, 9866.0f, 9867.0f, 9868.0f, 9869.0f, 9870.0f, 9871.0f, 9872.0f, 9873.0f, 9874.0f, 9875.0f, 9876.0f, 9877.0f, 9878.0f, 9879.0f, 9880.0f, 9881.0f, 9882.0f, 9883.0f, 9884.0f, 9885.0f, 9886.0f, 9887.0f, 9888.0f, 9889.0f, 
9890.0f, 9891.0f, 9892.0f, 9893.0f, 9894.0f, 9895.0f, 9896.0f, 9897.0f, 9898.0f, 9899.0f, 9900.0f, 9901.0f, 9902.0f, 9903.0f, 9904.0f, 9905.0f, 9906.0f, 9907.0f, 9908.0f, 9909.0f, 9910.0f, 9911.0f, 9912.0f, 9913.0f, 9914.0f, 9915.0f, 9916.0f, 9917.0f, 9918.0f, 9919.0f, 9920.0f, 9921.0f, 9922.0f, 9923.0f, 9924.0f, 9925.0f, 9926.0f, 9927.0f, 9928.0f, 9929.0f, 9930.0f, 9931.0f, 9932.0f, 9933.0f, 9934.0f, 9935.0f, 9936.0f, 9937.0f, 9938.0f, 9939.0f, 9940.0f, 9941.0f, 9942.0f, 9943.0f, 9944.0f, 9945.0f, 9946.0f, 9947.0f, 9948.0f, 9949.0f, 9950.0f, 9951.0f, 9952.0f, 9953.0f, 9954.0f, 9955.0f, 9956.0f, 9957.0f, 9958.0f, 9959.0f, 9960.0f, 9961.0f, 9962.0f, 9963.0f, 9964.0f, 9965.0f, 9966.0f, 9967.0f, 9968.0f, 9969.0f, 9970.0f, 9971.0f, 9972.0f, 9973.0f, 9974.0f, 9975.0f, 9976.0f, 9977.0f, 9978.0f, 9979.0f, 9980.0f, 9981.0f, 9982.0f, 9983.0f, 9984.0f, 9985.0f, 9986.0f, 9987.0f, 9988.0f, 9989.0f, 9990.0f, 9991.0f, 9992.0f, 9993.0f, 9994.0f, 9995.0f, 9996.0f, 9997.0f, 9998.0f, 9999.0f, 10000.0f, 10001.0f, 10002.0f, 10003.0f, 10004.0f, 10005.0f, 10006.0f, 10007.0f, 10008.0f, 10009.0f, 10010.0f, 10011.0f, 10012.0f, 10013.0f, 10014.0f, 10015.0f, 10016.0f, 10017.0f, 10018.0f, 10019.0f, 10020.0f, 10021.0f, 10022.0f, 10023.0f, 10024.0f, 10025.0f, 10026.0f, 10027.0f, 10028.0f, 10029.0f, 10030.0f, 10031.0f, 10032.0f, 10033.0f, 10034.0f, 10035.0f, 10036.0f, 10037.0f, 10038.0f, 10039.0f, 10040.0f, 10041.0f, 10042.0f, 10043.0f, 10044.0f, 10045.0f, 10046.0f, 10047.0f, 10048.0f, 10049.0f, 10050.0f, 10051.0f, 10052.0f, 10053.0f, 10054.0f, 10055.0f, 10056.0f, 10057.0f, 10058.0f, 10059.0f, 10060.0f, 10061.0f, 10062.0f, 10063.0f, 10064.0f, 10065.0f, 10066.0f, 10067.0f, 10068.0f, 10069.0f, 10070.0f, 10071.0f, 10072.0f, 10073.0f, 10074.0f, 10075.0f, 10076.0f, 10077.0f, 10078.0f, 10079.0f, 10080.0f, 10081.0f, 10082.0f, 10083.0f, 10084.0f, 10085.0f, 10086.0f, 10087.0f, 10088.0f, 10089.0f, 10090.0f, 10091.0f, 10092.0f, 10093.0f, 10094.0f, 10095.0f, 10096.0f, 10097.0f, 10098.0f, 10099.0f, 10100.0f, 
10101.0f, 10102.0f, 10103.0f, 10104.0f, 10105.0f, 10106.0f, 10107.0f, 10108.0f, 10109.0f, 10110.0f, 10111.0f, 10112.0f, 10113.0f, 10114.0f, 10115.0f, 10116.0f, 10117.0f, 10118.0f, 10119.0f, 10120.0f, 10121.0f, 10122.0f, 10123.0f, 10124.0f, 10125.0f, 10126.0f, 10127.0f, 10128.0f, 10129.0f, 10130.0f, 10131.0f, 10132.0f, 10133.0f, 10134.0f, 10135.0f, 10136.0f, 10137.0f, 10138.0f, 10139.0f, 10140.0f, 10141.0f, 10142.0f, 10143.0f, 10144.0f, 10145.0f, 10146.0f, 10147.0f, 10148.0f, 10149.0f, 10150.0f, 10151.0f, 10152.0f, 10153.0f, 10154.0f, 10155.0f, 10156.0f, 10157.0f, 10158.0f, 10159.0f, 10160.0f, 10161.0f, 10162.0f, 10163.0f, 10164.0f, 10165.0f, 10166.0f, 10167.0f, 10168.0f, 10169.0f, 10170.0f, 10171.0f, 10172.0f, 10173.0f, 10174.0f, 10175.0f, 10176.0f, 10177.0f, 10178.0f, 10179.0f, 10180.0f, 10181.0f, 10182.0f, 10183.0f, 10184.0f, 10185.0f, 10186.0f, 10187.0f, 10188.0f, 10189.0f, 10190.0f, 10191.0f, 10192.0f, 10193.0f, 10194.0f, 10195.0f, 10196.0f, 10197.0f, 10198.0f, 10199.0f, 10200.0f, 10201.0f, 10202.0f, 10203.0f, 10204.0f, 10205.0f, 10206.0f, 10207.0f, 10208.0f, 10209.0f, 10210.0f, 10211.0f, 10212.0f, 10213.0f, 10214.0f, 10215.0f, 10216.0f, 10217.0f, 10218.0f, 10219.0f, 10220.0f, 10221.0f, 10222.0f, 10223.0f, 10224.0f, 10225.0f, 10226.0f, 10227.0f, 10228.0f, 10229.0f, 10230.0f, 10231.0f, 10232.0f, 10233.0f, 10234.0f, 10235.0f, 10236.0f, 10237.0f, 10238.0f, 10239.0f, 10240.0f, 10241.0f, 10242.0f, 10243.0f, 10244.0f, 10245.0f, 10246.0f, 10247.0f, 10248.0f, 10249.0f, 10250.0f, 10251.0f, 10252.0f, 10253.0f, 10254.0f, 10255.0f, 10256.0f, 10257.0f, 10258.0f, 10259.0f, 10260.0f, 10261.0f, 10262.0f, 10263.0f, 10264.0f, 10265.0f, 10266.0f, 10267.0f, 10268.0f, 10269.0f, 10270.0f, 10271.0f, 10272.0f, 10273.0f, 10274.0f, 10275.0f, 10276.0f, 10277.0f, 10278.0f, 10279.0f, 10280.0f, 10281.0f, 10282.0f, 10283.0f, 10284.0f, 10285.0f, 10286.0f, 10287.0f, 10288.0f, 10289.0f, 10290.0f, 10291.0f, 10292.0f, 10293.0f, 10294.0f, 10295.0f, 10296.0f, 10297.0f, 10298.0f, 10299.0f, 10300.0f, 
10301.0f, 10302.0f, 10303.0f, 10304.0f, 10305.0f, 10306.0f, 10307.0f, 10308.0f, 10309.0f, 10310.0f, 10311.0f, 10312.0f, 10313.0f, 10314.0f, 10315.0f, 10316.0f, 10317.0f, 10318.0f, 10319.0f, 10320.0f, 10321.0f, 10322.0f, 10323.0f, 10324.0f, 10325.0f, 10326.0f, 10327.0f, 10328.0f, 10329.0f, 10330.0f, 10331.0f, 10332.0f, 10333.0f, 10334.0f, 10335.0f, 10336.0f, 10337.0f, 10338.0f, 10339.0f, 10340.0f, 10341.0f, 10342.0f, 10343.0f, 10344.0f, 10345.0f, 10346.0f, 10347.0f, 10348.0f, 10349.0f, 10350.0f, 10351.0f, 10352.0f, 10353.0f, 10354.0f, 10355.0f, 10356.0f, 10357.0f, 10358.0f, 10359.0f, 10360.0f, 10361.0f, 10362.0f, 10363.0f, 10364.0f, 10365.0f, 10366.0f, 10367.0f, 10368.0f, 10369.0f, 10370.0f, 10371.0f, 10372.0f, 10373.0f, 10374.0f, 10375.0f, 10376.0f, 10377.0f, 10378.0f, 10379.0f, 10380.0f, 10381.0f, 10382.0f, 10383.0f, 10384.0f, 10385.0f, 10386.0f, 10387.0f, 10388.0f, 10389.0f, 10390.0f, 10391.0f, 10392.0f, 10393.0f, 10394.0f, 10395.0f, 10396.0f, 10397.0f, 10398.0f, 10399.0f, 10400.0f, 10401.0f, 10402.0f, 10403.0f, 10404.0f, 10405.0f, 10406.0f, 10407.0f, 10408.0f, 10409.0f, 10410.0f, 10411.0f, 10412.0f, 10413.0f, 10414.0f, 10415.0f, 10416.0f, 10417.0f, 10418.0f, 10419.0f, 10420.0f, 10421.0f, 10422.0f, 10423.0f, 10424.0f, 10425.0f, 10426.0f, 10427.0f, 10428.0f, 10429.0f, 10430.0f, 10431.0f, 10432.0f, 10433.0f, 10434.0f, 10435.0f, 10436.0f, 10437.0f, 10438.0f, 10439.0f, 10440.0f, 10441.0f, 10442.0f, 10443.0f, 10444.0f, 10445.0f, 10446.0f, 10447.0f, 10448.0f, 10449.0f, 10450.0f, 10451.0f, 10452.0f, 10453.0f, 10454.0f, 10455.0f, 10456.0f, 10457.0f, 10458.0f, 10459.0f, 10460.0f, 10461.0f, 10462.0f, 10463.0f, 10464.0f, 10465.0f, 10466.0f, 10467.0f, 10468.0f, 10469.0f, 10470.0f, 10471.0f, 10472.0f, 10473.0f, 10474.0f, 10475.0f, 10476.0f, 10477.0f, 10478.0f, 10479.0f, 10480.0f, 10481.0f, 10482.0f, 10483.0f, 10484.0f, 10485.0f, 10486.0f, 10487.0f, 10488.0f, 10489.0f, 10490.0f, 10491.0f, 10492.0f, 10493.0f, 10494.0f, 10495.0f, 10496.0f, 10497.0f, 10498.0f, 10499.0f, 10500.0f, 
10501.0f, 10502.0f, 10503.0f, 10504.0f, 10505.0f, 10506.0f, 10507.0f, 10508.0f, 10509.0f, 10510.0f, 10511.0f, 10512.0f, 10513.0f, 10514.0f, 10515.0f, 10516.0f, 10517.0f, 10518.0f, 10519.0f, 10520.0f, 10521.0f, 10522.0f, 10523.0f, 10524.0f, 10525.0f, 10526.0f, 10527.0f, 10528.0f, 10529.0f, 10530.0f, 10531.0f, 10532.0f, 10533.0f, 10534.0f, 10535.0f, 10536.0f, 10537.0f, 10538.0f, 10539.0f, 10540.0f, 10541.0f, 10542.0f, 10543.0f, 10544.0f, 10545.0f, 10546.0f, 10547.0f, 10548.0f, 10549.0f, 10550.0f, 10551.0f, 10552.0f, 10553.0f, 10554.0f, 10555.0f, 10556.0f, 10557.0f, 10558.0f, 10559.0f, 10560.0f, 10561.0f, 10562.0f, 10563.0f, 10564.0f, 10565.0f, 10566.0f, 10567.0f, 10568.0f, 10569.0f, 10570.0f, 10571.0f, 10572.0f, 10573.0f, 10574.0f, 10575.0f, 10576.0f, 10577.0f, 10578.0f, 10579.0f, 10580.0f, 10581.0f, 10582.0f, 10583.0f, 10584.0f, 10585.0f, 10586.0f, 10587.0f, 10588.0f, 10589.0f, 10590.0f, 10591.0f, 10592.0f, 10593.0f, 10594.0f, 10595.0f, 10596.0f, 10597.0f, 10598.0f, 10599.0f, 10600.0f, 10601.0f, 10602.0f, 10603.0f, 10604.0f, 10605.0f, 10606.0f, 10607.0f, 10608.0f, 10609.0f, 10610.0f, 10611.0f, 10612.0f, 10613.0f, 10614.0f, 10615.0f, 10616.0f, 10617.0f, 10618.0f, 10619.0f, 10620.0f, 10621.0f, 10622.0f, 10623.0f, 10624.0f, 10625.0f, 10626.0f, 10627.0f, 10628.0f, 10629.0f, 10630.0f, 10631.0f, 10632.0f, 10633.0f, 10634.0f, 10635.0f, 10636.0f, 10637.0f, 10638.0f, 10639.0f, 10640.0f, 10641.0f, 10642.0f, 10643.0f, 10644.0f, 10645.0f, 10646.0f, 10647.0f, 10648.0f, 10649.0f, 10650.0f, 10651.0f, 10652.0f, 10653.0f, 10654.0f, 10655.0f, 10656.0f, 10657.0f, 10658.0f, 10659.0f, 10660.0f, 10661.0f, 10662.0f, 10663.0f, 10664.0f, 10665.0f, 10666.0f, 10667.0f, 10668.0f, 10669.0f, 10670.0f, 10671.0f, 10672.0f, 10673.0f, 10674.0f, 10675.0f, 10676.0f, 10677.0f, 10678.0f, 10679.0f, 10680.0f, 10681.0f, 10682.0f, 10683.0f, 10684.0f, 10685.0f, 10686.0f, 10687.0f, 10688.0f, 10689.0f, 10690.0f, 10691.0f, 10692.0f, 10693.0f, 10694.0f, 10695.0f, 10696.0f, 10697.0f, 10698.0f, 10699.0f, 10700.0f, 
10701.0f, 10702.0f, 10703.0f, 10704.0f, 10705.0f, 10706.0f, 10707.0f, 10708.0f, 10709.0f, 10710.0f, 10711.0f, 10712.0f, 10713.0f, 10714.0f, 10715.0f, 10716.0f, 10717.0f, 10718.0f, 10719.0f, 10720.0f, 10721.0f, 10722.0f, 10723.0f, 10724.0f, 10725.0f, 10726.0f, 10727.0f, 10728.0f, 10729.0f, 10730.0f, 10731.0f, 10732.0f, 10733.0f, 10734.0f, 10735.0f, 10736.0f, 10737.0f, 10738.0f, 10739.0f, 10740.0f, 10741.0f, 10742.0f, 10743.0f, 10744.0f, 10745.0f, 10746.0f, 10747.0f, 10748.0f, 10749.0f, 10750.0f, 10751.0f, 10752.0f, 10753.0f, 10754.0f, 10755.0f, 10756.0f, 10757.0f, 10758.0f, 10759.0f, 10760.0f, 10761.0f, 10762.0f, 10763.0f, 10764.0f, 10765.0f, 10766.0f, 10767.0f, 10768.0f, 10769.0f, 10770.0f, 10771.0f, 10772.0f, 10773.0f, 10774.0f, 10775.0f, 10776.0f, 10777.0f, 10778.0f, 10779.0f, 10780.0f, 10781.0f, 10782.0f, 10783.0f, 10784.0f, 10785.0f, 10786.0f, 10787.0f, 10788.0f, 10789.0f, 10790.0f, 10791.0f, 10792.0f, 10793.0f, 10794.0f, 10795.0f, 10796.0f, 10797.0f, 10798.0f, 10799.0f, 10800.0f, 10801.0f, 10802.0f, 10803.0f, 10804.0f, 10805.0f, 10806.0f, 10807.0f, 10808.0f, 10809.0f, 10810.0f, 10811.0f, 10812.0f, 10813.0f, 10814.0f, 10815.0f, 10816.0f, 10817.0f, 10818.0f, 10819.0f, 10820.0f, 10821.0f, 10822.0f, 10823.0f, 10824.0f, 10825.0f, 10826.0f, 10827.0f, 10828.0f, 10829.0f, 10830.0f, 10831.0f, 10832.0f, 10833.0f, 10834.0f, 10835.0f, 10836.0f, 10837.0f, 10838.0f, 10839.0f, 10840.0f, 10841.0f, 10842.0f, 10843.0f, 10844.0f, 10845.0f, 10846.0f, 10847.0f, 10848.0f, 10849.0f, 10850.0f, 10851.0f, 10852.0f, 10853.0f, 10854.0f, 10855.0f, 10856.0f, 10857.0f, 10858.0f, 10859.0f, 10860.0f, 10861.0f, 10862.0f, 10863.0f, 10864.0f, 10865.0f, 10866.0f, 10867.0f, 10868.0f, 10869.0f, 10870.0f, 10871.0f, 10872.0f, 10873.0f, 10874.0f, 10875.0f, 10876.0f, 10877.0f, 10878.0f, 10879.0f, 10880.0f, 10881.0f, 10882.0f, 10883.0f, 10884.0f, 10885.0f, 10886.0f, 10887.0f, 10888.0f, 10889.0f, 10890.0f, 10891.0f, 10892.0f, 10893.0f, 10894.0f, 10895.0f, 10896.0f, 10897.0f, 10898.0f, 10899.0f, 10900.0f, 
10901.0f, 10902.0f, 10903.0f, 10904.0f, 10905.0f, 10906.0f, 10907.0f, 10908.0f, 10909.0f, 10910.0f, 10911.0f, 10912.0f, 10913.0f, 10914.0f, 10915.0f, 10916.0f, 10917.0f, 10918.0f, 10919.0f, 10920.0f, 10921.0f, 10922.0f, 10923.0f, 10924.0f, 10925.0f, 10926.0f, 10927.0f, 10928.0f, 10929.0f, 10930.0f, 10931.0f, 10932.0f, 10933.0f, 10934.0f, 10935.0f, 10936.0f, 10937.0f, 10938.0f, 10939.0f, 10940.0f, 10941.0f, 10942.0f, 10943.0f, 10944.0f, 10945.0f, 10946.0f, 10947.0f, 10948.0f, 10949.0f, 10950.0f, 10951.0f, 10952.0f, 10953.0f, 10954.0f, 10955.0f, 10956.0f, 10957.0f, 10958.0f, 10959.0f, 10960.0f, 10961.0f, 10962.0f, 10963.0f, 10964.0f, 10965.0f, 10966.0f, 10967.0f, 10968.0f, 10969.0f, 10970.0f, 10971.0f, 10972.0f, 10973.0f, 10974.0f, 10975.0f, 10976.0f, 10977.0f, 10978.0f, 10979.0f, 10980.0f, 10981.0f, 10982.0f, 10983.0f, 10984.0f, 10985.0f, 10986.0f, 10987.0f, 10988.0f, 10989.0f, 10990.0f, 10991.0f, 10992.0f, 10993.0f, 10994.0f, 10995.0f, 10996.0f, 10997.0f, 10998.0f, 10999.0f, 11000.0f, 11001.0f, 11002.0f, 11003.0f, 11004.0f, 11005.0f, 11006.0f, 11007.0f, 11008.0f, 11009.0f, 11010.0f, 11011.0f, 11012.0f, 11013.0f, 11014.0f, 11015.0f, 11016.0f, 11017.0f, 11018.0f, 11019.0f, 11020.0f, 11021.0f, 11022.0f, 11023.0f, 11024.0f, 11025.0f, 11026.0f, 11027.0f, 11028.0f, 11029.0f, 11030.0f, 11031.0f, 11032.0f, 11033.0f, 11034.0f, 11035.0f, 11036.0f, 11037.0f, 11038.0f, 11039.0f, 11040.0f, 11041.0f, 11042.0f, 11043.0f, 11044.0f, 11045.0f, 11046.0f, 11047.0f, 11048.0f, 11049.0f, 11050.0f, 11051.0f, 11052.0f, 11053.0f, 11054.0f, 11055.0f, 11056.0f, 11057.0f, 11058.0f, 11059.0f, 11060.0f, 11061.0f, 11062.0f, 11063.0f, 11064.0f, 11065.0f, 11066.0f, 11067.0f, 11068.0f, 11069.0f, 11070.0f, 11071.0f, 11072.0f, 11073.0f, 11074.0f, 11075.0f, 11076.0f, 11077.0f, 11078.0f, 11079.0f, 11080.0f, 11081.0f, 11082.0f, 11083.0f, 11084.0f, 11085.0f, 11086.0f, 11087.0f, 11088.0f, 11089.0f, 11090.0f, 11091.0f, 11092.0f, 11093.0f, 11094.0f, 11095.0f, 11096.0f, 11097.0f, 11098.0f, 11099.0f, 11100.0f, 
11101.0f, 11102.0f, 11103.0f, 11104.0f, 11105.0f, 11106.0f, 11107.0f, 11108.0f, 11109.0f, 11110.0f, 11111.0f, 11112.0f, 11113.0f, 11114.0f, 11115.0f, 11116.0f, 11117.0f, 11118.0f, 11119.0f, 11120.0f, 11121.0f, 11122.0f, 11123.0f, 11124.0f, 11125.0f, 11126.0f, 11127.0f, 11128.0f, 11129.0f, 11130.0f, 11131.0f, 11132.0f, 11133.0f, 11134.0f, 11135.0f, 11136.0f, 11137.0f, 11138.0f, 11139.0f, 11140.0f, 11141.0f, 11142.0f, 11143.0f, 11144.0f, 11145.0f, 11146.0f, 11147.0f, 11148.0f, 11149.0f, 11150.0f, 11151.0f, 11152.0f, 11153.0f, 11154.0f, 11155.0f, 11156.0f, 11157.0f, 11158.0f, 11159.0f, 11160.0f, 11161.0f, 11162.0f, 11163.0f, 11164.0f, 11165.0f, 11166.0f, 11167.0f, 11168.0f, 11169.0f, 11170.0f, 11171.0f, 11172.0f, 11173.0f, 11174.0f, 11175.0f, 11176.0f, 11177.0f, 11178.0f, 11179.0f, 11180.0f, 11181.0f, 11182.0f, 11183.0f, 11184.0f, 11185.0f, 11186.0f, 11187.0f, 11188.0f, 11189.0f, 11190.0f, 11191.0f, 11192.0f, 11193.0f, 11194.0f, 11195.0f, 11196.0f, 11197.0f, 11198.0f, 11199.0f, 11200.0f, 11201.0f, 11202.0f, 11203.0f, 11204.0f, 11205.0f, 11206.0f, 11207.0f, 11208.0f, 11209.0f, 11210.0f, 11211.0f, 11212.0f, 11213.0f, 11214.0f, 11215.0f, 11216.0f, 11217.0f, 11218.0f, 11219.0f, 11220.0f, 11221.0f, 11222.0f, 11223.0f, 11224.0f, 11225.0f, 11226.0f, 11227.0f, 11228.0f, 11229.0f, 11230.0f, 11231.0f, 11232.0f, 11233.0f, 11234.0f, 11235.0f, 11236.0f, 11237.0f, 11238.0f, 11239.0f, 11240.0f, 11241.0f, 11242.0f, 11243.0f, 11244.0f, 11245.0f, 11246.0f, 11247.0f, 11248.0f, 11249.0f, 11250.0f, 11251.0f, 11252.0f, 11253.0f, 11254.0f, 11255.0f, 11256.0f, 11257.0f, 11258.0f, 11259.0f, 11260.0f, 11261.0f, 11262.0f, 11263.0f, 11264.0f, 11265.0f, 11266.0f, 11267.0f, 11268.0f, 11269.0f, 11270.0f, 11271.0f, 11272.0f, 11273.0f, 11274.0f, 11275.0f, 11276.0f, 11277.0f, 11278.0f, 11279.0f, 11280.0f, 11281.0f, 11282.0f, 11283.0f, 11284.0f, 11285.0f, 11286.0f, 11287.0f, 11288.0f, 11289.0f, 11290.0f, 11291.0f, 11292.0f, 11293.0f, 11294.0f, 11295.0f, 11296.0f, 11297.0f, 11298.0f, 11299.0f, 11300.0f, 
11301.0f, 11302.0f, 11303.0f, 11304.0f, 11305.0f, 11306.0f, 11307.0f, 11308.0f, 11309.0f, 11310.0f, 11311.0f, 11312.0f, 11313.0f, 11314.0f, 11315.0f, 11316.0f, 11317.0f, 11318.0f, 11319.0f, 11320.0f, 11321.0f, 11322.0f, 11323.0f, 11324.0f, 11325.0f, 11326.0f, 11327.0f, 11328.0f, 11329.0f, 11330.0f, 11331.0f, 11332.0f, 11333.0f, 11334.0f, 11335.0f, 11336.0f, 11337.0f, 11338.0f, 11339.0f, 11340.0f, 11341.0f, 11342.0f, 11343.0f, 11344.0f, 11345.0f, 11346.0f, 11347.0f, 11348.0f, 11349.0f, 11350.0f, 11351.0f, 11352.0f, 11353.0f, 11354.0f, 11355.0f, 11356.0f, 11357.0f, 11358.0f, 11359.0f, 11360.0f, 11361.0f, 11362.0f, 11363.0f, 11364.0f, 11365.0f, 11366.0f, 11367.0f, 11368.0f, 11369.0f, 11370.0f, 11371.0f, 11372.0f, 11373.0f, 11374.0f, 11375.0f, 11376.0f, 11377.0f, 11378.0f, 11379.0f, 11380.0f, 11381.0f, 11382.0f, 11383.0f, 11384.0f, 11385.0f, 11386.0f, 11387.0f, 11388.0f, 11389.0f, 11390.0f, 11391.0f, 11392.0f, 11393.0f, 11394.0f, 11395.0f, 11396.0f, 11397.0f, 11398.0f, 11399.0f, 11400.0f, 11401.0f, 11402.0f, 11403.0f, 11404.0f, 11405.0f, 11406.0f, 11407.0f, 11408.0f, 11409.0f, 11410.0f, 11411.0f, 11412.0f, 11413.0f, 11414.0f, 11415.0f, 11416.0f, 11417.0f, 11418.0f, 11419.0f, 11420.0f, 11421.0f, 11422.0f, 11423.0f, 11424.0f, 11425.0f, 11426.0f, 11427.0f, 11428.0f, 11429.0f, 11430.0f, 11431.0f, 11432.0f, 11433.0f, 11434.0f, 11435.0f, 11436.0f, 11437.0f, 11438.0f, 11439.0f, 11440.0f, 11441.0f, 11442.0f, 11443.0f, 11444.0f, 11445.0f, 11446.0f, 11447.0f, 11448.0f, 11449.0f, 11450.0f, 11451.0f, 11452.0f, 11453.0f, 11454.0f, 11455.0f, 11456.0f, 11457.0f, 11458.0f, 11459.0f, 11460.0f, 11461.0f, 11462.0f, 11463.0f, 11464.0f, 11465.0f, 11466.0f, 11467.0f, 11468.0f, 11469.0f, 11470.0f, 11471.0f, 11472.0f, 11473.0f, 11474.0f, 11475.0f, 11476.0f, 11477.0f, 11478.0f, 11479.0f, 11480.0f, 11481.0f, 11482.0f, 11483.0f, 11484.0f, 11485.0f, 11486.0f, 11487.0f, 11488.0f, 11489.0f, 11490.0f, 11491.0f, 11492.0f, 11493.0f, 11494.0f, 11495.0f, 11496.0f, 11497.0f, 11498.0f, 11499.0f, 11500.0f, 
11501.0f, 11502.0f, 11503.0f, 11504.0f, 11505.0f, 11506.0f, 11507.0f, 11508.0f, 11509.0f, 11510.0f, 11511.0f, 11512.0f, 11513.0f, 11514.0f, 11515.0f, 11516.0f, 11517.0f, 11518.0f, 11519.0f, 11520.0f, 11521.0f, 11522.0f, 11523.0f, 11524.0f, 11525.0f, 11526.0f, 11527.0f, 11528.0f, 11529.0f, 11530.0f, 11531.0f, 11532.0f, 11533.0f, 11534.0f, 11535.0f, 11536.0f, 11537.0f, 11538.0f, 11539.0f, 11540.0f, 11541.0f, 11542.0f, 11543.0f, 11544.0f, 11545.0f, 11546.0f, 11547.0f, 11548.0f, 11549.0f, 11550.0f, 11551.0f, 11552.0f, 11553.0f, 11554.0f, 11555.0f, 11556.0f, 11557.0f, 11558.0f, 11559.0f, 11560.0f, 11561.0f, 11562.0f, 11563.0f, 11564.0f, 11565.0f, 11566.0f, 11567.0f, 11568.0f, 11569.0f, 11570.0f, 11571.0f, 11572.0f, 11573.0f, 11574.0f, 11575.0f, 11576.0f, 11577.0f, 11578.0f, 11579.0f, 11580.0f, 11581.0f, 11582.0f, 11583.0f, 11584.0f, 11585.0f, 11586.0f, 11587.0f, 11588.0f, 11589.0f, 11590.0f, 11591.0f, 11592.0f, 11593.0f, 11594.0f, 11595.0f, 11596.0f, 11597.0f, 11598.0f, 11599.0f, 11600.0f, 11601.0f, 11602.0f, 11603.0f, 11604.0f, 11605.0f, 11606.0f, 11607.0f, 11608.0f, 11609.0f, 11610.0f, 11611.0f, 11612.0f, 11613.0f, 11614.0f, 11615.0f, 11616.0f, 11617.0f, 11618.0f, 11619.0f, 11620.0f, 11621.0f, 11622.0f, 11623.0f, 11624.0f, 11625.0f, 11626.0f, 11627.0f, 11628.0f, 11629.0f, 11630.0f, 11631.0f, 11632.0f, 11633.0f, 11634.0f, 11635.0f, 11636.0f, 11637.0f, 11638.0f, 11639.0f, 11640.0f, 11641.0f, 11642.0f, 11643.0f, 11644.0f, 11645.0f, 11646.0f, 11647.0f, 11648.0f, 11649.0f, 11650.0f, 11651.0f, 11652.0f, 11653.0f, 11654.0f, 11655.0f, 11656.0f, 11657.0f, 11658.0f, 11659.0f, 11660.0f, 11661.0f, 11662.0f, 11663.0f, 11664.0f, 11665.0f, 11666.0f, 11667.0f, 11668.0f, 11669.0f, 11670.0f, 11671.0f, 11672.0f, 11673.0f, 11674.0f, 11675.0f, 11676.0f, 11677.0f, 11678.0f, 11679.0f, 11680.0f, 11681.0f, 11682.0f, 11683.0f, 11684.0f, 11685.0f, 11686.0f, 11687.0f, 11688.0f, 11689.0f, 11690.0f, 11691.0f, 11692.0f, 11693.0f, 11694.0f, 11695.0f, 11696.0f, 11697.0f, 11698.0f, 11699.0f, 11700.0f, 
11701.0f, 11702.0f, 11703.0f, 11704.0f, 11705.0f, 11706.0f, 11707.0f, 11708.0f, 11709.0f, 11710.0f, 11711.0f, 11712.0f, 11713.0f, 11714.0f, 11715.0f, 11716.0f, 11717.0f, 11718.0f, 11719.0f, 11720.0f, 11721.0f, 11722.0f, 11723.0f, 11724.0f, 11725.0f, 11726.0f, 11727.0f, 11728.0f, 11729.0f, 11730.0f, 11731.0f, 11732.0f, 11733.0f, 11734.0f, 11735.0f, 11736.0f, 11737.0f, 11738.0f, 11739.0f, 11740.0f, 11741.0f, 11742.0f, 11743.0f, 11744.0f, 11745.0f, 11746.0f, 11747.0f, 11748.0f, 11749.0f, 11750.0f, 11751.0f, 11752.0f, 11753.0f, 11754.0f, 11755.0f, 11756.0f, 11757.0f, 11758.0f, 11759.0f, 11760.0f, 11761.0f, 11762.0f, 11763.0f, 11764.0f, 11765.0f, 11766.0f, 11767.0f, 11768.0f, 11769.0f, 11770.0f, 11771.0f, 11772.0f, 11773.0f, 11774.0f, 11775.0f, 11776.0f, 11777.0f, 11778.0f, 11779.0f, 11780.0f, 11781.0f, 11782.0f, 11783.0f, 11784.0f, 11785.0f, 11786.0f, 11787.0f, 11788.0f, 11789.0f, 11790.0f, 11791.0f, 11792.0f, 11793.0f, 11794.0f, 11795.0f, 11796.0f, 11797.0f, 11798.0f, 11799.0f, 11800.0f, 11801.0f, 11802.0f, 11803.0f, 11804.0f, 11805.0f, 11806.0f, 11807.0f, 11808.0f, 11809.0f, 11810.0f, 11811.0f, 11812.0f, 11813.0f, 11814.0f, 11815.0f, 11816.0f, 11817.0f, 11818.0f, 11819.0f, 11820.0f, 11821.0f, 11822.0f, 11823.0f, 11824.0f, 11825.0f, 11826.0f, 11827.0f, 11828.0f, 11829.0f, 11830.0f, 11831.0f, 11832.0f, 11833.0f, 11834.0f, 11835.0f, 11836.0f, 11837.0f, 11838.0f, 11839.0f, 11840.0f, 11841.0f, 11842.0f, 11843.0f, 11844.0f, 11845.0f, 11846.0f, 11847.0f, 11848.0f, 11849.0f, 11850.0f, 11851.0f, 11852.0f, 11853.0f, 11854.0f, 11855.0f, 11856.0f, 11857.0f, 11858.0f, 11859.0f, 11860.0f, 11861.0f, 11862.0f, 11863.0f, 11864.0f, 11865.0f, 11866.0f, 11867.0f, 11868.0f, 11869.0f, 11870.0f, 11871.0f, 11872.0f, 11873.0f, 11874.0f, 11875.0f, 11876.0f, 11877.0f, 11878.0f, 11879.0f, 11880.0f, 11881.0f, 11882.0f, 11883.0f, 11884.0f, 11885.0f, 11886.0f, 11887.0f, 11888.0f, 11889.0f, 11890.0f, 11891.0f, 11892.0f, 11893.0f, 11894.0f, 11895.0f, 11896.0f, 11897.0f, 11898.0f, 11899.0f, 11900.0f, 
11901.0f, 11902.0f, 11903.0f, 11904.0f, 11905.0f, 11906.0f, 11907.0f, 11908.0f, 11909.0f, 11910.0f, 11911.0f, 11912.0f, 11913.0f, 11914.0f, 11915.0f, 11916.0f, 11917.0f, 11918.0f, 11919.0f, 11920.0f, 11921.0f, 11922.0f, 11923.0f, 11924.0f, 11925.0f, 11926.0f, 11927.0f, 11928.0f, 11929.0f, 11930.0f, 11931.0f, 11932.0f, 11933.0f, 11934.0f, 11935.0f, 11936.0f, 11937.0f, 11938.0f, 11939.0f, 11940.0f, 11941.0f, 11942.0f, 11943.0f, 11944.0f, 11945.0f, 11946.0f, 11947.0f, 11948.0f, 11949.0f, 11950.0f, 11951.0f, 11952.0f, 11953.0f, 11954.0f, 11955.0f, 11956.0f, 11957.0f, 11958.0f, 11959.0f, 11960.0f, 11961.0f, 11962.0f, 11963.0f, 11964.0f, 11965.0f, 11966.0f, 11967.0f, 11968.0f, 11969.0f, 11970.0f, 11971.0f, 11972.0f, 11973.0f, 11974.0f, 11975.0f, 11976.0f, 11977.0f, 11978.0f, 11979.0f, 11980.0f, 11981.0f, 11982.0f, 11983.0f, 11984.0f, 11985.0f, 11986.0f, 11987.0f, 11988.0f, 11989.0f, 11990.0f, 11991.0f, 11992.0f, 11993.0f, 11994.0f, 11995.0f, 11996.0f, 11997.0f, 11998.0f, 11999.0f, 12000.0f, 12001.0f, 12002.0f, 12003.0f, 12004.0f, 12005.0f, 12006.0f, 12007.0f, 12008.0f, 12009.0f, 12010.0f, 12011.0f, 12012.0f, 12013.0f, 12014.0f, 12015.0f, 12016.0f, 12017.0f, 12018.0f, 12019.0f, 12020.0f, 12021.0f, 12022.0f, 12023.0f, 12024.0f, 12025.0f, 12026.0f, 12027.0f, 12028.0f, 12029.0f, 12030.0f, 12031.0f, 12032.0f, 12033.0f, 12034.0f, 12035.0f, 12036.0f, 12037.0f, 12038.0f, 12039.0f, 12040.0f, 12041.0f, 12042.0f, 12043.0f, 12044.0f, 12045.0f, 12046.0f, 12047.0f, 12048.0f, 12049.0f, 12050.0f, 12051.0f, 12052.0f, 12053.0f, 12054.0f, 12055.0f, 12056.0f, 12057.0f, 12058.0f, 12059.0f, 12060.0f, 12061.0f, 12062.0f, 12063.0f, 12064.0f, 12065.0f, 12066.0f, 12067.0f, 12068.0f, 12069.0f, 12070.0f, 12071.0f, 12072.0f, 12073.0f, 12074.0f, 12075.0f, 12076.0f, 12077.0f, 12078.0f, 12079.0f, 12080.0f, 12081.0f, 12082.0f, 12083.0f, 12084.0f, 12085.0f, 12086.0f, 12087.0f, 12088.0f, 12089.0f, 12090.0f, 12091.0f, 12092.0f, 12093.0f, 12094.0f, 12095.0f, 12096.0f, 12097.0f, 12098.0f, 12099.0f, 12100.0f, 
12101.0f, 12102.0f, 12103.0f, 12104.0f, 12105.0f, 12106.0f, 12107.0f, 12108.0f, 12109.0f, 12110.0f, 12111.0f, 12112.0f, 12113.0f, 12114.0f, 12115.0f, 12116.0f, 12117.0f, 12118.0f, 12119.0f, 12120.0f, 12121.0f, 12122.0f, 12123.0f, 12124.0f, 12125.0f, 12126.0f, 12127.0f, 12128.0f, 12129.0f, 12130.0f, 12131.0f, 12132.0f, 12133.0f, 12134.0f, 12135.0f, 12136.0f, 12137.0f, 12138.0f, 12139.0f, 12140.0f, 12141.0f, 12142.0f, 12143.0f, 12144.0f, 12145.0f, 12146.0f, 12147.0f, 12148.0f, 12149.0f, 12150.0f, 12151.0f, 12152.0f, 12153.0f, 12154.0f, 12155.0f, 12156.0f, 12157.0f, 12158.0f, 12159.0f, 12160.0f, 12161.0f, 12162.0f, 12163.0f, 12164.0f, 12165.0f, 12166.0f, 12167.0f, 12168.0f, 12169.0f, 12170.0f, 12171.0f, 12172.0f, 12173.0f, 12174.0f, 12175.0f, 12176.0f, 12177.0f, 12178.0f, 12179.0f, 12180.0f, 12181.0f, 12182.0f, 12183.0f, 12184.0f, 12185.0f, 12186.0f, 12187.0f, 12188.0f, 12189.0f, 12190.0f, 12191.0f, 12192.0f, 12193.0f, 12194.0f, 12195.0f, 12196.0f, 12197.0f, 12198.0f, 12199.0f, 12200.0f, 12201.0f, 12202.0f, 12203.0f, 12204.0f, 12205.0f, 12206.0f, 12207.0f, 12208.0f, 12209.0f, 12210.0f, 12211.0f, 12212.0f, 12213.0f, 12214.0f, 12215.0f, 12216.0f, 12217.0f, 12218.0f, 12219.0f, 12220.0f, 12221.0f, 12222.0f, 12223.0f, 12224.0f, 12225.0f, 12226.0f, 12227.0f, 12228.0f, 12229.0f, 12230.0f, 12231.0f, 12232.0f, 12233.0f, 12234.0f, 12235.0f, 12236.0f, 12237.0f, 12238.0f, 12239.0f, 12240.0f, 12241.0f, 12242.0f, 12243.0f, 12244.0f, 12245.0f, 12246.0f, 12247.0f, 12248.0f, 12249.0f, 12250.0f, 12251.0f, 12252.0f, 12253.0f, 12254.0f, 12255.0f, 12256.0f, 12257.0f, 12258.0f, 12259.0f, 12260.0f, 12261.0f, 12262.0f, 12263.0f, 12264.0f, 12265.0f, 12266.0f, 12267.0f, 12268.0f, 12269.0f, 12270.0f, 12271.0f, 12272.0f, 12273.0f, 12274.0f, 12275.0f, 12276.0f, 12277.0f, 12278.0f, 12279.0f, 12280.0f, 12281.0f, 12282.0f, 12283.0f, 12284.0f, 12285.0f, 12286.0f, 12287.0f, 12288.0f, 12289.0f, 12290.0f, 12291.0f, 12292.0f, 12293.0f, 12294.0f, 12295.0f, 12296.0f, 12297.0f, 12298.0f, 12299.0f, 12300.0f, 
12301.0f, 12302.0f, 12303.0f, 12304.0f, 12305.0f, 12306.0f, 12307.0f, 12308.0f, 12309.0f, 12310.0f, 12311.0f, 12312.0f, 12313.0f, 12314.0f, 12315.0f, 12316.0f, 12317.0f, 12318.0f, 12319.0f, 12320.0f, 12321.0f, 12322.0f, 12323.0f, 12324.0f, 12325.0f, 12326.0f, 12327.0f, 12328.0f, 12329.0f, 12330.0f, 12331.0f, 12332.0f, 12333.0f, 12334.0f, 12335.0f, 12336.0f, 12337.0f, 12338.0f, 12339.0f, 12340.0f, 12341.0f, 12342.0f, 12343.0f, 12344.0f, 12345.0f, 12346.0f, 12347.0f, 12348.0f, 12349.0f, 12350.0f, 12351.0f, 12352.0f, 12353.0f, 12354.0f, 12355.0f, 12356.0f, 12357.0f, 12358.0f, 12359.0f, 12360.0f, 12361.0f, 12362.0f, 12363.0f, 12364.0f, 12365.0f, 12366.0f, 12367.0f, 12368.0f, 12369.0f, 12370.0f, 12371.0f, 12372.0f, 12373.0f, 12374.0f, 12375.0f, 12376.0f, 12377.0f, 12378.0f, 12379.0f, 12380.0f, 12381.0f, 12382.0f, 12383.0f, 12384.0f, 12385.0f, 12386.0f, 12387.0f, 12388.0f, 12389.0f, 12390.0f, 12391.0f, 12392.0f, 12393.0f, 12394.0f, 12395.0f, 12396.0f, 12397.0f, 12398.0f, 12399.0f, 12400.0f, 12401.0f, 12402.0f, 12403.0f, 12404.0f, 12405.0f, 12406.0f, 12407.0f, 12408.0f, 12409.0f, 12410.0f, 12411.0f, 12412.0f, 12413.0f, 12414.0f, 12415.0f, 12416.0f, 12417.0f, 12418.0f, 12419.0f, 12420.0f, 12421.0f, 12422.0f, 12423.0f, 12424.0f, 12425.0f, 12426.0f, 12427.0f, 12428.0f, 12429.0f, 12430.0f, 12431.0f, 12432.0f, 12433.0f, 12434.0f, 12435.0f, 12436.0f, 12437.0f, 12438.0f, 12439.0f, 12440.0f, 12441.0f, 12442.0f, 12443.0f, 12444.0f, 12445.0f, 12446.0f, 12447.0f, 12448.0f, 12449.0f, 12450.0f, 12451.0f, 12452.0f, 12453.0f, 12454.0f, 12455.0f, 12456.0f, 12457.0f, 12458.0f, 12459.0f, 12460.0f, 12461.0f, 12462.0f, 12463.0f, 12464.0f, 12465.0f, 12466.0f, 12467.0f, 12468.0f, 12469.0f, 12470.0f, 12471.0f, 12472.0f, 12473.0f, 12474.0f, 12475.0f, 12476.0f, 12477.0f, 12478.0f, 12479.0f, 12480.0f, 12481.0f, 12482.0f, 12483.0f, 12484.0f, 12485.0f, 12486.0f, 12487.0f, 12488.0f, 12489.0f, 12490.0f, 12491.0f, 12492.0f, 12493.0f, 12494.0f, 12495.0f, 12496.0f, 12497.0f, 12498.0f, 12499.0f, 12500.0f, 
12501.0f, 12502.0f, 12503.0f, 12504.0f, 12505.0f, 12506.0f, 12507.0f, 12508.0f, 12509.0f, 12510.0f, 12511.0f, 12512.0f, 12513.0f, 12514.0f, 12515.0f, 12516.0f, 12517.0f, 12518.0f, 12519.0f, 12520.0f, 12521.0f, 12522.0f, 12523.0f, 12524.0f, 12525.0f, 12526.0f, 12527.0f, 12528.0f, 12529.0f, 12530.0f, 12531.0f, 12532.0f, 12533.0f, 12534.0f, 12535.0f, 12536.0f, 12537.0f, 12538.0f, 12539.0f, 12540.0f, 12541.0f, 12542.0f, 12543.0f, 12544.0f, 12545.0f, 12546.0f, 12547.0f, 12548.0f, 12549.0f, 12550.0f, 12551.0f, 12552.0f, 12553.0f, 12554.0f, 12555.0f, 12556.0f, 12557.0f, 12558.0f, 12559.0f, 12560.0f, 12561.0f, 12562.0f, 12563.0f, 12564.0f, 12565.0f, 12566.0f, 12567.0f, 12568.0f, 12569.0f, 12570.0f, 12571.0f, 12572.0f, 12573.0f, 12574.0f, 12575.0f, 12576.0f, 12577.0f, 12578.0f, 12579.0f, 12580.0f, 12581.0f, 12582.0f, 12583.0f, 12584.0f, 12585.0f, 12586.0f, 12587.0f, 12588.0f, 12589.0f, 12590.0f, 12591.0f, 12592.0f, 12593.0f, 12594.0f, 12595.0f, 12596.0f, 12597.0f, 12598.0f, 12599.0f, 12600.0f, 12601.0f, 12602.0f, 12603.0f, 12604.0f, 12605.0f, 12606.0f, 12607.0f, 12608.0f, 12609.0f, 12610.0f, 12611.0f, 12612.0f, 12613.0f, 12614.0f, 12615.0f, 12616.0f, 12617.0f, 12618.0f, 12619.0f, 12620.0f, 12621.0f, 12622.0f, 12623.0f, 12624.0f, 12625.0f, 12626.0f, 12627.0f, 12628.0f, 12629.0f, 12630.0f, 12631.0f, 12632.0f, 12633.0f, 12634.0f, 12635.0f, 12636.0f, 12637.0f, 12638.0f, 12639.0f, 12640.0f, 12641.0f, 12642.0f, 12643.0f, 12644.0f, 12645.0f, 12646.0f, 12647.0f, 12648.0f, 12649.0f, 12650.0f, 12651.0f, 12652.0f, 12653.0f, 12654.0f, 12655.0f, 12656.0f, 12657.0f, 12658.0f, 12659.0f, 12660.0f, 12661.0f, 12662.0f, 12663.0f, 12664.0f, 12665.0f, 12666.0f, 12667.0f, 12668.0f, 12669.0f, 12670.0f, 12671.0f, 12672.0f, 12673.0f, 12674.0f, 12675.0f, 12676.0f, 12677.0f, 12678.0f, 12679.0f, 12680.0f, 12681.0f, 12682.0f, 12683.0f, 12684.0f, 12685.0f, 12686.0f, 12687.0f, 12688.0f, 12689.0f, 12690.0f, 12691.0f, 12692.0f, 12693.0f, 12694.0f, 12695.0f, 12696.0f, 12697.0f, 12698.0f, 12699.0f, 12700.0f, 
12701.0f, 12702.0f, 12703.0f, 12704.0f, 12705.0f, 12706.0f, 12707.0f, 12708.0f, 12709.0f, 12710.0f, 12711.0f, 12712.0f, 12713.0f, 12714.0f, 12715.0f, 12716.0f, 12717.0f, 12718.0f, 12719.0f, 12720.0f, 12721.0f, 12722.0f, 12723.0f, 12724.0f, 12725.0f, 12726.0f, 12727.0f, 12728.0f, 12729.0f, 12730.0f, 12731.0f, 12732.0f, 12733.0f, 12734.0f, 12735.0f, 12736.0f, 12737.0f, 12738.0f, 12739.0f, 12740.0f, 12741.0f, 12742.0f, 12743.0f, 12744.0f, 12745.0f, 12746.0f, 12747.0f, 12748.0f, 12749.0f, 12750.0f, 12751.0f, 12752.0f, 12753.0f, 12754.0f, 12755.0f, 12756.0f, 12757.0f, 12758.0f, 12759.0f, 12760.0f, 12761.0f, 12762.0f, 12763.0f, 12764.0f, 12765.0f, 12766.0f, 12767.0f, 12768.0f, 12769.0f, 12770.0f, 12771.0f, 12772.0f, 12773.0f, 12774.0f, 12775.0f, 12776.0f, 12777.0f, 12778.0f, 12779.0f, 12780.0f, 12781.0f, 12782.0f, 12783.0f, 12784.0f, 12785.0f, 12786.0f, 12787.0f, 12788.0f, 12789.0f, 12790.0f, 12791.0f, 12792.0f, 12793.0f, 12794.0f, 12795.0f, 12796.0f, 12797.0f, 12798.0f, 12799.0f, 12800.0f, 12801.0f, 12802.0f, 12803.0f, 12804.0f, 12805.0f, 12806.0f, 12807.0f, 12808.0f, 12809.0f, 12810.0f, 12811.0f, 12812.0f, 12813.0f, 12814.0f, 12815.0f, 12816.0f, 12817.0f, 12818.0f, 12819.0f, 12820.0f, 12821.0f, 12822.0f, 12823.0f, 12824.0f, 12825.0f, 12826.0f, 12827.0f, 12828.0f, 12829.0f, 12830.0f, 12831.0f, 12832.0f, 12833.0f, 12834.0f, 12835.0f, 12836.0f, 12837.0f, 12838.0f, 12839.0f, 12840.0f, 12841.0f, 12842.0f, 12843.0f, 12844.0f, 12845.0f, 12846.0f, 12847.0f, 12848.0f, 12849.0f, 12850.0f, 12851.0f, 12852.0f, 12853.0f, 12854.0f, 12855.0f, 12856.0f, 12857.0f, 12858.0f, 12859.0f, 12860.0f, 12861.0f, 12862.0f, 12863.0f, 12864.0f, 12865.0f, 12866.0f, 12867.0f, 12868.0f, 12869.0f, 12870.0f, 12871.0f, 12872.0f, 12873.0f, 12874.0f, 12875.0f, 12876.0f, 12877.0f, 12878.0f, 12879.0f, 12880.0f, 12881.0f, 12882.0f, 12883.0f, 12884.0f, 12885.0f, 12886.0f, 12887.0f, 12888.0f, 12889.0f, 12890.0f, 12891.0f, 12892.0f, 12893.0f, 12894.0f, 12895.0f, 12896.0f, 12897.0f, 12898.0f, 12899.0f, 12900.0f, 
12901.0f, 12902.0f, 12903.0f, 12904.0f, 12905.0f, 12906.0f, 12907.0f, 12908.0f, 12909.0f, 12910.0f, 12911.0f, 12912.0f, 12913.0f, 12914.0f, 12915.0f, 12916.0f, 12917.0f, 12918.0f, 12919.0f, 12920.0f, 12921.0f, 12922.0f, 12923.0f, 12924.0f, 12925.0f, 12926.0f, 12927.0f, 12928.0f, 12929.0f, 12930.0f, 12931.0f, 12932.0f, 12933.0f, 12934.0f, 12935.0f, 12936.0f, 12937.0f, 12938.0f, 12939.0f, 12940.0f, 12941.0f, 12942.0f, 12943.0f, 12944.0f, 12945.0f, 12946.0f, 12947.0f, 12948.0f, 12949.0f, 12950.0f, 12951.0f, 12952.0f, 12953.0f, 12954.0f, 12955.0f, 12956.0f, 12957.0f, 12958.0f, 12959.0f, 12960.0f, 12961.0f, 12962.0f, 12963.0f, 12964.0f, 12965.0f, 12966.0f, 12967.0f, 12968.0f, 12969.0f, 12970.0f, 12971.0f, 12972.0f, 12973.0f, 12974.0f, 12975.0f, 12976.0f, 12977.0f, 12978.0f, 12979.0f, 12980.0f, 12981.0f, 12982.0f, 12983.0f, 12984.0f, 12985.0f, 12986.0f, 12987.0f, 12988.0f, 12989.0f, 12990.0f, 12991.0f, 12992.0f, 12993.0f, 12994.0f, 12995.0f, 12996.0f, 12997.0f, 12998.0f, 12999.0f, 13000.0f, 13001.0f, 13002.0f, 13003.0f, 13004.0f, 13005.0f, 13006.0f, 13007.0f, 13008.0f, 13009.0f, 13010.0f, 13011.0f, 13012.0f, 13013.0f, 13014.0f, 13015.0f, 13016.0f, 13017.0f, 13018.0f, 13019.0f, 13020.0f, 13021.0f, 13022.0f, 13023.0f, 13024.0f, 13025.0f, 13026.0f, 13027.0f, 13028.0f, 13029.0f, 13030.0f, 13031.0f, 13032.0f, 13033.0f, 13034.0f, 13035.0f, 13036.0f, 13037.0f, 13038.0f, 13039.0f, 13040.0f, 13041.0f, 13042.0f, 13043.0f, 13044.0f, 13045.0f, 13046.0f, 13047.0f, 13048.0f, 13049.0f, 13050.0f, 13051.0f, 13052.0f, 13053.0f, 13054.0f, 13055.0f, 13056.0f, 13057.0f, 13058.0f, 13059.0f, 13060.0f, 13061.0f, 13062.0f, 13063.0f, 13064.0f, 13065.0f, 13066.0f, 13067.0f, 13068.0f, 13069.0f, 13070.0f, 13071.0f, 13072.0f, 13073.0f, 13074.0f, 13075.0f, 13076.0f, 13077.0f, 13078.0f, 13079.0f, 13080.0f, 13081.0f, 13082.0f, 13083.0f, 13084.0f, 13085.0f, 13086.0f, 13087.0f, 13088.0f, 13089.0f, 13090.0f, 13091.0f, 13092.0f, 13093.0f, 13094.0f, 13095.0f, 13096.0f, 13097.0f, 13098.0f, 13099.0f, 13100.0f, 
13101.0f, 13102.0f, 13103.0f, 13104.0f, 13105.0f, 13106.0f, 13107.0f, 13108.0f, 13109.0f, 13110.0f, 13111.0f, 13112.0f, 13113.0f, 13114.0f, 13115.0f, 13116.0f, 13117.0f, 13118.0f, 13119.0f, 13120.0f, 13121.0f, 13122.0f, 13123.0f, 13124.0f, 13125.0f, 13126.0f, 13127.0f, 13128.0f, 13129.0f, 13130.0f, 13131.0f, 13132.0f, 13133.0f, 13134.0f, 13135.0f, 13136.0f, 13137.0f, 13138.0f, 13139.0f, 13140.0f, 13141.0f, 13142.0f, 13143.0f, 13144.0f, 13145.0f, 13146.0f, 13147.0f, 13148.0f, 13149.0f, 13150.0f, 13151.0f, 13152.0f, 13153.0f, 13154.0f, 13155.0f, 13156.0f, 13157.0f, 13158.0f, 13159.0f, 13160.0f, 13161.0f, 13162.0f, 13163.0f, 13164.0f, 13165.0f, 13166.0f, 13167.0f, 13168.0f, 13169.0f, 13170.0f, 13171.0f, 13172.0f, 13173.0f, 13174.0f, 13175.0f, 13176.0f, 13177.0f, 13178.0f, 13179.0f, 13180.0f, 13181.0f, 13182.0f, 13183.0f, 13184.0f, 13185.0f, 13186.0f, 13187.0f, 13188.0f, 13189.0f, 13190.0f, 13191.0f, 13192.0f, 13193.0f, 13194.0f, 13195.0f, 13196.0f, 13197.0f, 13198.0f, 13199.0f, 13200.0f, 13201.0f, 13202.0f, 13203.0f, 13204.0f, 13205.0f, 13206.0f, 13207.0f, 13208.0f, 13209.0f, 13210.0f, 13211.0f, 13212.0f, 13213.0f, 13214.0f, 13215.0f, 13216.0f, 13217.0f, 13218.0f, 13219.0f, 13220.0f, 13221.0f, 13222.0f, 13223.0f, 13224.0f, 13225.0f, 13226.0f, 13227.0f, 13228.0f, 13229.0f, 13230.0f, 13231.0f, 13232.0f, 13233.0f, 13234.0f, 13235.0f, 13236.0f, 13237.0f, 13238.0f, 13239.0f, 13240.0f, 13241.0f, 13242.0f, 13243.0f, 13244.0f, 13245.0f, 13246.0f, 13247.0f, 13248.0f, 13249.0f, 13250.0f, 13251.0f, 13252.0f, 13253.0f, 13254.0f, 13255.0f, 13256.0f, 13257.0f, 13258.0f, 13259.0f, 13260.0f, 13261.0f, 13262.0f, 13263.0f, 13264.0f, 13265.0f, 13266.0f, 13267.0f, 13268.0f, 13269.0f, 13270.0f, 13271.0f, 13272.0f, 13273.0f, 13274.0f, 13275.0f, 13276.0f, 13277.0f, 13278.0f, 13279.0f, 13280.0f, 13281.0f, 13282.0f, 13283.0f, 13284.0f, 13285.0f, 13286.0f, 13287.0f, 13288.0f, 13289.0f, 13290.0f, 13291.0f, 13292.0f, 13293.0f, 13294.0f, 13295.0f, 13296.0f, 13297.0f, 13298.0f, 13299.0f, 13300.0f, 
13301.0f, 13302.0f, 13303.0f, 13304.0f, 13305.0f, 13306.0f, 13307.0f, 13308.0f, 13309.0f, 13310.0f, 13311.0f, 13312.0f, 13313.0f, 13314.0f, 13315.0f, 13316.0f, 13317.0f, 13318.0f, 13319.0f, 13320.0f, 13321.0f, 13322.0f, 13323.0f, 13324.0f, 13325.0f, 13326.0f, 13327.0f, 13328.0f, 13329.0f, 13330.0f, 13331.0f, 13332.0f, 13333.0f, 13334.0f, 13335.0f, 13336.0f, 13337.0f, 13338.0f, 13339.0f, 13340.0f, 13341.0f, 13342.0f, 13343.0f, 13344.0f, 13345.0f, 13346.0f, 13347.0f, 13348.0f, 13349.0f, 13350.0f, 13351.0f, 13352.0f, 13353.0f, 13354.0f, 13355.0f, 13356.0f, 13357.0f, 13358.0f, 13359.0f, 13360.0f, 13361.0f, 13362.0f, 13363.0f, 13364.0f, 13365.0f, 13366.0f, 13367.0f, 13368.0f, 13369.0f, 13370.0f, 13371.0f, 13372.0f, 13373.0f, 13374.0f, 13375.0f, 13376.0f, 13377.0f, 13378.0f, 13379.0f, 13380.0f, 13381.0f, 13382.0f, 13383.0f, 13384.0f, 13385.0f, 13386.0f, 13387.0f, 13388.0f, 13389.0f, 13390.0f, 13391.0f, 13392.0f, 13393.0f, 13394.0f, 13395.0f, 13396.0f, 13397.0f, 13398.0f, 13399.0f, 13400.0f, 13401.0f, 13402.0f, 13403.0f, 13404.0f, 13405.0f, 13406.0f, 13407.0f, 13408.0f, 13409.0f, 13410.0f, 13411.0f, 13412.0f, 13413.0f, 13414.0f, 13415.0f, 13416.0f, 13417.0f, 13418.0f, 13419.0f, 13420.0f, 13421.0f, 13422.0f, 13423.0f, 13424.0f, 13425.0f, 13426.0f, 13427.0f, 13428.0f, 13429.0f, 13430.0f, 13431.0f, 13432.0f, 13433.0f, 13434.0f, 13435.0f, 13436.0f, 13437.0f, 13438.0f, 13439.0f, 13440.0f, 13441.0f, 13442.0f, 13443.0f, 13444.0f, 13445.0f, 13446.0f, 13447.0f, 13448.0f, 13449.0f, 13450.0f, 13451.0f, 13452.0f, 13453.0f, 13454.0f, 13455.0f, 13456.0f, 13457.0f, 13458.0f, 13459.0f, 13460.0f, 13461.0f, 13462.0f, 13463.0f, 13464.0f, 13465.0f, 13466.0f, 13467.0f, 13468.0f, 13469.0f, 13470.0f, 13471.0f, 13472.0f, 13473.0f, 13474.0f, 13475.0f, 13476.0f, 13477.0f, 13478.0f, 13479.0f, 13480.0f, 13481.0f, 13482.0f, 13483.0f, 13484.0f, 13485.0f, 13486.0f, 13487.0f, 13488.0f, 13489.0f, 13490.0f, 13491.0f, 13492.0f, 13493.0f, 13494.0f, 13495.0f, 13496.0f, 13497.0f, 13498.0f, 13499.0f, 13500.0f, 
13501.0f, 13502.0f, 13503.0f, 13504.0f, 13505.0f, 13506.0f, 13507.0f, 13508.0f, 13509.0f, 13510.0f, 13511.0f, 13512.0f, 13513.0f, 13514.0f, 13515.0f, 13516.0f, 13517.0f, 13518.0f, 13519.0f, 13520.0f, 13521.0f, 13522.0f, 13523.0f, 13524.0f, 13525.0f, 13526.0f, 13527.0f, 13528.0f, 13529.0f, 13530.0f, 13531.0f, 13532.0f, 13533.0f, 13534.0f, 13535.0f, 13536.0f, 13537.0f, 13538.0f, 13539.0f, 13540.0f, 13541.0f, 13542.0f, 13543.0f, 13544.0f, 13545.0f, 13546.0f, 13547.0f, 13548.0f, 13549.0f, 13550.0f, 13551.0f, 13552.0f, 13553.0f, 13554.0f, 13555.0f, 13556.0f, 13557.0f, 13558.0f, 13559.0f, 13560.0f, 13561.0f, 13562.0f, 13563.0f, 13564.0f, 13565.0f, 13566.0f, 13567.0f, 13568.0f, 13569.0f, 13570.0f, 13571.0f, 13572.0f, 13573.0f, 13574.0f, 13575.0f, 13576.0f, 13577.0f, 13578.0f, 13579.0f, 13580.0f, 13581.0f, 13582.0f, 13583.0f, 13584.0f, 13585.0f, 13586.0f, 13587.0f, 13588.0f, 13589.0f, 13590.0f, 13591.0f, 13592.0f, 13593.0f, 13594.0f, 13595.0f, 13596.0f, 13597.0f, 13598.0f, 13599.0f, 13600.0f, 13601.0f, 13602.0f, 13603.0f, 13604.0f, 13605.0f, 13606.0f, 13607.0f, 13608.0f, 13609.0f, 13610.0f, 13611.0f, 13612.0f, 13613.0f, 13614.0f, 13615.0f, 13616.0f, 13617.0f, 13618.0f, 13619.0f, 13620.0f, 13621.0f, 13622.0f, 13623.0f, 13624.0f, 13625.0f, 13626.0f, 13627.0f, 13628.0f, 13629.0f, 13630.0f, 13631.0f, 13632.0f, 13633.0f, 13634.0f, 13635.0f, 13636.0f, 13637.0f, 13638.0f, 13639.0f, 13640.0f, 13641.0f, 13642.0f, 13643.0f, 13644.0f, 13645.0f, 13646.0f, 13647.0f, 13648.0f, 13649.0f, 13650.0f, 13651.0f, 13652.0f, 13653.0f, 13654.0f, 13655.0f, 13656.0f, 13657.0f, 13658.0f, 13659.0f, 13660.0f, 13661.0f, 13662.0f, 13663.0f, 13664.0f, 13665.0f, 13666.0f, 13667.0f, 13668.0f, 13669.0f, 13670.0f, 13671.0f, 13672.0f, 13673.0f, 13674.0f, 13675.0f, 13676.0f, 13677.0f, 13678.0f, 13679.0f, 13680.0f, 13681.0f, 13682.0f, 13683.0f, 13684.0f, 13685.0f, 13686.0f, 13687.0f, 13688.0f, 13689.0f, 13690.0f, 13691.0f, 13692.0f, 13693.0f, 13694.0f, 13695.0f, 13696.0f, 13697.0f, 13698.0f, 13699.0f, 13700.0f, 
13701.0f, 13702.0f, 13703.0f, 13704.0f, 13705.0f, 13706.0f, 13707.0f, 13708.0f, 13709.0f, 13710.0f, 13711.0f, 13712.0f, 13713.0f, 13714.0f, 13715.0f, 13716.0f, 13717.0f, 13718.0f, 13719.0f, 13720.0f, 13721.0f, 13722.0f, 13723.0f, 13724.0f, 13725.0f, 13726.0f, 13727.0f, 13728.0f, 13729.0f, 13730.0f, 13731.0f, 13732.0f, 13733.0f, 13734.0f, 13735.0f, 13736.0f, 13737.0f, 13738.0f, 13739.0f, 13740.0f, 13741.0f, 13742.0f, 13743.0f, 13744.0f, 13745.0f, 13746.0f, 13747.0f, 13748.0f, 13749.0f, 13750.0f, 13751.0f, 13752.0f, 13753.0f, 13754.0f, 13755.0f, 13756.0f, 13757.0f, 13758.0f, 13759.0f, 13760.0f, 13761.0f, 13762.0f, 13763.0f, 13764.0f, 13765.0f, 13766.0f, 13767.0f, 13768.0f, 13769.0f, 13770.0f, 13771.0f, 13772.0f, 13773.0f, 13774.0f, 13775.0f, 13776.0f, 13777.0f, 13778.0f, 13779.0f, 13780.0f, 13781.0f, 13782.0f, 13783.0f, 13784.0f, 13785.0f, 13786.0f, 13787.0f, 13788.0f, 13789.0f, 13790.0f, 13791.0f, 13792.0f, 13793.0f, 13794.0f, 13795.0f, 13796.0f, 13797.0f, 13798.0f, 13799.0f, 13800.0f, 13801.0f, 13802.0f, 13803.0f, 13804.0f, 13805.0f, 13806.0f, 13807.0f, 13808.0f, 13809.0f, 13810.0f, 13811.0f, 13812.0f, 13813.0f, 13814.0f, 13815.0f, 13816.0f, 13817.0f, 13818.0f, 13819.0f, 13820.0f, 13821.0f, 13822.0f, 13823.0f, 13824.0f, 13825.0f, 13826.0f, 13827.0f, 13828.0f, 13829.0f, 13830.0f, 13831.0f, 13832.0f, 13833.0f, 13834.0f, 13835.0f, 13836.0f, 13837.0f, 13838.0f, 13839.0f, 13840.0f, 13841.0f, 13842.0f, 13843.0f, 13844.0f, 13845.0f, 13846.0f, 13847.0f, 13848.0f, 13849.0f, 13850.0f, 13851.0f, 13852.0f, 13853.0f, 13854.0f, 13855.0f, 13856.0f, 13857.0f, 13858.0f, 13859.0f, 13860.0f, 13861.0f, 13862.0f, 13863.0f, 13864.0f, 13865.0f, 13866.0f, 13867.0f, 13868.0f, 13869.0f, 13870.0f, 13871.0f, 13872.0f, 13873.0f, 13874.0f, 13875.0f, 13876.0f, 13877.0f, 13878.0f, 13879.0f, 13880.0f, 13881.0f, 13882.0f, 13883.0f, 13884.0f, 13885.0f, 13886.0f, 13887.0f, 13888.0f, 13889.0f, 13890.0f, 13891.0f, 13892.0f, 13893.0f, 13894.0f, 13895.0f, 13896.0f, 13897.0f, 13898.0f, 13899.0f, 13900.0f, 
13901.0f, 13902.0f, 13903.0f, 13904.0f, 13905.0f, 13906.0f, 13907.0f, 13908.0f, 13909.0f, 13910.0f, 13911.0f, 13912.0f, 13913.0f, 13914.0f, 13915.0f, 13916.0f, 13917.0f, 13918.0f, 13919.0f, 13920.0f, 13921.0f, 13922.0f, 13923.0f, 13924.0f, 13925.0f, 13926.0f, 13927.0f, 13928.0f, 13929.0f, 13930.0f, 13931.0f, 13932.0f, 13933.0f, 13934.0f, 13935.0f, 13936.0f, 13937.0f, 13938.0f, 13939.0f, 13940.0f, 13941.0f, 13942.0f, 13943.0f, 13944.0f, 13945.0f, 13946.0f, 13947.0f, 13948.0f, 13949.0f, 13950.0f, 13951.0f, 13952.0f, 13953.0f, 13954.0f, 13955.0f, 13956.0f, 13957.0f, 13958.0f, 13959.0f, 13960.0f, 13961.0f, 13962.0f, 13963.0f, 13964.0f, 13965.0f, 13966.0f, 13967.0f, 13968.0f, 13969.0f, 13970.0f, 13971.0f, 13972.0f, 13973.0f, 13974.0f, 13975.0f, 13976.0f, 13977.0f, 13978.0f, 13979.0f, 13980.0f, 13981.0f, 13982.0f, 13983.0f, 13984.0f, 13985.0f, 13986.0f, 13987.0f, 13988.0f, 13989.0f, 13990.0f, 13991.0f, 13992.0f, 13993.0f, 13994.0f, 13995.0f, 13996.0f, 13997.0f, 13998.0f, 13999.0f, 14000.0f, 14001.0f, 14002.0f, 14003.0f, 14004.0f, 14005.0f, 14006.0f, 14007.0f, 14008.0f, 14009.0f, 14010.0f, 14011.0f, 14012.0f, 14013.0f, 14014.0f, 14015.0f, 14016.0f, 14017.0f, 14018.0f, 14019.0f, 14020.0f, 14021.0f, 14022.0f, 14023.0f, 14024.0f, 14025.0f, 14026.0f, 14027.0f, 14028.0f, 14029.0f, 14030.0f, 14031.0f, 14032.0f, 14033.0f, 14034.0f, 14035.0f, 14036.0f, 14037.0f, 14038.0f, 14039.0f, 14040.0f, 14041.0f, 14042.0f, 14043.0f, 14044.0f, 14045.0f, 14046.0f, 14047.0f, 14048.0f, 14049.0f, 14050.0f, 14051.0f, 14052.0f, 14053.0f, 14054.0f, 14055.0f, 14056.0f, 14057.0f, 14058.0f, 14059.0f, 14060.0f, 14061.0f, 14062.0f, 14063.0f, 14064.0f, 14065.0f, 14066.0f, 14067.0f, 14068.0f, 14069.0f, 14070.0f, 14071.0f, 14072.0f, 14073.0f, 14074.0f, 14075.0f, 14076.0f, 14077.0f, 14078.0f, 14079.0f, 14080.0f, 14081.0f, 14082.0f, 14083.0f, 14084.0f, 14085.0f, 14086.0f, 14087.0f, 14088.0f, 14089.0f, 14090.0f, 14091.0f, 14092.0f, 14093.0f, 14094.0f, 14095.0f, 14096.0f, 14097.0f, 14098.0f, 14099.0f, 14100.0f, 
14101.0f, 14102.0f, 14103.0f, 14104.0f, 14105.0f, 14106.0f, 14107.0f, 14108.0f, 14109.0f, 14110.0f, 14111.0f, 14112.0f, 14113.0f, 14114.0f, 14115.0f, 14116.0f, 14117.0f, 14118.0f, 14119.0f, 14120.0f, 14121.0f, 14122.0f, 14123.0f, 14124.0f, 14125.0f, 14126.0f, 14127.0f, 14128.0f, 14129.0f, 14130.0f, 14131.0f, 14132.0f, 14133.0f, 14134.0f, 14135.0f, 14136.0f, 14137.0f, 14138.0f, 14139.0f, 14140.0f, 14141.0f, 14142.0f, 14143.0f, 14144.0f, 14145.0f, 14146.0f, 14147.0f, 14148.0f, 14149.0f, 14150.0f, 14151.0f, 14152.0f, 14153.0f, 14154.0f, 14155.0f, 14156.0f, 14157.0f, 14158.0f, 14159.0f, 14160.0f, 14161.0f, 14162.0f, 14163.0f, 14164.0f, 14165.0f, 14166.0f, 14167.0f, 14168.0f, 14169.0f, 14170.0f, 14171.0f, 14172.0f, 14173.0f, 14174.0f, 14175.0f, 14176.0f, 14177.0f, 14178.0f, 14179.0f, 14180.0f, 14181.0f, 14182.0f, 14183.0f, 14184.0f, 14185.0f, 14186.0f, 14187.0f, 14188.0f, 14189.0f, 14190.0f, 14191.0f, 14192.0f, 14193.0f, 14194.0f, 14195.0f, 14196.0f, 14197.0f, 14198.0f, 14199.0f, 14200.0f, 14201.0f, 14202.0f, 14203.0f, 14204.0f, 14205.0f, 14206.0f, 14207.0f, 14208.0f, 14209.0f, 14210.0f, 14211.0f, 14212.0f, 14213.0f, 14214.0f, 14215.0f, 14216.0f, 14217.0f, 14218.0f, 14219.0f, 14220.0f, 14221.0f, 14222.0f, 14223.0f, 14224.0f, 14225.0f, 14226.0f, 14227.0f, 14228.0f, 14229.0f, 14230.0f, 14231.0f, 14232.0f, 14233.0f, 14234.0f, 14235.0f, 14236.0f, 14237.0f, 14238.0f, 14239.0f, 14240.0f, 14241.0f, 14242.0f, 14243.0f, 14244.0f, 14245.0f, 14246.0f, 14247.0f, 14248.0f, 14249.0f, 14250.0f, 14251.0f, 14252.0f, 14253.0f, 14254.0f, 14255.0f, 14256.0f, 14257.0f, 14258.0f, 14259.0f, 14260.0f, 14261.0f, 14262.0f, 14263.0f, 14264.0f, 14265.0f, 14266.0f, 14267.0f, 14268.0f, 14269.0f, 14270.0f, 14271.0f, 14272.0f, 14273.0f, 14274.0f, 14275.0f, 14276.0f, 14277.0f, 14278.0f, 14279.0f, 14280.0f, 14281.0f, 14282.0f, 14283.0f, 14284.0f, 14285.0f, 14286.0f, 14287.0f, 14288.0f, 14289.0f, 14290.0f, 14291.0f, 14292.0f, 14293.0f, 14294.0f, 14295.0f, 14296.0f, 14297.0f, 14298.0f, 14299.0f, 14300.0f, 
14301.0f, 14302.0f, 14303.0f, 14304.0f, 14305.0f, 14306.0f, 14307.0f, 14308.0f, 14309.0f, 14310.0f, 14311.0f, 14312.0f, 14313.0f, 14314.0f, 14315.0f, 14316.0f, 14317.0f, 14318.0f, 14319.0f, 14320.0f, 14321.0f, 14322.0f, 14323.0f, 14324.0f, 14325.0f, 14326.0f, 14327.0f, 14328.0f, 14329.0f, 14330.0f, 14331.0f, 14332.0f, 14333.0f, 14334.0f, 14335.0f, 14336.0f, 14337.0f, 14338.0f, 14339.0f, 14340.0f, 14341.0f, 14342.0f, 14343.0f, 14344.0f, 14345.0f, 14346.0f, 14347.0f, 14348.0f, 14349.0f, 14350.0f, 14351.0f, 14352.0f, 14353.0f, 14354.0f, 14355.0f, 14356.0f, 14357.0f, 14358.0f, 14359.0f, 14360.0f, 14361.0f, 14362.0f, 14363.0f, 14364.0f, 14365.0f, 14366.0f, 14367.0f, 14368.0f, 14369.0f, 14370.0f, 14371.0f, 14372.0f, 14373.0f, 14374.0f, 14375.0f, 14376.0f, 14377.0f, 14378.0f, 14379.0f, 14380.0f, 14381.0f, 14382.0f, 14383.0f, 14384.0f, 14385.0f, 14386.0f, 14387.0f, 14388.0f, 14389.0f, 14390.0f, 14391.0f, 14392.0f, 14393.0f, 14394.0f, 14395.0f, 14396.0f, 14397.0f, 14398.0f, 14399.0f, 14400.0f, 14401.0f, 14402.0f, 14403.0f, 14404.0f, 14405.0f, 14406.0f, 14407.0f, 14408.0f, 14409.0f, 14410.0f, 14411.0f, 14412.0f, 14413.0f, 14414.0f, 14415.0f, 14416.0f, 14417.0f, 14418.0f, 14419.0f, 14420.0f, 14421.0f, 14422.0f, 14423.0f, 14424.0f, 14425.0f, 14426.0f, 14427.0f, 14428.0f, 14429.0f, 14430.0f, 14431.0f, 14432.0f, 14433.0f, 14434.0f, 14435.0f, 14436.0f, 14437.0f, 14438.0f, 14439.0f, 14440.0f, 14441.0f, 14442.0f, 14443.0f, 14444.0f, 14445.0f, 14446.0f, 14447.0f, 14448.0f, 14449.0f, 14450.0f, 14451.0f, 14452.0f, 14453.0f, 14454.0f, 14455.0f, 14456.0f, 14457.0f, 14458.0f, 14459.0f, 14460.0f, 14461.0f, 14462.0f, 14463.0f, 14464.0f, 14465.0f, 14466.0f, 14467.0f, 14468.0f, 14469.0f, 14470.0f, 14471.0f, 14472.0f, 14473.0f, 14474.0f, 14475.0f, 14476.0f, 14477.0f, 14478.0f, 14479.0f, 14480.0f, 14481.0f, 14482.0f, 14483.0f, 14484.0f, 14485.0f, 14486.0f, 14487.0f, 14488.0f, 14489.0f, 14490.0f, 14491.0f, 14492.0f, 14493.0f, 14494.0f, 14495.0f, 14496.0f, 14497.0f, 14498.0f, 14499.0f, 14500.0f, 
14501.0f, 14502.0f, 14503.0f, 14504.0f, 14505.0f, 14506.0f, 14507.0f, 14508.0f, 14509.0f, 14510.0f, 14511.0f, 14512.0f, 14513.0f, 14514.0f, 14515.0f, 14516.0f, 14517.0f, 14518.0f, 14519.0f, 14520.0f, 14521.0f, 14522.0f, 14523.0f, 14524.0f, 14525.0f, 14526.0f, 14527.0f, 14528.0f, 14529.0f, 14530.0f, 14531.0f, 14532.0f, 14533.0f, 14534.0f, 14535.0f, 14536.0f, 14537.0f, 14538.0f, 14539.0f, 14540.0f, 14541.0f, 14542.0f, 14543.0f, 14544.0f, 14545.0f, 14546.0f, 14547.0f, 14548.0f, 14549.0f, 14550.0f, 14551.0f, 14552.0f, 14553.0f, 14554.0f, 14555.0f, 14556.0f, 14557.0f, 14558.0f, 14559.0f, 14560.0f, 14561.0f, 14562.0f, 14563.0f, 14564.0f, 14565.0f, 14566.0f, 14567.0f, 14568.0f, 14569.0f, 14570.0f, 14571.0f, 14572.0f, 14573.0f, 14574.0f, 14575.0f, 14576.0f, 14577.0f, 14578.0f, 14579.0f, 14580.0f, 14581.0f, 14582.0f, 14583.0f, 14584.0f, 14585.0f, 14586.0f, 14587.0f, 14588.0f, 14589.0f, 14590.0f, 14591.0f, 14592.0f, 14593.0f, 14594.0f, 14595.0f, 14596.0f, 14597.0f, 14598.0f, 14599.0f, 14600.0f, 14601.0f, 14602.0f, 14603.0f, 14604.0f, 14605.0f, 14606.0f, 14607.0f, 14608.0f, 14609.0f, 14610.0f, 14611.0f, 14612.0f, 14613.0f, 14614.0f, 14615.0f, 14616.0f, 14617.0f, 14618.0f, 14619.0f, 14620.0f, 14621.0f, 14622.0f, 14623.0f, 14624.0f, 14625.0f, 14626.0f, 14627.0f, 14628.0f, 14629.0f, 14630.0f, 14631.0f, 14632.0f, 14633.0f, 14634.0f, 14635.0f, 14636.0f, 14637.0f, 14638.0f, 14639.0f, 14640.0f, 14641.0f, 14642.0f, 14643.0f, 14644.0f, 14645.0f, 14646.0f, 14647.0f, 14648.0f, 14649.0f, 14650.0f, 14651.0f, 14652.0f, 14653.0f, 14654.0f, 14655.0f, 14656.0f, 14657.0f, 14658.0f, 14659.0f, 14660.0f, 14661.0f, 14662.0f, 14663.0f, 14664.0f, 14665.0f, 14666.0f, 14667.0f, 14668.0f, 14669.0f, 14670.0f, 14671.0f, 14672.0f, 14673.0f, 14674.0f, 14675.0f, 14676.0f, 14677.0f, 14678.0f, 14679.0f, 14680.0f, 14681.0f, 14682.0f, 14683.0f, 14684.0f, 14685.0f, 14686.0f, 14687.0f, 14688.0f, 14689.0f, 14690.0f, 14691.0f, 14692.0f, 14693.0f, 14694.0f, 14695.0f, 14696.0f, 14697.0f, 14698.0f, 14699.0f, 14700.0f, 
14701.0f, 14702.0f, 14703.0f, 14704.0f, 14705.0f, 14706.0f, 14707.0f, 14708.0f, 14709.0f, 14710.0f, 14711.0f, 14712.0f, 14713.0f, 14714.0f, 14715.0f, 14716.0f, 14717.0f, 14718.0f, 14719.0f, 14720.0f, 14721.0f, 14722.0f, 14723.0f, 14724.0f, 14725.0f, 14726.0f, 14727.0f, 14728.0f, 14729.0f, 14730.0f, 14731.0f, 14732.0f, 14733.0f, 14734.0f, 14735.0f, 14736.0f, 14737.0f, 14738.0f, 14739.0f, 14740.0f, 14741.0f, 14742.0f, 14743.0f, 14744.0f, 14745.0f, 14746.0f, 14747.0f, 14748.0f, 14749.0f, 14750.0f, 14751.0f, 14752.0f, 14753.0f, 14754.0f, 14755.0f, 14756.0f, 14757.0f, 14758.0f, 14759.0f, 14760.0f, 14761.0f, 14762.0f, 14763.0f, 14764.0f, 14765.0f, 14766.0f, 14767.0f, 14768.0f, 14769.0f, 14770.0f, 14771.0f, 14772.0f, 14773.0f, 14774.0f, 14775.0f, 14776.0f, 14777.0f, 14778.0f, 14779.0f, 14780.0f, 14781.0f, 14782.0f, 14783.0f, 14784.0f, 14785.0f, 14786.0f, 14787.0f, 14788.0f, 14789.0f, 14790.0f, 14791.0f, 14792.0f, 14793.0f, 14794.0f, 14795.0f, 14796.0f, 14797.0f, 14798.0f, 14799.0f, 14800.0f, 14801.0f, 14802.0f, 14803.0f, 14804.0f, 14805.0f, 14806.0f, 14807.0f, 14808.0f, 14809.0f, 14810.0f, 14811.0f, 14812.0f, 14813.0f, 14814.0f, 14815.0f, 14816.0f, 14817.0f, 14818.0f, 14819.0f, 14820.0f, 14821.0f, 14822.0f, 14823.0f, 14824.0f, 14825.0f, 14826.0f, 14827.0f, 14828.0f, 14829.0f, 14830.0f, 14831.0f, 14832.0f, 14833.0f, 14834.0f, 14835.0f, 14836.0f, 14837.0f, 14838.0f, 14839.0f, 14840.0f, 14841.0f, 14842.0f, 14843.0f, 14844.0f, 14845.0f, 14846.0f, 14847.0f, 14848.0f, 14849.0f, 14850.0f, 14851.0f, 14852.0f, 14853.0f, 14854.0f, 14855.0f, 14856.0f, 14857.0f, 14858.0f, 14859.0f, 14860.0f, 14861.0f, 14862.0f, 14863.0f, 14864.0f, 14865.0f, 14866.0f, 14867.0f, 14868.0f, 14869.0f, 14870.0f, 14871.0f, 14872.0f, 14873.0f, 14874.0f, 14875.0f, 14876.0f, 14877.0f, 14878.0f, 14879.0f, 14880.0f, 14881.0f, 14882.0f, 14883.0f, 14884.0f, 14885.0f, 14886.0f, 14887.0f, 14888.0f, 14889.0f, 14890.0f, 14891.0f, 14892.0f, 14893.0f, 14894.0f, 14895.0f, 14896.0f, 14897.0f, 14898.0f, 14899.0f, 14900.0f, 
14901.0f, 14902.0f, 14903.0f, 14904.0f, 14905.0f, 14906.0f, 14907.0f, 14908.0f, 14909.0f, 14910.0f, 14911.0f, 14912.0f, 14913.0f, 14914.0f, 14915.0f, 14916.0f, 14917.0f, 14918.0f, 14919.0f, 14920.0f, 14921.0f, 14922.0f, 14923.0f, 14924.0f, 14925.0f, 14926.0f, 14927.0f, 14928.0f, 14929.0f, 14930.0f, 14931.0f, 14932.0f, 14933.0f, 14934.0f, 14935.0f, 14936.0f, 14937.0f, 14938.0f, 14939.0f, 14940.0f, 14941.0f, 14942.0f, 14943.0f, 14944.0f, 14945.0f, 14946.0f, 14947.0f, 14948.0f, 14949.0f, 14950.0f, 14951.0f, 14952.0f, 14953.0f, 14954.0f, 14955.0f, 14956.0f, 14957.0f, 14958.0f, 14959.0f, 14960.0f, 14961.0f, 14962.0f, 14963.0f, 14964.0f, 14965.0f, 14966.0f, 14967.0f, 14968.0f, 14969.0f, 14970.0f, 14971.0f, 14972.0f, 14973.0f, 14974.0f, 14975.0f, 14976.0f, 14977.0f, 14978.0f, 14979.0f, 14980.0f, 14981.0f, 14982.0f, 14983.0f, 14984.0f, 14985.0f, 14986.0f, 14987.0f, 14988.0f, 14989.0f, 14990.0f, 14991.0f, 14992.0f, 14993.0f, 14994.0f, 14995.0f, 14996.0f, 14997.0f, 14998.0f, 14999.0f, 15000.0f, 15001.0f, 15002.0f, 15003.0f, 15004.0f, 15005.0f, 15006.0f, 15007.0f, 15008.0f, 15009.0f, 15010.0f, 15011.0f, 15012.0f, 15013.0f, 15014.0f, 15015.0f, 15016.0f, 15017.0f, 15018.0f, 15019.0f, 15020.0f, 15021.0f, 15022.0f, 15023.0f, 15024.0f, 15025.0f, 15026.0f, 15027.0f, 15028.0f, 15029.0f, 15030.0f, 15031.0f, 15032.0f, 15033.0f, 15034.0f, 15035.0f, 15036.0f, 15037.0f, 15038.0f, 15039.0f, 15040.0f, 15041.0f, 15042.0f, 15043.0f, 15044.0f, 15045.0f, 15046.0f, 15047.0f, 15048.0f, 15049.0f, 15050.0f, 15051.0f, 15052.0f, 15053.0f, 15054.0f, 15055.0f, 15056.0f, 15057.0f, 15058.0f, 15059.0f, 15060.0f, 15061.0f, 15062.0f, 15063.0f, 15064.0f, 15065.0f, 15066.0f, 15067.0f, 15068.0f, 15069.0f, 15070.0f, 15071.0f, 15072.0f, 15073.0f, 15074.0f, 15075.0f, 15076.0f, 15077.0f, 15078.0f, 15079.0f, 15080.0f, 15081.0f, 15082.0f, 15083.0f, 15084.0f, 15085.0f, 15086.0f, 15087.0f, 15088.0f, 15089.0f, 15090.0f, 15091.0f, 15092.0f, 15093.0f, 15094.0f, 15095.0f, 15096.0f, 15097.0f, 15098.0f, 15099.0f, 15100.0f, 
15101.0f, 15102.0f, 15103.0f, 15104.0f, 15105.0f, 15106.0f, 15107.0f, 15108.0f, 15109.0f, 15110.0f, 15111.0f, 15112.0f, 15113.0f, 15114.0f, 15115.0f, 15116.0f, 15117.0f, 15118.0f, 15119.0f, 15120.0f, 15121.0f, 15122.0f, 15123.0f, 15124.0f, 15125.0f, 15126.0f, 15127.0f, 15128.0f, 15129.0f, 15130.0f, 15131.0f, 15132.0f, 15133.0f, 15134.0f, 15135.0f, 15136.0f, 15137.0f, 15138.0f, 15139.0f, 15140.0f, 15141.0f, 15142.0f, 15143.0f, 15144.0f, 15145.0f, 15146.0f, 15147.0f, 15148.0f, 15149.0f, 15150.0f, 15151.0f, 15152.0f, 15153.0f, 15154.0f, 15155.0f, 15156.0f, 15157.0f, 15158.0f, 15159.0f, 15160.0f, 15161.0f, 15162.0f, 15163.0f, 15164.0f, 15165.0f, 15166.0f, 15167.0f, 15168.0f, 15169.0f, 15170.0f, 15171.0f, 15172.0f, 15173.0f, 15174.0f, 15175.0f, 15176.0f, 15177.0f, 15178.0f, 15179.0f, 15180.0f, 15181.0f, 15182.0f, 15183.0f, 15184.0f, 15185.0f, 15186.0f, 15187.0f, 15188.0f, 15189.0f, 15190.0f, 15191.0f, 15192.0f, 15193.0f, 15194.0f, 15195.0f, 15196.0f, 15197.0f, 15198.0f, 15199.0f, 15200.0f, 15201.0f, 15202.0f, 15203.0f, 15204.0f, 15205.0f, 15206.0f, 15207.0f, 15208.0f, 15209.0f, 15210.0f, 15211.0f, 15212.0f, 15213.0f, 15214.0f, 15215.0f, 15216.0f, 15217.0f, 15218.0f, 15219.0f, 15220.0f, 15221.0f, 15222.0f, 15223.0f, 15224.0f, 15225.0f, 15226.0f, 15227.0f, 15228.0f, 15229.0f, 15230.0f, 15231.0f, 15232.0f, 15233.0f, 15234.0f, 15235.0f, 15236.0f, 15237.0f, 15238.0f, 15239.0f, 15240.0f, 15241.0f, 15242.0f, 15243.0f, 15244.0f, 15245.0f, 15246.0f, 15247.0f, 15248.0f, 15249.0f, 15250.0f, 15251.0f, 15252.0f, 15253.0f, 15254.0f, 15255.0f, 15256.0f, 15257.0f, 15258.0f, 15259.0f, 15260.0f, 15261.0f, 15262.0f, 15263.0f, 15264.0f, 15265.0f, 15266.0f, 15267.0f, 15268.0f, 15269.0f, 15270.0f, 15271.0f, 15272.0f, 15273.0f, 15274.0f, 15275.0f, 15276.0f, 15277.0f, 15278.0f, 15279.0f, 15280.0f, 15281.0f, 15282.0f, 15283.0f, 15284.0f, 15285.0f, 15286.0f, 15287.0f, 15288.0f, 15289.0f, 15290.0f, 15291.0f, 15292.0f, 15293.0f, 15294.0f, 15295.0f, 15296.0f, 15297.0f, 15298.0f, 15299.0f, 15300.0f, 
15301.0f, 15302.0f, 15303.0f, 15304.0f, 15305.0f, 15306.0f, 15307.0f, 15308.0f, 15309.0f, 15310.0f, 15311.0f, 15312.0f, 15313.0f, 15314.0f, 15315.0f, 15316.0f, 15317.0f, 15318.0f, 15319.0f, 15320.0f, 15321.0f, 15322.0f, 15323.0f, 15324.0f, 15325.0f, 15326.0f, 15327.0f, 15328.0f, 15329.0f, 15330.0f, 15331.0f, 15332.0f, 15333.0f, 15334.0f, 15335.0f, 15336.0f, 15337.0f, 15338.0f, 15339.0f, 15340.0f, 15341.0f, 15342.0f, 15343.0f, 15344.0f, 15345.0f, 15346.0f, 15347.0f, 15348.0f, 15349.0f, 15350.0f, 15351.0f, 15352.0f, 15353.0f, 15354.0f, 15355.0f, 15356.0f, 15357.0f, 15358.0f, 15359.0f, 15360.0f, 15361.0f, 15362.0f, 15363.0f, 15364.0f, 15365.0f, 15366.0f, 15367.0f, 15368.0f, 15369.0f, 15370.0f, 15371.0f, 15372.0f, 15373.0f, 15374.0f, 15375.0f, 15376.0f, 15377.0f, 15378.0f, 15379.0f, 15380.0f, 15381.0f, 15382.0f, 15383.0f, 15384.0f, 15385.0f, 15386.0f, 15387.0f, 15388.0f, 15389.0f, 15390.0f, 15391.0f, 15392.0f, 15393.0f, 15394.0f, 15395.0f, 15396.0f, 15397.0f, 15398.0f, 15399.0f, 15400.0f, 15401.0f, 15402.0f, 15403.0f, 15404.0f, 15405.0f, 15406.0f, 15407.0f, 15408.0f, 15409.0f, 15410.0f, 15411.0f, 15412.0f, 15413.0f, 15414.0f, 15415.0f, 15416.0f, 15417.0f, 15418.0f, 15419.0f, 15420.0f, 15421.0f, 15422.0f, 15423.0f, 15424.0f, 15425.0f, 15426.0f, 15427.0f, 15428.0f, 15429.0f, 15430.0f, 15431.0f, 15432.0f, 15433.0f, 15434.0f, 15435.0f, 15436.0f, 15437.0f, 15438.0f, 15439.0f, 15440.0f, 15441.0f, 15442.0f, 15443.0f, 15444.0f, 15445.0f, 15446.0f, 15447.0f, 15448.0f, 15449.0f, 15450.0f, 15451.0f, 15452.0f, 15453.0f, 15454.0f, 15455.0f, 15456.0f, 15457.0f, 15458.0f, 15459.0f, 15460.0f, 15461.0f, 15462.0f, 15463.0f, 15464.0f, 15465.0f, 15466.0f, 15467.0f, 15468.0f, 15469.0f, 15470.0f, 15471.0f, 15472.0f, 15473.0f, 15474.0f, 15475.0f, 15476.0f, 15477.0f, 15478.0f, 15479.0f, 15480.0f, 15481.0f, 15482.0f, 15483.0f, 15484.0f, 15485.0f, 15486.0f, 15487.0f, 15488.0f, 15489.0f, 15490.0f, 15491.0f, 15492.0f, 15493.0f, 15494.0f, 15495.0f, 15496.0f, 15497.0f, 15498.0f, 15499.0f, 15500.0f, 
15501.0f, 15502.0f, 15503.0f, 15504.0f, 15505.0f, 15506.0f, 15507.0f, 15508.0f, 15509.0f, 15510.0f, 15511.0f, 15512.0f, 15513.0f, 15514.0f, 15515.0f, 15516.0f, 15517.0f, 15518.0f, 15519.0f, 15520.0f, 15521.0f, 15522.0f, 15523.0f, 15524.0f, 15525.0f, 15526.0f, 15527.0f, 15528.0f, 15529.0f, 15530.0f, 15531.0f, 15532.0f, 15533.0f, 15534.0f, 15535.0f, 15536.0f, 15537.0f, 15538.0f, 15539.0f, 15540.0f, 15541.0f, 15542.0f, 15543.0f, 15544.0f, 15545.0f, 15546.0f, 15547.0f, 15548.0f, 15549.0f, 15550.0f, 15551.0f, 15552.0f, 15553.0f, 15554.0f, 15555.0f, 15556.0f, 15557.0f, 15558.0f, 15559.0f, 15560.0f, 15561.0f, 15562.0f, 15563.0f, 15564.0f, 15565.0f, 15566.0f, 15567.0f, 15568.0f, 15569.0f, 15570.0f, 15571.0f, 15572.0f, 15573.0f, 15574.0f, 15575.0f, 15576.0f, 15577.0f, 15578.0f, 15579.0f, 15580.0f, 15581.0f, 15582.0f, 15583.0f, 15584.0f, 15585.0f, 15586.0f, 15587.0f, 15588.0f, 15589.0f, 15590.0f, 15591.0f, 15592.0f, 15593.0f, 15594.0f, 15595.0f, 15596.0f, 15597.0f, 15598.0f, 15599.0f, 15600.0f, 15601.0f, 15602.0f, 15603.0f, 15604.0f, 15605.0f, 15606.0f, 15607.0f, 15608.0f, 15609.0f, 15610.0f, 15611.0f, 15612.0f, 15613.0f, 15614.0f, 15615.0f, 15616.0f, 15617.0f, 15618.0f, 15619.0f, 15620.0f, 15621.0f, 15622.0f, 15623.0f, 15624.0f, 15625.0f, 15626.0f, 15627.0f, 15628.0f, 15629.0f, 15630.0f, 15631.0f, 15632.0f, 15633.0f, 15634.0f, 15635.0f, 15636.0f, 15637.0f, 15638.0f, 15639.0f, 15640.0f, 15641.0f, 15642.0f, 15643.0f, 15644.0f, 15645.0f, 15646.0f, 15647.0f, 15648.0f, 15649.0f, 15650.0f, 15651.0f, 15652.0f, 15653.0f, 15654.0f, 15655.0f, 15656.0f, 15657.0f, 15658.0f, 15659.0f, 15660.0f, 15661.0f, 15662.0f, 15663.0f, 15664.0f, 15665.0f, 15666.0f, 15667.0f, 15668.0f, 15669.0f, 15670.0f, 15671.0f, 15672.0f, 15673.0f, 15674.0f, 15675.0f, 15676.0f, 15677.0f, 15678.0f, 15679.0f, 15680.0f, 15681.0f, 15682.0f, 15683.0f, 15684.0f, 15685.0f, 15686.0f, 15687.0f, 15688.0f, 15689.0f, 15690.0f, 15691.0f, 15692.0f, 15693.0f, 15694.0f, 15695.0f, 15696.0f, 15697.0f, 15698.0f, 15699.0f, 15700.0f, 
15701.0f, 15702.0f, 15703.0f, 15704.0f, 15705.0f, 15706.0f, 15707.0f, 15708.0f, 15709.0f, 15710.0f, 15711.0f, 15712.0f, 15713.0f, 15714.0f, 15715.0f, 15716.0f, 15717.0f, 15718.0f, 15719.0f, 15720.0f, 15721.0f, 15722.0f, 15723.0f, 15724.0f, 15725.0f, 15726.0f, 15727.0f, 15728.0f, 15729.0f, 15730.0f, 15731.0f, 15732.0f, 15733.0f, 15734.0f, 15735.0f, 15736.0f, 15737.0f, 15738.0f, 15739.0f, 15740.0f, 15741.0f, 15742.0f, 15743.0f, 15744.0f, 15745.0f, 15746.0f, 15747.0f, 15748.0f, 15749.0f, 15750.0f, 15751.0f, 15752.0f, 15753.0f, 15754.0f, 15755.0f, 15756.0f, 15757.0f, 15758.0f, 15759.0f, 15760.0f, 15761.0f, 15762.0f, 15763.0f, 15764.0f, 15765.0f, 15766.0f, 15767.0f, 15768.0f, 15769.0f, 15770.0f, 15771.0f, 15772.0f, 15773.0f, 15774.0f, 15775.0f, 15776.0f, 15777.0f, 15778.0f, 15779.0f, 15780.0f, 15781.0f, 15782.0f, 15783.0f, 15784.0f, 15785.0f, 15786.0f, 15787.0f, 15788.0f, 15789.0f, 15790.0f, 15791.0f, 15792.0f, 15793.0f, 15794.0f, 15795.0f, 15796.0f, 15797.0f, 15798.0f, 15799.0f, 15800.0f, 15801.0f, 15802.0f, 15803.0f, 15804.0f, 15805.0f, 15806.0f, 15807.0f, 15808.0f, 15809.0f, 15810.0f, 15811.0f, 15812.0f, 15813.0f, 15814.0f, 15815.0f, 15816.0f, 15817.0f, 15818.0f, 15819.0f, 15820.0f, 15821.0f, 15822.0f, 15823.0f, 15824.0f, 15825.0f, 15826.0f, 15827.0f, 15828.0f, 15829.0f, 15830.0f, 15831.0f, 15832.0f, 15833.0f, 15834.0f, 15835.0f, 15836.0f, 15837.0f, 15838.0f, 15839.0f, 15840.0f, 15841.0f, 15842.0f, 15843.0f, 15844.0f, 15845.0f, 15846.0f, 15847.0f, 15848.0f, 15849.0f, 15850.0f, 15851.0f, 15852.0f, 15853.0f, 15854.0f, 15855.0f, 15856.0f, 15857.0f, 15858.0f, 15859.0f, 15860.0f, 15861.0f, 15862.0f, 15863.0f, 15864.0f, 15865.0f, 15866.0f, 15867.0f, 15868.0f, 15869.0f, 15870.0f, 15871.0f, 15872.0f, 15873.0f, 15874.0f, 15875.0f, 15876.0f, 15877.0f, 15878.0f, 15879.0f, 15880.0f, 15881.0f, 15882.0f, 15883.0f, 15884.0f, 15885.0f, 15886.0f, 15887.0f, 15888.0f, 15889.0f, 15890.0f, 15891.0f, 15892.0f, 15893.0f, 15894.0f, 15895.0f, 15896.0f, 15897.0f, 15898.0f, 15899.0f, 15900.0f, 
15901.0f, 15902.0f, 15903.0f, 15904.0f, 15905.0f, 15906.0f, 15907.0f, 15908.0f, 15909.0f, 15910.0f, 15911.0f, 15912.0f, 15913.0f, 15914.0f, 15915.0f, 15916.0f, 15917.0f, 15918.0f, 15919.0f, 15920.0f, 15921.0f, 15922.0f, 15923.0f, 15924.0f, 15925.0f, 15926.0f, 15927.0f, 15928.0f, 15929.0f, 15930.0f, 15931.0f, 15932.0f, 15933.0f, 15934.0f, 15935.0f, 15936.0f, 15937.0f, 15938.0f, 15939.0f, 15940.0f, 15941.0f, 15942.0f, 15943.0f, 15944.0f, 15945.0f, 15946.0f, 15947.0f, 15948.0f, 15949.0f, 15950.0f, 15951.0f, 15952.0f, 15953.0f, 15954.0f, 15955.0f, 15956.0f, 15957.0f, 15958.0f, 15959.0f, 15960.0f, 15961.0f, 15962.0f, 15963.0f, 15964.0f, 15965.0f, 15966.0f, 15967.0f, 15968.0f, 15969.0f, 15970.0f, 15971.0f, 15972.0f, 15973.0f, 15974.0f, 15975.0f, 15976.0f, 15977.0f, 15978.0f, 15979.0f, 15980.0f, 15981.0f, 15982.0f, 15983.0f, 15984.0f, 15985.0f, 15986.0f, 15987.0f, 15988.0f, 15989.0f, 15990.0f, 15991.0f, 15992.0f, 15993.0f, 15994.0f, 15995.0f, 15996.0f, 15997.0f, 15998.0f, 15999.0f, 16000.0f, 16001.0f, 16002.0f, 16003.0f, 16004.0f, 16005.0f, 16006.0f, 16007.0f, 16008.0f, 16009.0f, 16010.0f, 16011.0f, 16012.0f, 16013.0f, 16014.0f, 16015.0f, 16016.0f, 16017.0f, 16018.0f, 16019.0f, 16020.0f, 16021.0f, 16022.0f, 16023.0f, 16024.0f, 16025.0f, 16026.0f, 16027.0f, 16028.0f, 16029.0f, 16030.0f, 16031.0f, 16032.0f, 16033.0f, 16034.0f, 16035.0f, 16036.0f, 16037.0f, 16038.0f, 16039.0f, 16040.0f, 16041.0f, 16042.0f, 16043.0f, 16044.0f, 16045.0f, 16046.0f, 16047.0f, 16048.0f, 16049.0f, 16050.0f, 16051.0f, 16052.0f, 16053.0f, 16054.0f, 16055.0f, 16056.0f, 16057.0f, 16058.0f, 16059.0f, 16060.0f, 16061.0f, 16062.0f, 16063.0f, 16064.0f, 16065.0f, 16066.0f, 16067.0f, 16068.0f, 16069.0f, 16070.0f, 16071.0f, 16072.0f, 16073.0f, 16074.0f, 16075.0f, 16076.0f, 16077.0f, 16078.0f, 16079.0f, 16080.0f, 16081.0f, 16082.0f, 16083.0f, 16084.0f, 16085.0f, 16086.0f, 16087.0f, 16088.0f, 16089.0f, 16090.0f, 16091.0f, 16092.0f, 16093.0f, 16094.0f, 16095.0f, 16096.0f, 16097.0f, 16098.0f, 16099.0f, 16100.0f, 
16101.0f, 16102.0f, 16103.0f, 16104.0f, 16105.0f, 16106.0f, 16107.0f, 16108.0f, 16109.0f, 16110.0f, 16111.0f, 16112.0f, 16113.0f, 16114.0f, 16115.0f, 16116.0f, 16117.0f, 16118.0f, 16119.0f, 16120.0f, 16121.0f, 16122.0f, 16123.0f, 16124.0f, 16125.0f, 16126.0f, 16127.0f, 16128.0f, 16129.0f, 16130.0f, 16131.0f, 16132.0f, 16133.0f, 16134.0f, 16135.0f, 16136.0f, 16137.0f, 16138.0f, 16139.0f, 16140.0f, 16141.0f, 16142.0f, 16143.0f, 16144.0f, 16145.0f, 16146.0f, 16147.0f, 16148.0f, 16149.0f, 16150.0f, 16151.0f, 16152.0f, 16153.0f, 16154.0f, 16155.0f, 16156.0f, 16157.0f, 16158.0f, 16159.0f, 16160.0f, 16161.0f, 16162.0f, 16163.0f, 16164.0f, 16165.0f, 16166.0f, 16167.0f, 16168.0f, 16169.0f, 16170.0f, 16171.0f, 16172.0f, 16173.0f, 16174.0f, 16175.0f, 16176.0f, 16177.0f, 16178.0f, 16179.0f, 16180.0f, 16181.0f, 16182.0f, 16183.0f, 16184.0f, 16185.0f, 16186.0f, 16187.0f, 16188.0f, 16189.0f, 16190.0f, 16191.0f, 16192.0f, 16193.0f, 16194.0f, 16195.0f, 16196.0f, 16197.0f, 16198.0f, 16199.0f, 16200.0f, 16201.0f, 16202.0f, 16203.0f, 16204.0f, 16205.0f, 16206.0f, 16207.0f, 16208.0f, 16209.0f, 16210.0f, 16211.0f, 16212.0f, 16213.0f, 16214.0f, 16215.0f, 16216.0f, 16217.0f, 16218.0f, 16219.0f, 16220.0f, 16221.0f, 16222.0f, 16223.0f, 16224.0f, 16225.0f, 16226.0f, 16227.0f, 16228.0f, 16229.0f, 16230.0f, 16231.0f, 16232.0f, 16233.0f, 16234.0f, 16235.0f, 16236.0f, 16237.0f, 16238.0f, 16239.0f, 16240.0f, 16241.0f, 16242.0f, 16243.0f, 16244.0f, 16245.0f, 16246.0f, 16247.0f, 16248.0f, 16249.0f, 16250.0f, 16251.0f, 16252.0f, 16253.0f, 16254.0f, 16255.0f, 16256.0f, 16257.0f, 16258.0f, 16259.0f, 16260.0f, 16261.0f, 16262.0f, 16263.0f, 16264.0f, 16265.0f, 16266.0f, 16267.0f, 16268.0f, 16269.0f, 16270.0f, 16271.0f, 16272.0f, 16273.0f, 16274.0f, 16275.0f, 16276.0f, 16277.0f, 16278.0f, 16279.0f, 16280.0f, 16281.0f, 16282.0f, 16283.0f, 16284.0f, 16285.0f, 16286.0f, 16287.0f, 16288.0f, 16289.0f, 16290.0f, 16291.0f, 16292.0f, 16293.0f, 16294.0f, 16295.0f, 16296.0f, 16297.0f, 16298.0f, 16299.0f, 16300.0f, 
16301.0f, 16302.0f, 16303.0f, 16304.0f, 16305.0f, 16306.0f, 16307.0f, 16308.0f, 16309.0f, 16310.0f, 16311.0f, 16312.0f, 16313.0f, 16314.0f, 16315.0f, 16316.0f, 16317.0f, 16318.0f, 16319.0f, 16320.0f, 16321.0f, 16322.0f, 16323.0f, 16324.0f, 16325.0f, 16326.0f, 16327.0f, 16328.0f, 16329.0f, 16330.0f, 16331.0f, 16332.0f, 16333.0f, 16334.0f, 16335.0f, 16336.0f, 16337.0f, 16338.0f, 16339.0f, 16340.0f, 16341.0f, 16342.0f, 16343.0f, 16344.0f, 16345.0f, 16346.0f, 16347.0f, 16348.0f, 16349.0f, 16350.0f, 16351.0f, 16352.0f, 16353.0f, 16354.0f, 16355.0f, 16356.0f, 16357.0f, 16358.0f, 16359.0f, 16360.0f, 16361.0f, 16362.0f, 16363.0f, 16364.0f, 16365.0f, 16366.0f, 16367.0f, 16368.0f, 16369.0f, 16370.0f, 16371.0f, 16372.0f, 16373.0f, 16374.0f, 16375.0f, 16376.0f, 16377.0f, 16378.0f, 16379.0f, 16380.0f, 16381.0f, 16382.0f, 16383.0f, 16384.0f, 16385.0f, 16386.0f, 16387.0f, 16388.0f, 16389.0f, 16390.0f, 16391.0f, 16392.0f, 16393.0f, 16394.0f, 16395.0f, 16396.0f, 16397.0f, 16398.0f, 16399.0f, 16400.0f, 16401.0f, 16402.0f, 16403.0f, 16404.0f, 16405.0f, 16406.0f, 16407.0f, 16408.0f, 16409.0f, 16410.0f, 16411.0f, 16412.0f, 16413.0f, 16414.0f, 16415.0f, 16416.0f, 16417.0f, 16418.0f, 16419.0f, 16420.0f, 16421.0f, 16422.0f, 16423.0f, 16424.0f, 16425.0f, 16426.0f, 16427.0f, 16428.0f, 16429.0f, 16430.0f, 16431.0f, 16432.0f, 16433.0f, 16434.0f, 16435.0f, 16436.0f, 16437.0f, 16438.0f, 16439.0f, 16440.0f, 16441.0f, 16442.0f, 16443.0f, 16444.0f, 16445.0f, 16446.0f, 16447.0f, 16448.0f, 16449.0f, 16450.0f, 16451.0f, 16452.0f, 16453.0f, 16454.0f, 16455.0f, 16456.0f, 16457.0f, 16458.0f, 16459.0f, 16460.0f, 16461.0f, 16462.0f, 16463.0f, 16464.0f, 16465.0f, 16466.0f, 16467.0f, 16468.0f, 16469.0f, 16470.0f, 16471.0f, 16472.0f, 16473.0f, 16474.0f, 16475.0f, 16476.0f, 16477.0f, 16478.0f, 16479.0f, 16480.0f, 16481.0f, 16482.0f, 16483.0f, 16484.0f, 16485.0f, 16486.0f, 16487.0f, 16488.0f, 16489.0f, 16490.0f, 16491.0f, 16492.0f, 16493.0f, 16494.0f, 16495.0f, 16496.0f, 16497.0f, 16498.0f, 16499.0f, 16500.0f, 
16501.0f, 16502.0f, 16503.0f, 16504.0f, 16505.0f, 16506.0f, 16507.0f, 16508.0f, 16509.0f, 16510.0f, 16511.0f, 16512.0f, 16513.0f, 16514.0f, 16515.0f, 16516.0f, 16517.0f, 16518.0f, 16519.0f, 16520.0f, 16521.0f, 16522.0f, 16523.0f, 16524.0f, 16525.0f, 16526.0f, 16527.0f, 16528.0f, 16529.0f, 16530.0f, 16531.0f, 16532.0f, 16533.0f, 16534.0f, 16535.0f, 16536.0f, 16537.0f, 16538.0f, 16539.0f, 16540.0f, 16541.0f, 16542.0f, 16543.0f, 16544.0f, 16545.0f, 16546.0f, 16547.0f, 16548.0f, 16549.0f, 16550.0f, 16551.0f, 16552.0f, 16553.0f, 16554.0f, 16555.0f, 16556.0f, 16557.0f, 16558.0f, 16559.0f, 16560.0f, 16561.0f, 16562.0f, 16563.0f, 16564.0f, 16565.0f, 16566.0f, 16567.0f, 16568.0f, 16569.0f, 16570.0f, 16571.0f, 16572.0f, 16573.0f, 16574.0f, 16575.0f, 16576.0f, 16577.0f, 16578.0f, 16579.0f, 16580.0f, 16581.0f, 16582.0f, 16583.0f, 16584.0f, 16585.0f, 16586.0f, 16587.0f, 16588.0f, 16589.0f, 16590.0f, 16591.0f, 16592.0f, 16593.0f, 16594.0f, 16595.0f, 16596.0f, 16597.0f, 16598.0f, 16599.0f, 16600.0f, 16601.0f, 16602.0f, 16603.0f, 16604.0f, 16605.0f, 16606.0f, 16607.0f, 16608.0f, 16609.0f, 16610.0f, 16611.0f, 16612.0f, 16613.0f, 16614.0f, 16615.0f, 16616.0f, 16617.0f, 16618.0f, 16619.0f, 16620.0f, 16621.0f, 16622.0f, 16623.0f, 16624.0f, 16625.0f, 16626.0f, 16627.0f, 16628.0f, 16629.0f, 16630.0f, 16631.0f, 16632.0f, 16633.0f, 16634.0f, 16635.0f, 16636.0f, 16637.0f, 16638.0f, 16639.0f, 16640.0f, 16641.0f, 16642.0f, 16643.0f, 16644.0f, 16645.0f, 16646.0f, 16647.0f, 16648.0f, 16649.0f, 16650.0f, 16651.0f, 16652.0f, 16653.0f, 16654.0f, 16655.0f, 16656.0f, 16657.0f, 16658.0f, 16659.0f, 16660.0f, 16661.0f, 16662.0f, 16663.0f, 16664.0f, 16665.0f, 16666.0f, 16667.0f, 16668.0f, 16669.0f, 16670.0f, 16671.0f, 16672.0f, 16673.0f, 16674.0f, 16675.0f, 16676.0f, 16677.0f, 16678.0f, 16679.0f, 16680.0f, 16681.0f, 16682.0f, 16683.0f, 16684.0f, 16685.0f, 16686.0f, 16687.0f, 16688.0f, 16689.0f, 16690.0f, 16691.0f, 16692.0f, 16693.0f, 16694.0f, 16695.0f, 16696.0f, 16697.0f, 16698.0f, 16699.0f, 16700.0f, 
16701.0f, 16702.0f, 16703.0f, 16704.0f, 16705.0f, 16706.0f, 16707.0f, 16708.0f, 16709.0f, 16710.0f, 16711.0f, 16712.0f, 16713.0f, 16714.0f, 16715.0f, 16716.0f, 16717.0f, 16718.0f, 16719.0f, 16720.0f, 16721.0f, 16722.0f, 16723.0f, 16724.0f, 16725.0f, 16726.0f, 16727.0f, 16728.0f, 16729.0f, 16730.0f, 16731.0f, 16732.0f, 16733.0f, 16734.0f, 16735.0f, 16736.0f, 16737.0f, 16738.0f, 16739.0f, 16740.0f, 16741.0f, 16742.0f, 16743.0f, 16744.0f, 16745.0f, 16746.0f, 16747.0f, 16748.0f, 16749.0f, 16750.0f, 16751.0f, 16752.0f, 16753.0f, 16754.0f, 16755.0f, 16756.0f, 16757.0f, 16758.0f, 16759.0f, 16760.0f, 16761.0f, 16762.0f, 16763.0f, 16764.0f, 16765.0f, 16766.0f, 16767.0f, 16768.0f, 16769.0f, 16770.0f, 16771.0f, 16772.0f, 16773.0f, 16774.0f, 16775.0f, 16776.0f, 16777.0f, 16778.0f, 16779.0f, 16780.0f, 16781.0f, 16782.0f, 16783.0f, 16784.0f, 16785.0f, 16786.0f, 16787.0f, 16788.0f, 16789.0f, 16790.0f, 16791.0f, 16792.0f, 16793.0f, 16794.0f, 16795.0f, 16796.0f, 16797.0f, 16798.0f, 16799.0f, 16800.0f, 16801.0f, 16802.0f, 16803.0f, 16804.0f, 16805.0f, 16806.0f, 16807.0f, 16808.0f, 16809.0f, 16810.0f, 16811.0f, 16812.0f, 16813.0f, 16814.0f, 16815.0f, 16816.0f, 16817.0f, 16818.0f, 16819.0f, 16820.0f, 16821.0f, 16822.0f, 16823.0f, 16824.0f, 16825.0f, 16826.0f, 16827.0f, 16828.0f, 16829.0f, 16830.0f, 16831.0f, 16832.0f, 16833.0f, 16834.0f, 16835.0f, 16836.0f, 16837.0f, 16838.0f, 16839.0f, 16840.0f, 16841.0f, 16842.0f, 16843.0f, 16844.0f, 16845.0f, 16846.0f, 16847.0f, 16848.0f, 16849.0f, 16850.0f, 16851.0f, 16852.0f, 16853.0f, 16854.0f, 16855.0f, 16856.0f, 16857.0f, 16858.0f, 16859.0f, 16860.0f, 16861.0f, 16862.0f, 16863.0f, 16864.0f, 16865.0f, 16866.0f, 16867.0f, 16868.0f, 16869.0f, 16870.0f, 16871.0f, 16872.0f, 16873.0f, 16874.0f, 16875.0f, 16876.0f, 16877.0f, 16878.0f, 16879.0f, 16880.0f, 16881.0f, 16882.0f, 16883.0f, 16884.0f, 16885.0f, 16886.0f, 16887.0f, 16888.0f, 16889.0f, 16890.0f, 16891.0f, 16892.0f, 16893.0f, 16894.0f, 16895.0f, 16896.0f, 16897.0f, 16898.0f, 16899.0f, 16900.0f, 
16901.0f, 16902.0f, 16903.0f, 16904.0f, 16905.0f, 16906.0f, 16907.0f, 16908.0f, 16909.0f, 16910.0f, 16911.0f, 16912.0f, 16913.0f, 16914.0f, 16915.0f, 16916.0f, 16917.0f, 16918.0f, 16919.0f, 16920.0f, 16921.0f, 16922.0f, 16923.0f, 16924.0f, 16925.0f, 16926.0f, 16927.0f, 16928.0f, 16929.0f, 16930.0f, 16931.0f, 16932.0f, 16933.0f, 16934.0f, 16935.0f, 16936.0f, 16937.0f, 16938.0f, 16939.0f, 16940.0f, 16941.0f, 16942.0f, 16943.0f, 16944.0f, 16945.0f, 16946.0f, 16947.0f, 16948.0f, 16949.0f, 16950.0f, 16951.0f, 16952.0f, 16953.0f, 16954.0f, 16955.0f, 16956.0f, 16957.0f, 16958.0f, 16959.0f, 16960.0f, 16961.0f, 16962.0f, 16963.0f, 16964.0f, 16965.0f, 16966.0f, 16967.0f, 16968.0f, 16969.0f, 16970.0f, 16971.0f, 16972.0f, 16973.0f, 16974.0f, 16975.0f, 16976.0f, 16977.0f, 16978.0f, 16979.0f, 16980.0f, 16981.0f, 16982.0f, 16983.0f, 16984.0f, 16985.0f, 16986.0f, 16987.0f, 16988.0f, 16989.0f, 16990.0f, 16991.0f, 16992.0f, 16993.0f, 16994.0f, 16995.0f, 16996.0f, 16997.0f, 16998.0f, 16999.0f, 17000.0f, 17001.0f, 17002.0f, 17003.0f, 17004.0f, 17005.0f, 17006.0f, 17007.0f, 17008.0f, 17009.0f, 17010.0f, 17011.0f, 17012.0f, 17013.0f, 17014.0f, 17015.0f, 17016.0f, 17017.0f, 17018.0f, 17019.0f, 17020.0f, 17021.0f, 17022.0f, 17023.0f, 17024.0f, 17025.0f, 17026.0f, 17027.0f, 17028.0f, 17029.0f, 17030.0f, 17031.0f, 17032.0f, 17033.0f, 17034.0f, 17035.0f, 17036.0f, 17037.0f, 17038.0f, 17039.0f, 17040.0f, 17041.0f, 17042.0f, 17043.0f, 17044.0f, 17045.0f, 17046.0f, 17047.0f, 17048.0f, 17049.0f, 17050.0f, 17051.0f, 17052.0f, 17053.0f, 17054.0f, 17055.0f, 17056.0f, 17057.0f, 17058.0f, 17059.0f, 17060.0f, 17061.0f, 17062.0f, 17063.0f, 17064.0f, 17065.0f, 17066.0f, 17067.0f, 17068.0f, 17069.0f, 17070.0f, 17071.0f, 17072.0f, 17073.0f, 17074.0f, 17075.0f, 17076.0f, 17077.0f, 17078.0f, 17079.0f, 17080.0f, 17081.0f, 17082.0f, 17083.0f, 17084.0f, 17085.0f, 17086.0f, 17087.0f, 17088.0f, 17089.0f, 17090.0f, 17091.0f, 17092.0f, 17093.0f, 17094.0f, 17095.0f, 17096.0f, 17097.0f, 17098.0f, 17099.0f, 17100.0f, 
17101.0f, 17102.0f, 17103.0f, 17104.0f, 17105.0f, 17106.0f, 17107.0f, 17108.0f, 17109.0f, 17110.0f, 17111.0f, 17112.0f, 17113.0f, 17114.0f, 17115.0f, 17116.0f, 17117.0f, 17118.0f, 17119.0f, 17120.0f, 17121.0f, 17122.0f, 17123.0f, 17124.0f, 17125.0f, 17126.0f, 17127.0f, 17128.0f, 17129.0f, 17130.0f, 17131.0f, 17132.0f, 17133.0f, 17134.0f, 17135.0f, 17136.0f, 17137.0f, 17138.0f, 17139.0f, 17140.0f, 17141.0f, 17142.0f, 17143.0f, 17144.0f, 17145.0f, 17146.0f, 17147.0f, 17148.0f, 17149.0f, 17150.0f, 17151.0f, 17152.0f, 17153.0f, 17154.0f, 17155.0f, 17156.0f, 17157.0f, 17158.0f, 17159.0f, 17160.0f, 17161.0f, 17162.0f, 17163.0f, 17164.0f, 17165.0f, 17166.0f, 17167.0f, 17168.0f, 17169.0f, 17170.0f, 17171.0f, 17172.0f, 17173.0f, 17174.0f, 17175.0f, 17176.0f, 17177.0f, 17178.0f, 17179.0f, 17180.0f, 17181.0f, 17182.0f, 17183.0f, 17184.0f, 17185.0f, 17186.0f, 17187.0f, 17188.0f, 17189.0f, 17190.0f, 17191.0f, 17192.0f, 17193.0f, 17194.0f, 17195.0f, 17196.0f, 17197.0f, 17198.0f, 17199.0f, 17200.0f, 17201.0f, 17202.0f, 17203.0f, 17204.0f, 17205.0f, 17206.0f, 17207.0f, 17208.0f, 17209.0f, 17210.0f, 17211.0f, 17212.0f, 17213.0f, 17214.0f, 17215.0f, 17216.0f, 17217.0f, 17218.0f, 17219.0f, 17220.0f, 17221.0f, 17222.0f, 17223.0f, 17224.0f, 17225.0f, 17226.0f, 17227.0f, 17228.0f, 17229.0f, 17230.0f, 17231.0f, 17232.0f, 17233.0f, 17234.0f, 17235.0f, 17236.0f, 17237.0f, 17238.0f, 17239.0f, 17240.0f, 17241.0f, 17242.0f, 17243.0f, 17244.0f, 17245.0f, 17246.0f, 17247.0f, 17248.0f, 17249.0f, 17250.0f, 17251.0f, 17252.0f, 17253.0f, 17254.0f, 17255.0f, 17256.0f, 17257.0f, 17258.0f, 17259.0f, 17260.0f, 17261.0f, 17262.0f, 17263.0f, 17264.0f, 17265.0f, 17266.0f, 17267.0f, 17268.0f, 17269.0f, 17270.0f, 17271.0f, 17272.0f, 17273.0f, 17274.0f, 17275.0f, 17276.0f, 17277.0f, 17278.0f, 17279.0f, 17280.0f, 17281.0f, 17282.0f, 17283.0f, 17284.0f, 17285.0f, 17286.0f, 17287.0f, 17288.0f, 17289.0f, 17290.0f, 17291.0f, 17292.0f, 17293.0f, 17294.0f, 17295.0f, 17296.0f, 17297.0f, 17298.0f, 17299.0f, 17300.0f, 
17301.0f, 17302.0f, 17303.0f, 17304.0f, 17305.0f, 17306.0f, 17307.0f, 17308.0f, 17309.0f, 17310.0f, 17311.0f, 17312.0f, 17313.0f, 17314.0f, 17315.0f, 17316.0f, 17317.0f, 17318.0f, 17319.0f, 17320.0f, 17321.0f, 17322.0f, 17323.0f, 17324.0f, 17325.0f, 17326.0f, 17327.0f, 17328.0f, 17329.0f, 17330.0f, 17331.0f, 17332.0f, 17333.0f, 17334.0f, 17335.0f, 17336.0f, 17337.0f, 17338.0f, 17339.0f, 17340.0f, 17341.0f, 17342.0f, 17343.0f, 17344.0f, 17345.0f, 17346.0f, 17347.0f, 17348.0f, 17349.0f, 17350.0f, 17351.0f, 17352.0f, 17353.0f, 17354.0f, 17355.0f, 17356.0f, 17357.0f, 17358.0f, 17359.0f, 17360.0f, 17361.0f, 17362.0f, 17363.0f, 17364.0f, 17365.0f, 17366.0f, 17367.0f, 17368.0f, 17369.0f, 17370.0f, 17371.0f, 17372.0f, 17373.0f, 17374.0f, 17375.0f, 17376.0f, 17377.0f, 17378.0f, 17379.0f, 17380.0f, 17381.0f, 17382.0f, 17383.0f, 17384.0f, 17385.0f, 17386.0f, 17387.0f, 17388.0f, 17389.0f, 17390.0f, 17391.0f, 17392.0f, 17393.0f, 17394.0f, 17395.0f, 17396.0f, 17397.0f, 17398.0f, 17399.0f, 17400.0f, 17401.0f, 17402.0f, 17403.0f, 17404.0f, 17405.0f, 17406.0f, 17407.0f, 17408.0f, 17409.0f, 17410.0f, 17411.0f, 17412.0f, 17413.0f, 17414.0f, 17415.0f, 17416.0f, 17417.0f, 17418.0f, 17419.0f, 17420.0f, 17421.0f, 17422.0f, 17423.0f, 17424.0f, 17425.0f, 17426.0f, 17427.0f, 17428.0f, 17429.0f, 17430.0f, 17431.0f, 17432.0f, 17433.0f, 17434.0f, 17435.0f, 17436.0f, 17437.0f, 17438.0f, 17439.0f, 17440.0f, 17441.0f, 17442.0f, 17443.0f, 17444.0f, 17445.0f, 17446.0f, 17447.0f, 17448.0f, 17449.0f, 17450.0f, 17451.0f, 17452.0f, 17453.0f, 17454.0f, 17455.0f, 17456.0f, 17457.0f, 17458.0f, 17459.0f, 17460.0f, 17461.0f, 17462.0f, 17463.0f, 17464.0f, 17465.0f, 17466.0f, 17467.0f, 17468.0f, 17469.0f, 17470.0f, 17471.0f, 17472.0f, 17473.0f, 17474.0f, 17475.0f, 17476.0f, 17477.0f, 17478.0f, 17479.0f, 17480.0f, 17481.0f, 17482.0f, 17483.0f, 17484.0f, 17485.0f, 17486.0f, 17487.0f, 17488.0f, 17489.0f, 17490.0f, 17491.0f, 17492.0f, 17493.0f, 17494.0f, 17495.0f, 17496.0f, 17497.0f, 17498.0f, 17499.0f, 17500.0f, 
17501.0f, 17502.0f, 17503.0f, 17504.0f, 17505.0f, 17506.0f, 17507.0f, 17508.0f, 17509.0f, 17510.0f, 17511.0f, 17512.0f, 17513.0f, 17514.0f, 17515.0f, 17516.0f, 17517.0f, 17518.0f, 17519.0f, 17520.0f, 17521.0f, 17522.0f, 17523.0f, 17524.0f, 17525.0f, 17526.0f, 17527.0f, 17528.0f, 17529.0f, 17530.0f, 17531.0f, 17532.0f, 17533.0f, 17534.0f, 17535.0f, 17536.0f, 17537.0f, 17538.0f, 17539.0f, 17540.0f, 17541.0f, 17542.0f, 17543.0f, 17544.0f, 17545.0f, 17546.0f, 17547.0f, 17548.0f, 17549.0f, 17550.0f, 17551.0f, 17552.0f, 17553.0f, 17554.0f, 17555.0f, 17556.0f, 17557.0f, 17558.0f, 17559.0f, 17560.0f, 17561.0f, 17562.0f, 17563.0f, 17564.0f, 17565.0f, 17566.0f, 17567.0f, 17568.0f, 17569.0f, 17570.0f, 17571.0f, 17572.0f, 17573.0f, 17574.0f, 17575.0f, 17576.0f, 17577.0f, 17578.0f, 17579.0f, 17580.0f, 17581.0f, 17582.0f, 17583.0f, 17584.0f, 17585.0f, 17586.0f, 17587.0f, 17588.0f, 17589.0f, 17590.0f, 17591.0f, 17592.0f, 17593.0f, 17594.0f, 17595.0f, 17596.0f, 17597.0f, 17598.0f, 17599.0f, 17600.0f, 17601.0f, 17602.0f, 17603.0f, 17604.0f, 17605.0f, 17606.0f, 17607.0f, 17608.0f, 17609.0f, 17610.0f, 17611.0f, 17612.0f, 17613.0f, 17614.0f, 17615.0f, 17616.0f, 17617.0f, 17618.0f, 17619.0f, 17620.0f, 17621.0f, 17622.0f, 17623.0f, 17624.0f, 17625.0f, 17626.0f, 17627.0f, 17628.0f, 17629.0f, 17630.0f, 17631.0f, 17632.0f, 17633.0f, 17634.0f, 17635.0f, 17636.0f, 17637.0f, 17638.0f, 17639.0f, 17640.0f, 17641.0f, 17642.0f, 17643.0f, 17644.0f, 17645.0f, 17646.0f, 17647.0f, 17648.0f, 17649.0f, 17650.0f, 17651.0f, 17652.0f, 17653.0f, 17654.0f, 17655.0f, 17656.0f, 17657.0f, 17658.0f, 17659.0f, 17660.0f, 17661.0f, 17662.0f, 17663.0f, 17664.0f, 17665.0f, 17666.0f, 17667.0f, 17668.0f, 17669.0f, 17670.0f, 17671.0f, 17672.0f, 17673.0f, 17674.0f, 17675.0f, 17676.0f, 17677.0f, 17678.0f, 17679.0f, 17680.0f, 17681.0f, 17682.0f, 17683.0f, 17684.0f, 17685.0f, 17686.0f, 17687.0f, 17688.0f, 17689.0f, 17690.0f, 17691.0f, 17692.0f, 17693.0f, 17694.0f, 17695.0f, 17696.0f, 17697.0f, 17698.0f, 17699.0f, 17700.0f, 
17701.0f, 17702.0f, 17703.0f, 17704.0f, 17705.0f, 17706.0f, 17707.0f, 17708.0f, 17709.0f, 17710.0f, 17711.0f, 17712.0f, 17713.0f, 17714.0f, 17715.0f, 17716.0f, 17717.0f, 17718.0f, 17719.0f, 17720.0f, 17721.0f, 17722.0f, 17723.0f, 17724.0f, 17725.0f, 17726.0f, 17727.0f, 17728.0f, 17729.0f, 17730.0f, 17731.0f, 17732.0f, 17733.0f, 17734.0f, 17735.0f, 17736.0f, 17737.0f, 17738.0f, 17739.0f, 17740.0f, 17741.0f, 17742.0f, 17743.0f, 17744.0f, 17745.0f, 17746.0f, 17747.0f, 17748.0f, 17749.0f, 17750.0f, 17751.0f, 17752.0f, 17753.0f, 17754.0f, 17755.0f, 17756.0f, 17757.0f, 17758.0f, 17759.0f, 17760.0f, 17761.0f, 17762.0f, 17763.0f, 17764.0f, 17765.0f, 17766.0f, 17767.0f, 17768.0f, 17769.0f, 17770.0f, 17771.0f, 17772.0f, 17773.0f, 17774.0f, 17775.0f, 17776.0f, 17777.0f, 17778.0f, 17779.0f, 17780.0f, 17781.0f, 17782.0f, 17783.0f, 17784.0f, 17785.0f, 17786.0f, 17787.0f, 17788.0f, 17789.0f, 17790.0f, 17791.0f, 17792.0f, 17793.0f, 17794.0f, 17795.0f, 17796.0f, 17797.0f, 17798.0f, 17799.0f, 17800.0f, 17801.0f, 17802.0f, 17803.0f, 17804.0f, 17805.0f, 17806.0f, 17807.0f, 17808.0f, 17809.0f, 17810.0f, 17811.0f, 17812.0f, 17813.0f, 17814.0f, 17815.0f, 17816.0f, 17817.0f, 17818.0f, 17819.0f, 17820.0f, 17821.0f, 17822.0f, 17823.0f, 17824.0f, 17825.0f, 17826.0f, 17827.0f, 17828.0f, 17829.0f, 17830.0f, 17831.0f, 17832.0f, 17833.0f, 17834.0f, 17835.0f, 17836.0f, 17837.0f, 17838.0f, 17839.0f, 17840.0f, 17841.0f, 17842.0f, 17843.0f, 17844.0f, 17845.0f, 17846.0f, 17847.0f, 17848.0f, 17849.0f, 17850.0f, 17851.0f, 17852.0f, 17853.0f, 17854.0f, 17855.0f, 17856.0f, 17857.0f, 17858.0f, 17859.0f, 17860.0f, 17861.0f, 17862.0f, 17863.0f, 17864.0f, 17865.0f, 17866.0f, 17867.0f, 17868.0f, 17869.0f, 17870.0f, 17871.0f, 17872.0f, 17873.0f, 17874.0f, 17875.0f, 17876.0f, 17877.0f, 17878.0f, 17879.0f, 17880.0f, 17881.0f, 17882.0f, 17883.0f, 17884.0f, 17885.0f, 17886.0f, 17887.0f, 17888.0f, 17889.0f, 17890.0f, 17891.0f, 17892.0f, 17893.0f, 17894.0f, 17895.0f, 17896.0f, 17897.0f, 17898.0f, 17899.0f, 17900.0f, 
17901.0f, 17902.0f, 17903.0f, 17904.0f, 17905.0f, 17906.0f, 17907.0f, 17908.0f, 17909.0f, 17910.0f, 17911.0f, 17912.0f, 17913.0f, 17914.0f, 17915.0f, 17916.0f, 17917.0f, 17918.0f, 17919.0f, 17920.0f, 17921.0f, 17922.0f, 17923.0f, 17924.0f, 17925.0f, 17926.0f, 17927.0f, 17928.0f, 17929.0f, 17930.0f, 17931.0f, 17932.0f, 17933.0f, 17934.0f, 17935.0f, 17936.0f, 17937.0f, 17938.0f, 17939.0f, 17940.0f, 17941.0f, 17942.0f, 17943.0f, 17944.0f, 17945.0f, 17946.0f, 17947.0f, 17948.0f, 17949.0f, 17950.0f, 17951.0f, 17952.0f, 17953.0f, 17954.0f, 17955.0f, 17956.0f, 17957.0f, 17958.0f, 17959.0f, 17960.0f, 17961.0f, 17962.0f, 17963.0f, 17964.0f, 17965.0f, 17966.0f, 17967.0f, 17968.0f, 17969.0f, 17970.0f, 17971.0f, 17972.0f, 17973.0f, 17974.0f, 17975.0f, 17976.0f, 17977.0f, 17978.0f, 17979.0f, 17980.0f, 17981.0f, 17982.0f, 17983.0f, 17984.0f, 17985.0f, 17986.0f, 17987.0f, 17988.0f, 17989.0f, 17990.0f, 17991.0f, 17992.0f, 17993.0f, 17994.0f, 17995.0f, 17996.0f, 17997.0f, 17998.0f, 17999.0f, 18000.0f, 18001.0f, 18002.0f, 18003.0f, 18004.0f, 18005.0f, 18006.0f, 18007.0f, 18008.0f, 18009.0f, 18010.0f, 18011.0f, 18012.0f, 18013.0f, 18014.0f, 18015.0f, 18016.0f, 18017.0f, 18018.0f, 18019.0f, 18020.0f, 18021.0f, 18022.0f, 18023.0f, 18024.0f, 18025.0f, 18026.0f, 18027.0f, 18028.0f, 18029.0f, 18030.0f, 18031.0f, 18032.0f, 18033.0f, 18034.0f, 18035.0f, 18036.0f, 18037.0f, 18038.0f, 18039.0f, 18040.0f, 18041.0f, 18042.0f, 18043.0f, 18044.0f, 18045.0f, 18046.0f, 18047.0f, 18048.0f, 18049.0f, 18050.0f, 18051.0f, 18052.0f, 18053.0f, 18054.0f, 18055.0f, 18056.0f, 18057.0f, 18058.0f, 18059.0f, 18060.0f, 18061.0f, 18062.0f, 18063.0f, 18064.0f, 18065.0f, 18066.0f, 18067.0f, 18068.0f, 18069.0f, 18070.0f, 18071.0f, 18072.0f, 18073.0f, 18074.0f, 18075.0f, 18076.0f, 18077.0f, 18078.0f, 18079.0f, 18080.0f, 18081.0f, 18082.0f, 18083.0f, 18084.0f, 18085.0f, 18086.0f, 18087.0f, 18088.0f, 18089.0f, 18090.0f, 18091.0f, 18092.0f, 18093.0f, 18094.0f, 18095.0f, 18096.0f, 18097.0f, 18098.0f, 18099.0f, 18100.0f, 
18101.0f, 18102.0f, 18103.0f, 18104.0f, 18105.0f, 18106.0f, 18107.0f, 18108.0f, 18109.0f, 18110.0f, 18111.0f, 18112.0f, 18113.0f, 18114.0f, 18115.0f, 18116.0f, 18117.0f, 18118.0f, 18119.0f, 18120.0f, 18121.0f, 18122.0f, 18123.0f, 18124.0f, 18125.0f, 18126.0f, 18127.0f, 18128.0f, 18129.0f, 18130.0f, 18131.0f, 18132.0f, 18133.0f, 18134.0f, 18135.0f, 18136.0f, 18137.0f, 18138.0f, 18139.0f, 18140.0f, 18141.0f, 18142.0f, 18143.0f, 18144.0f, 18145.0f, 18146.0f, 18147.0f, 18148.0f, 18149.0f, 18150.0f, 18151.0f, 18152.0f, 18153.0f, 18154.0f, 18155.0f, 18156.0f, 18157.0f, 18158.0f, 18159.0f, 18160.0f, 18161.0f, 18162.0f, 18163.0f, 18164.0f, 18165.0f, 18166.0f, 18167.0f, 18168.0f, 18169.0f, 18170.0f, 18171.0f, 18172.0f, 18173.0f, 18174.0f, 18175.0f, 18176.0f, 18177.0f, 18178.0f, 18179.0f, 18180.0f, 18181.0f, 18182.0f, 18183.0f, 18184.0f, 18185.0f, 18186.0f, 18187.0f, 18188.0f, 18189.0f, 18190.0f, 18191.0f, 18192.0f, 18193.0f, 18194.0f, 18195.0f, 18196.0f, 18197.0f, 18198.0f, 18199.0f, 18200.0f, 18201.0f, 18202.0f, 18203.0f, 18204.0f, 18205.0f, 18206.0f, 18207.0f, 18208.0f, 18209.0f, 18210.0f, 18211.0f, 18212.0f, 18213.0f, 18214.0f, 18215.0f, 18216.0f, 18217.0f, 18218.0f, 18219.0f, 18220.0f, 18221.0f, 18222.0f, 18223.0f, 18224.0f, 18225.0f, 18226.0f, 18227.0f, 18228.0f, 18229.0f, 18230.0f, 18231.0f, 18232.0f, 18233.0f, 18234.0f, 18235.0f, 18236.0f, 18237.0f, 18238.0f, 18239.0f, 18240.0f, 18241.0f, 18242.0f, 18243.0f, 18244.0f, 18245.0f, 18246.0f, 18247.0f, 18248.0f, 18249.0f, 18250.0f, 18251.0f, 18252.0f, 18253.0f, 18254.0f, 18255.0f, 18256.0f, 18257.0f, 18258.0f, 18259.0f, 18260.0f, 18261.0f, 18262.0f, 18263.0f, 18264.0f, 18265.0f, 18266.0f, 18267.0f, 18268.0f, 18269.0f, 18270.0f, 18271.0f, 18272.0f, 18273.0f, 18274.0f, 18275.0f, 18276.0f, 18277.0f, 18278.0f, 18279.0f, 18280.0f, 18281.0f, 18282.0f, 18283.0f, 18284.0f, 18285.0f, 18286.0f, 18287.0f, 18288.0f, 18289.0f, 18290.0f, 18291.0f, 18292.0f, 18293.0f, 18294.0f, 18295.0f, 18296.0f, 18297.0f, 18298.0f, 18299.0f, 18300.0f, 
18301.0f, 18302.0f, 18303.0f, 18304.0f, 18305.0f, 18306.0f, 18307.0f, 18308.0f, 18309.0f, 18310.0f, 18311.0f, 18312.0f, 18313.0f, 18314.0f, 18315.0f, 18316.0f, 18317.0f, 18318.0f, 18319.0f, 18320.0f, 18321.0f, 18322.0f, 18323.0f, 18324.0f, 18325.0f, 18326.0f, 18327.0f, 18328.0f, 18329.0f, 18330.0f, 18331.0f, 18332.0f, 18333.0f, 18334.0f, 18335.0f, 18336.0f, 18337.0f, 18338.0f, 18339.0f, 18340.0f, 18341.0f, 18342.0f, 18343.0f, 18344.0f, 18345.0f, 18346.0f, 18347.0f, 18348.0f, 18349.0f, 18350.0f, 18351.0f, 18352.0f, 18353.0f, 18354.0f, 18355.0f, 18356.0f, 18357.0f, 18358.0f, 18359.0f, 18360.0f, 18361.0f, 18362.0f, 18363.0f, 18364.0f, 18365.0f, 18366.0f, 18367.0f, 18368.0f, 18369.0f, 18370.0f, 18371.0f, 18372.0f, 18373.0f, 18374.0f, 18375.0f, 18376.0f, 18377.0f, 18378.0f, 18379.0f, 18380.0f, 18381.0f, 18382.0f, 18383.0f, 18384.0f, 18385.0f, 18386.0f, 18387.0f, 18388.0f, 18389.0f, 18390.0f, 18391.0f, 18392.0f, 18393.0f, 18394.0f, 18395.0f, 18396.0f, 18397.0f, 18398.0f, 18399.0f, 18400.0f, 18401.0f, 18402.0f, 18403.0f, 18404.0f, 18405.0f, 18406.0f, 18407.0f, 18408.0f, 18409.0f, 18410.0f, 18411.0f, 18412.0f, 18413.0f, 18414.0f, 18415.0f, 18416.0f, 18417.0f, 18418.0f, 18419.0f, 18420.0f, 18421.0f, 18422.0f, 18423.0f, 18424.0f, 18425.0f, 18426.0f, 18427.0f, 18428.0f, 18429.0f, 18430.0f, 18431.0f, 18432.0f, 18433.0f, 18434.0f, 18435.0f, 18436.0f, 18437.0f, 18438.0f, 18439.0f, 18440.0f, 18441.0f, 18442.0f, 18443.0f, 18444.0f, 18445.0f, 18446.0f, 18447.0f, 18448.0f, 18449.0f, 18450.0f, 18451.0f, 18452.0f, 18453.0f, 18454.0f, 18455.0f, 18456.0f, 18457.0f, 18458.0f, 18459.0f, 18460.0f, 18461.0f, 18462.0f, 18463.0f, 18464.0f, 18465.0f, 18466.0f, 18467.0f, 18468.0f, 18469.0f, 18470.0f, 18471.0f, 18472.0f, 18473.0f, 18474.0f, 18475.0f, 18476.0f, 18477.0f, 18478.0f, 18479.0f, 18480.0f, 18481.0f, 18482.0f, 18483.0f, 18484.0f, 18485.0f, 18486.0f, 18487.0f, 18488.0f, 18489.0f, 18490.0f, 18491.0f, 18492.0f, 18493.0f, 18494.0f, 18495.0f, 18496.0f, 18497.0f, 18498.0f, 18499.0f, 18500.0f, 
18501.0f, 18502.0f, 18503.0f, 18504.0f, 18505.0f, 18506.0f, 18507.0f, 18508.0f, 18509.0f, 18510.0f, 18511.0f, 18512.0f, 18513.0f, 18514.0f, 18515.0f, 18516.0f, 18517.0f, 18518.0f, 18519.0f, 18520.0f, 18521.0f, 18522.0f, 18523.0f, 18524.0f, 18525.0f, 18526.0f, 18527.0f, 18528.0f, 18529.0f, 18530.0f, 18531.0f, 18532.0f, 18533.0f, 18534.0f, 18535.0f, 18536.0f, 18537.0f, 18538.0f, 18539.0f, 18540.0f, 18541.0f, 18542.0f, 18543.0f, 18544.0f, 18545.0f, 18546.0f, 18547.0f, 18548.0f, 18549.0f, 18550.0f, 18551.0f, 18552.0f, 18553.0f, 18554.0f, 18555.0f, 18556.0f, 18557.0f, 18558.0f, 18559.0f, 18560.0f, 18561.0f, 18562.0f, 18563.0f, 18564.0f, 18565.0f, 18566.0f, 18567.0f, 18568.0f, 18569.0f, 18570.0f, 18571.0f, 18572.0f, 18573.0f, 18574.0f, 18575.0f, 18576.0f, 18577.0f, 18578.0f, 18579.0f, 18580.0f, 18581.0f, 18582.0f, 18583.0f, 18584.0f, 18585.0f, 18586.0f, 18587.0f, 18588.0f, 18589.0f, 18590.0f, 18591.0f, 18592.0f, 18593.0f, 18594.0f, 18595.0f, 18596.0f, 18597.0f, 18598.0f, 18599.0f, 18600.0f, 18601.0f, 18602.0f, 18603.0f, 18604.0f, 18605.0f, 18606.0f, 18607.0f, 18608.0f, 18609.0f, 18610.0f, 18611.0f, 18612.0f, 18613.0f, 18614.0f, 18615.0f, 18616.0f, 18617.0f, 18618.0f, 18619.0f, 18620.0f, 18621.0f, 18622.0f, 18623.0f, 18624.0f, 18625.0f, 18626.0f, 18627.0f, 18628.0f, 18629.0f, 18630.0f, 18631.0f, 18632.0f, 18633.0f, 18634.0f, 18635.0f, 18636.0f, 18637.0f, 18638.0f, 18639.0f, 18640.0f, 18641.0f, 18642.0f, 18643.0f, 18644.0f, 18645.0f, 18646.0f, 18647.0f, 18648.0f, 18649.0f, 18650.0f, 18651.0f, 18652.0f, 18653.0f, 18654.0f, 18655.0f, 18656.0f, 18657.0f, 18658.0f, 18659.0f, 18660.0f, 18661.0f, 18662.0f, 18663.0f, 18664.0f, 18665.0f, 18666.0f, 18667.0f, 18668.0f, 18669.0f, 18670.0f, 18671.0f, 18672.0f, 18673.0f, 18674.0f, 18675.0f, 18676.0f, 18677.0f, 18678.0f, 18679.0f, 18680.0f, 18681.0f, 18682.0f, 18683.0f, 18684.0f, 18685.0f, 18686.0f, 18687.0f, 18688.0f, 18689.0f, 18690.0f, 18691.0f, 18692.0f, 18693.0f, 18694.0f, 18695.0f, 18696.0f, 18697.0f, 18698.0f, 18699.0f, 18700.0f, 
18701.0f, 18702.0f, 18703.0f, 18704.0f, 18705.0f, 18706.0f, 18707.0f, 18708.0f, 18709.0f, 18710.0f, 18711.0f, 18712.0f, 18713.0f, 18714.0f, 18715.0f, 18716.0f, 18717.0f, 18718.0f, 18719.0f, 18720.0f, 18721.0f, 18722.0f, 18723.0f, 18724.0f, 18725.0f, 18726.0f, 18727.0f, 18728.0f, 18729.0f, 18730.0f, 18731.0f, 18732.0f, 18733.0f, 18734.0f, 18735.0f, 18736.0f, 18737.0f, 18738.0f, 18739.0f, 18740.0f, 18741.0f, 18742.0f, 18743.0f, 18744.0f, 18745.0f, 18746.0f, 18747.0f, 18748.0f, 18749.0f, 18750.0f, 18751.0f, 18752.0f, 18753.0f, 18754.0f, 18755.0f, 18756.0f, 18757.0f, 18758.0f, 18759.0f, 18760.0f, 18761.0f, 18762.0f, 18763.0f, 18764.0f, 18765.0f, 18766.0f, 18767.0f, 18768.0f, 18769.0f, 18770.0f, 18771.0f, 18772.0f, 18773.0f, 18774.0f, 18775.0f, 18776.0f, 18777.0f, 18778.0f, 18779.0f, 18780.0f, 18781.0f, 18782.0f, 18783.0f, 18784.0f, 18785.0f, 18786.0f, 18787.0f, 18788.0f, 18789.0f, 18790.0f, 18791.0f, 18792.0f, 18793.0f, 18794.0f, 18795.0f, 18796.0f, 18797.0f, 18798.0f, 18799.0f, 18800.0f, 18801.0f, 18802.0f, 18803.0f, 18804.0f, 18805.0f, 18806.0f, 18807.0f, 18808.0f, 18809.0f, 18810.0f, 18811.0f, 18812.0f, 18813.0f, 18814.0f, 18815.0f, 18816.0f, 18817.0f, 18818.0f, 18819.0f, 18820.0f, 18821.0f, 18822.0f, 18823.0f, 18824.0f, 18825.0f, 18826.0f, 18827.0f, 18828.0f, 18829.0f, 18830.0f, 18831.0f, 18832.0f, 18833.0f, 18834.0f, 18835.0f, 18836.0f, 18837.0f, 18838.0f, 18839.0f, 18840.0f, 18841.0f, 18842.0f, 18843.0f, 18844.0f, 18845.0f, 18846.0f, 18847.0f, 18848.0f, 18849.0f, 18850.0f, 18851.0f, 18852.0f, 18853.0f, 18854.0f, 18855.0f, 18856.0f, 18857.0f, 18858.0f, 18859.0f, 18860.0f, 18861.0f, 18862.0f, 18863.0f, 18864.0f, 18865.0f, 18866.0f, 18867.0f, 18868.0f, 18869.0f, 18870.0f, 18871.0f, 18872.0f, 18873.0f, 18874.0f, 18875.0f, 18876.0f, 18877.0f, 18878.0f, 18879.0f, 18880.0f, 18881.0f, 18882.0f, 18883.0f, 18884.0f, 18885.0f, 18886.0f, 18887.0f, 18888.0f, 18889.0f, 18890.0f, 18891.0f, 18892.0f, 18893.0f, 18894.0f, 18895.0f, 18896.0f, 18897.0f, 18898.0f, 18899.0f, 18900.0f, 
18901.0f, 18902.0f, 18903.0f, 18904.0f, 18905.0f, 18906.0f, 18907.0f, 18908.0f, 18909.0f, 18910.0f, 18911.0f, 18912.0f, 18913.0f, 18914.0f, 18915.0f, 18916.0f, 18917.0f, 18918.0f, 18919.0f, 18920.0f, 18921.0f, 18922.0f, 18923.0f, 18924.0f, 18925.0f, 18926.0f, 18927.0f, 18928.0f, 18929.0f, 18930.0f, 18931.0f, 18932.0f, 18933.0f, 18934.0f, 18935.0f, 18936.0f, 18937.0f, 18938.0f, 18939.0f, 18940.0f, 18941.0f, 18942.0f, 18943.0f, 18944.0f, 18945.0f, 18946.0f, 18947.0f, 18948.0f, 18949.0f, 18950.0f, 18951.0f, 18952.0f, 18953.0f, 18954.0f, 18955.0f, 18956.0f, 18957.0f, 18958.0f, 18959.0f, 18960.0f, 18961.0f, 18962.0f, 18963.0f, 18964.0f, 18965.0f, 18966.0f, 18967.0f, 18968.0f, 18969.0f, 18970.0f, 18971.0f, 18972.0f, 18973.0f, 18974.0f, 18975.0f, 18976.0f, 18977.0f, 18978.0f, 18979.0f, 18980.0f, 18981.0f, 18982.0f, 18983.0f, 18984.0f, 18985.0f, 18986.0f, 18987.0f, 18988.0f, 18989.0f, 18990.0f, 18991.0f, 18992.0f, 18993.0f, 18994.0f, 18995.0f, 18996.0f, 18997.0f, 18998.0f, 18999.0f, 19000.0f, 19001.0f, 19002.0f, 19003.0f, 19004.0f, 19005.0f, 19006.0f, 19007.0f, 19008.0f, 19009.0f, 19010.0f, 19011.0f, 19012.0f, 19013.0f, 19014.0f, 19015.0f, 19016.0f, 19017.0f, 19018.0f, 19019.0f, 19020.0f, 19021.0f, 19022.0f, 19023.0f, 19024.0f, 19025.0f, 19026.0f, 19027.0f, 19028.0f, 19029.0f, 19030.0f, 19031.0f, 19032.0f, 19033.0f, 19034.0f, 19035.0f, 19036.0f, 19037.0f, 19038.0f, 19039.0f, 19040.0f, 19041.0f, 19042.0f, 19043.0f, 19044.0f, 19045.0f, 19046.0f, 19047.0f, 19048.0f, 19049.0f, 19050.0f, 19051.0f, 19052.0f, 19053.0f, 19054.0f, 19055.0f, 19056.0f, 19057.0f, 19058.0f, 19059.0f, 19060.0f, 19061.0f, 19062.0f, 19063.0f, 19064.0f, 19065.0f, 19066.0f, 19067.0f, 19068.0f, 19069.0f, 19070.0f, 19071.0f, 19072.0f, 19073.0f, 19074.0f, 19075.0f, 19076.0f, 19077.0f, 19078.0f, 19079.0f, 19080.0f, 19081.0f, 19082.0f, 19083.0f, 19084.0f, 19085.0f, 19086.0f, 19087.0f, 19088.0f, 19089.0f, 19090.0f, 19091.0f, 19092.0f, 19093.0f, 19094.0f, 19095.0f, 19096.0f, 19097.0f, 19098.0f, 19099.0f, 19100.0f, 
19101.0f, 19102.0f, 19103.0f, 19104.0f, 19105.0f, 19106.0f, 19107.0f, 19108.0f, 19109.0f, 19110.0f, 19111.0f, 19112.0f, 19113.0f, 19114.0f, 19115.0f, 19116.0f, 19117.0f, 19118.0f, 19119.0f, 19120.0f, 19121.0f, 19122.0f, 19123.0f, 19124.0f, 19125.0f, 19126.0f, 19127.0f, 19128.0f, 19129.0f, 19130.0f, 19131.0f, 19132.0f, 19133.0f, 19134.0f, 19135.0f, 19136.0f, 19137.0f, 19138.0f, 19139.0f, 19140.0f, 19141.0f, 19142.0f, 19143.0f, 19144.0f, 19145.0f, 19146.0f, 19147.0f, 19148.0f, 19149.0f, 19150.0f, 19151.0f, 19152.0f, 19153.0f, 19154.0f, 19155.0f, 19156.0f, 19157.0f, 19158.0f, 19159.0f, 19160.0f, 19161.0f, 19162.0f, 19163.0f, 19164.0f, 19165.0f, 19166.0f, 19167.0f, 19168.0f, 19169.0f, 19170.0f, 19171.0f, 19172.0f, 19173.0f, 19174.0f, 19175.0f, 19176.0f, 19177.0f, 19178.0f, 19179.0f, 19180.0f, 19181.0f, 19182.0f, 19183.0f, 19184.0f, 19185.0f, 19186.0f, 19187.0f, 19188.0f, 19189.0f, 19190.0f, 19191.0f, 19192.0f, 19193.0f, 19194.0f, 19195.0f, 19196.0f, 19197.0f, 19198.0f, 19199.0f, 19200.0f, 19201.0f, 19202.0f, 19203.0f, 19204.0f, 19205.0f, 19206.0f, 19207.0f, 19208.0f, 19209.0f, 19210.0f, 19211.0f, 19212.0f, 19213.0f, 19214.0f, 19215.0f, 19216.0f, 19217.0f, 19218.0f, 19219.0f, 19220.0f, 19221.0f, 19222.0f, 19223.0f, 19224.0f, 19225.0f, 19226.0f, 19227.0f, 19228.0f, 19229.0f, 19230.0f, 19231.0f, 19232.0f, 19233.0f, 19234.0f, 19235.0f, 19236.0f, 19237.0f, 19238.0f, 19239.0f, 19240.0f, 19241.0f, 19242.0f, 19243.0f, 19244.0f, 19245.0f, 19246.0f, 19247.0f, 19248.0f, 19249.0f, 19250.0f, 19251.0f, 19252.0f, 19253.0f, 19254.0f, 19255.0f, 19256.0f, 19257.0f, 19258.0f, 19259.0f, 19260.0f, 19261.0f, 19262.0f, 19263.0f, 19264.0f, 19265.0f, 19266.0f, 19267.0f, 19268.0f, 19269.0f, 19270.0f, 19271.0f, 19272.0f, 19273.0f, 19274.0f, 19275.0f, 19276.0f, 19277.0f, 19278.0f, 19279.0f, 19280.0f, 19281.0f, 19282.0f, 19283.0f, 19284.0f, 19285.0f, 19286.0f, 19287.0f, 19288.0f, 19289.0f, 19290.0f, 19291.0f, 19292.0f, 19293.0f, 19294.0f, 19295.0f, 19296.0f, 19297.0f, 19298.0f, 19299.0f, 19300.0f, 
19301.0f, 19302.0f, 19303.0f, 19304.0f, 19305.0f, 19306.0f, 19307.0f, 19308.0f, 19309.0f, 19310.0f, 19311.0f, 19312.0f, 19313.0f, 19314.0f, 19315.0f, 19316.0f, 19317.0f, 19318.0f, 19319.0f, 19320.0f, 19321.0f, 19322.0f, 19323.0f, 19324.0f, 19325.0f, 19326.0f, 19327.0f, 19328.0f, 19329.0f, 19330.0f, 19331.0f, 19332.0f, 19333.0f, 19334.0f, 19335.0f, 19336.0f, 19337.0f, 19338.0f, 19339.0f, 19340.0f, 19341.0f, 19342.0f, 19343.0f, 19344.0f, 19345.0f, 19346.0f, 19347.0f, 19348.0f, 19349.0f, 19350.0f, 19351.0f, 19352.0f, 19353.0f, 19354.0f, 19355.0f, 19356.0f, 19357.0f, 19358.0f, 19359.0f, 19360.0f, 19361.0f, 19362.0f, 19363.0f, 19364.0f, 19365.0f, 19366.0f, 19367.0f, 19368.0f, 19369.0f, 19370.0f, 19371.0f, 19372.0f, 19373.0f, 19374.0f, 19375.0f, 19376.0f, 19377.0f, 19378.0f, 19379.0f, 19380.0f, 19381.0f, 19382.0f, 19383.0f, 19384.0f, 19385.0f, 19386.0f, 19387.0f, 19388.0f, 19389.0f, 19390.0f, 19391.0f, 19392.0f, 19393.0f, 19394.0f, 19395.0f, 19396.0f, 19397.0f, 19398.0f, 19399.0f, 19400.0f, 19401.0f, 19402.0f, 19403.0f, 19404.0f, 19405.0f, 19406.0f, 19407.0f, 19408.0f, 19409.0f, 19410.0f, 19411.0f, 19412.0f, 19413.0f, 19414.0f, 19415.0f, 19416.0f, 19417.0f, 19418.0f, 19419.0f, 19420.0f, 19421.0f, 19422.0f, 19423.0f, 19424.0f, 19425.0f, 19426.0f, 19427.0f, 19428.0f, 19429.0f, 19430.0f, 19431.0f, 19432.0f, 19433.0f, 19434.0f, 19435.0f, 19436.0f, 19437.0f, 19438.0f, 19439.0f, 19440.0f, 19441.0f, 19442.0f, 19443.0f, 19444.0f, 19445.0f, 19446.0f, 19447.0f, 19448.0f, 19449.0f, 19450.0f, 19451.0f, 19452.0f, 19453.0f, 19454.0f, 19455.0f, 19456.0f, 19457.0f, 19458.0f, 19459.0f, 19460.0f, 19461.0f, 19462.0f, 19463.0f, 19464.0f, 19465.0f, 19466.0f, 19467.0f, 19468.0f, 19469.0f, 19470.0f, 19471.0f, 19472.0f, 19473.0f, 19474.0f, 19475.0f, 19476.0f, 19477.0f, 19478.0f, 19479.0f, 19480.0f, 19481.0f, 19482.0f, 19483.0f, 19484.0f, 19485.0f, 19486.0f, 19487.0f, 19488.0f, 19489.0f, 19490.0f, 19491.0f, 19492.0f, 19493.0f, 19494.0f, 19495.0f, 19496.0f, 19497.0f, 19498.0f, 19499.0f, 19500.0f, 
19501.0f, 19502.0f, 19503.0f, 19504.0f, 19505.0f, 19506.0f, 19507.0f, 19508.0f, 19509.0f, 19510.0f, 19511.0f, 19512.0f, 19513.0f, 19514.0f, 19515.0f, 19516.0f, 19517.0f, 19518.0f, 19519.0f, 19520.0f, 19521.0f, 19522.0f, 19523.0f, 19524.0f, 19525.0f, 19526.0f, 19527.0f, 19528.0f, 19529.0f, 19530.0f, 19531.0f, 19532.0f, 19533.0f, 19534.0f, 19535.0f, 19536.0f, 19537.0f, 19538.0f, 19539.0f, 19540.0f, 19541.0f, 19542.0f, 19543.0f, 19544.0f, 19545.0f, 19546.0f, 19547.0f, 19548.0f, 19549.0f, 19550.0f, 19551.0f, 19552.0f, 19553.0f, 19554.0f, 19555.0f, 19556.0f, 19557.0f, 19558.0f, 19559.0f, 19560.0f, 19561.0f, 19562.0f, 19563.0f, 19564.0f, 19565.0f, 19566.0f, 19567.0f, 19568.0f, 19569.0f, 19570.0f, 19571.0f, 19572.0f, 19573.0f, 19574.0f, 19575.0f, 19576.0f, 19577.0f, 19578.0f, 19579.0f, 19580.0f, 19581.0f, 19582.0f, 19583.0f, 19584.0f, 19585.0f, 19586.0f, 19587.0f, 19588.0f, 19589.0f, 19590.0f, 19591.0f, 19592.0f, 19593.0f, 19594.0f, 19595.0f, 19596.0f, 19597.0f, 19598.0f, 19599.0f, 19600.0f, 19601.0f, 19602.0f, 19603.0f, 19604.0f, 19605.0f, 19606.0f, 19607.0f, 19608.0f, 19609.0f, 19610.0f, 19611.0f, 19612.0f, 19613.0f, 19614.0f, 19615.0f, 19616.0f, 19617.0f, 19618.0f, 19619.0f, 19620.0f, 19621.0f, 19622.0f, 19623.0f, 19624.0f, 19625.0f, 19626.0f, 19627.0f, 19628.0f, 19629.0f, 19630.0f, 19631.0f, 19632.0f, 19633.0f, 19634.0f, 19635.0f, 19636.0f, 19637.0f, 19638.0f, 19639.0f, 19640.0f, 19641.0f, 19642.0f, 19643.0f, 19644.0f, 19645.0f, 19646.0f, 19647.0f, 19648.0f, 19649.0f, 19650.0f, 19651.0f, 19652.0f, 19653.0f, 19654.0f, 19655.0f, 19656.0f, 19657.0f, 19658.0f, 19659.0f, 19660.0f, 19661.0f, 19662.0f, 19663.0f, 19664.0f, 19665.0f, 19666.0f, 19667.0f, 19668.0f, 19669.0f, 19670.0f, 19671.0f, 19672.0f, 19673.0f, 19674.0f, 19675.0f, 19676.0f, 19677.0f, 19678.0f, 19679.0f, 19680.0f, 19681.0f, 19682.0f, 19683.0f, 19684.0f, 19685.0f, 19686.0f, 19687.0f, 19688.0f, 19689.0f, 19690.0f, 19691.0f, 19692.0f, 19693.0f, 19694.0f, 19695.0f, 19696.0f, 19697.0f, 19698.0f, 19699.0f, 19700.0f, 
19701.0f, 19702.0f, 19703.0f, 19704.0f, 19705.0f, 19706.0f, 19707.0f, 19708.0f, 19709.0f, 19710.0f, 19711.0f, 19712.0f, 19713.0f, 19714.0f, 19715.0f, 19716.0f, 19717.0f, 19718.0f, 19719.0f, 19720.0f, 19721.0f, 19722.0f, 19723.0f, 19724.0f, 19725.0f, 19726.0f, 19727.0f, 19728.0f, 19729.0f, 19730.0f, 19731.0f, 19732.0f, 19733.0f, 19734.0f, 19735.0f, 19736.0f, 19737.0f, 19738.0f, 19739.0f, 19740.0f, 19741.0f, 19742.0f, 19743.0f, 19744.0f, 19745.0f, 19746.0f, 19747.0f, 19748.0f, 19749.0f, 19750.0f, 19751.0f, 19752.0f, 19753.0f, 19754.0f, 19755.0f, 19756.0f, 19757.0f, 19758.0f, 19759.0f, 19760.0f, 19761.0f, 19762.0f, 19763.0f, 19764.0f, 19765.0f, 19766.0f, 19767.0f, 19768.0f, 19769.0f, 19770.0f, 19771.0f, 19772.0f, 19773.0f, 19774.0f, 19775.0f, 19776.0f, 19777.0f, 19778.0f, 19779.0f, 19780.0f, 19781.0f, 19782.0f, 19783.0f, 19784.0f, 19785.0f, 19786.0f, 19787.0f, 19788.0f, 19789.0f, 19790.0f, 19791.0f, 19792.0f, 19793.0f, 19794.0f, 19795.0f, 19796.0f, 19797.0f, 19798.0f, 19799.0f, 19800.0f, 19801.0f, 19802.0f, 19803.0f, 19804.0f, 19805.0f, 19806.0f, 19807.0f, 19808.0f, 19809.0f, 19810.0f, 19811.0f, 19812.0f, 19813.0f, 19814.0f, 19815.0f, 19816.0f, 19817.0f, 19818.0f, 19819.0f, 19820.0f, 19821.0f, 19822.0f, 19823.0f, 19824.0f, 19825.0f, 19826.0f, 19827.0f, 19828.0f, 19829.0f, 19830.0f, 19831.0f, 19832.0f, 19833.0f, 19834.0f, 19835.0f, 19836.0f, 19837.0f, 19838.0f, 19839.0f, 19840.0f, 19841.0f, 19842.0f, 19843.0f, 19844.0f, 19845.0f, 19846.0f, 19847.0f, 19848.0f, 19849.0f, 19850.0f, 19851.0f, 19852.0f, 19853.0f, 19854.0f, 19855.0f, 19856.0f, 19857.0f, 19858.0f, 19859.0f, 19860.0f, 19861.0f, 19862.0f, 19863.0f, 19864.0f, 19865.0f, 19866.0f, 19867.0f, 19868.0f, 19869.0f, 19870.0f, 19871.0f, 19872.0f, 19873.0f, 19874.0f, 19875.0f, 19876.0f, 19877.0f, 19878.0f, 19879.0f, 19880.0f, 19881.0f, 19882.0f, 19883.0f, 19884.0f, 19885.0f, 19886.0f, 19887.0f, 19888.0f, 19889.0f, 19890.0f, 19891.0f, 19892.0f, 19893.0f, 19894.0f, 19895.0f, 19896.0f, 19897.0f, 19898.0f, 19899.0f, 19900.0f, 
19901.0f, 19902.0f, 19903.0f, 19904.0f, 19905.0f, 19906.0f, 19907.0f, 19908.0f, 19909.0f, 19910.0f, 19911.0f, 19912.0f, 19913.0f, 19914.0f, 19915.0f, 19916.0f, 19917.0f, 19918.0f, 19919.0f, 19920.0f, 19921.0f, 19922.0f, 19923.0f, 19924.0f, 19925.0f, 19926.0f, 19927.0f, 19928.0f, 19929.0f, 19930.0f, 19931.0f, 19932.0f, 19933.0f, 19934.0f, 19935.0f, 19936.0f, 19937.0f, 19938.0f, 19939.0f, 19940.0f, 19941.0f, 19942.0f, 19943.0f, 19944.0f, 19945.0f, 19946.0f, 19947.0f, 19948.0f, 19949.0f, 19950.0f, 19951.0f, 19952.0f, 19953.0f, 19954.0f, 19955.0f, 19956.0f, 19957.0f, 19958.0f, 19959.0f, 19960.0f, 19961.0f, 19962.0f, 19963.0f, 19964.0f, 19965.0f, 19966.0f, 19967.0f, 19968.0f, 19969.0f, 19970.0f, 19971.0f, 19972.0f, 19973.0f, 19974.0f, 19975.0f, 19976.0f, 19977.0f, 19978.0f, 19979.0f, 19980.0f, 19981.0f, 19982.0f, 19983.0f, 19984.0f, 19985.0f, 19986.0f, 19987.0f, 19988.0f, 19989.0f, 19990.0f, 19991.0f, 19992.0f, 19993.0f, 19994.0f, 19995.0f, 19996.0f, 19997.0f, 19998.0f, 19999.0f, 20000.0f, 20001.0f, 20002.0f, 20003.0f, 20004.0f, 20005.0f, 20006.0f, 20007.0f, 20008.0f, 20009.0f, 20010.0f, 20011.0f, 20012.0f, 20013.0f, 20014.0f, 20015.0f, 20016.0f, 20017.0f, 20018.0f, 20019.0f, 20020.0f, 20021.0f, 20022.0f, 20023.0f, 20024.0f, 20025.0f, 20026.0f, 20027.0f, 20028.0f, 20029.0f, 20030.0f, 20031.0f, 20032.0f, 20033.0f, 20034.0f, 20035.0f, 20036.0f, 20037.0f, 20038.0f, 20039.0f, 20040.0f, 20041.0f, 20042.0f, 20043.0f, 20044.0f, 20045.0f, 20046.0f, 20047.0f, 20048.0f, 20049.0f, 20050.0f, 20051.0f, 20052.0f, 20053.0f, 20054.0f, 20055.0f, 20056.0f, 20057.0f, 20058.0f, 20059.0f, 20060.0f, 20061.0f, 20062.0f, 20063.0f, 20064.0f, 20065.0f, 20066.0f, 20067.0f, 20068.0f, 20069.0f, 20070.0f, 20071.0f, 20072.0f, 20073.0f, 20074.0f, 20075.0f, 20076.0f, 20077.0f, 20078.0f, 20079.0f, 20080.0f, 20081.0f, 20082.0f, 20083.0f, 20084.0f, 20085.0f, 20086.0f, 20087.0f, 20088.0f, 20089.0f, 20090.0f, 20091.0f, 20092.0f, 20093.0f, 20094.0f, 20095.0f, 20096.0f, 20097.0f, 20098.0f, 20099.0f, 20100.0f, 
20101.0f, 20102.0f, 20103.0f, 20104.0f, 20105.0f, 20106.0f, 20107.0f, 20108.0f, 20109.0f, 20110.0f, 20111.0f, 20112.0f, 20113.0f, 20114.0f, 20115.0f, 20116.0f, 20117.0f, 20118.0f, 20119.0f, 20120.0f, 20121.0f, 20122.0f, 20123.0f, 20124.0f, 20125.0f, 20126.0f, 20127.0f, 20128.0f, 20129.0f, 20130.0f, 20131.0f, 20132.0f, 20133.0f, 20134.0f, 20135.0f, 20136.0f, 20137.0f, 20138.0f, 20139.0f, 20140.0f, 20141.0f, 20142.0f, 20143.0f, 20144.0f, 20145.0f, 20146.0f, 20147.0f, 20148.0f, 20149.0f, 20150.0f, 20151.0f, 20152.0f, 20153.0f, 20154.0f, 20155.0f, 20156.0f, 20157.0f, 20158.0f, 20159.0f, 20160.0f, 20161.0f, 20162.0f, 20163.0f, 20164.0f, 20165.0f, 20166.0f, 20167.0f, 20168.0f, 20169.0f, 20170.0f, 20171.0f, 20172.0f, 20173.0f, 20174.0f, 20175.0f, 20176.0f, 20177.0f, 20178.0f, 20179.0f, 20180.0f, 20181.0f, 20182.0f, 20183.0f, 20184.0f, 20185.0f, 20186.0f, 20187.0f, 20188.0f, 20189.0f, 20190.0f, 20191.0f, 20192.0f, 20193.0f, 20194.0f, 20195.0f, 20196.0f, 20197.0f, 20198.0f, 20199.0f, 20200.0f, 20201.0f, 20202.0f, 20203.0f, 20204.0f, 20205.0f, 20206.0f, 20207.0f, 20208.0f, 20209.0f, 20210.0f, 20211.0f, 20212.0f, 20213.0f, 20214.0f, 20215.0f, 20216.0f, 20217.0f, 20218.0f, 20219.0f, 20220.0f, 20221.0f, 20222.0f, 20223.0f, 20224.0f, 20225.0f, 20226.0f, 20227.0f, 20228.0f, 20229.0f, 20230.0f, 20231.0f, 20232.0f, 20233.0f, 20234.0f, 20235.0f, 20236.0f, 20237.0f, 20238.0f, 20239.0f, 20240.0f, 20241.0f, 20242.0f, 20243.0f, 20244.0f, 20245.0f, 20246.0f, 20247.0f, 20248.0f, 20249.0f, 20250.0f, 20251.0f, 20252.0f, 20253.0f, 20254.0f, 20255.0f, 20256.0f, 20257.0f, 20258.0f, 20259.0f, 20260.0f, 20261.0f, 20262.0f, 20263.0f, 20264.0f, 20265.0f, 20266.0f, 20267.0f, 20268.0f, 20269.0f, 20270.0f, 20271.0f, 20272.0f, 20273.0f, 20274.0f, 20275.0f, 20276.0f, 20277.0f, 20278.0f, 20279.0f, 20280.0f, 20281.0f, 20282.0f, 20283.0f, 20284.0f, 20285.0f, 20286.0f, 20287.0f, 20288.0f, 20289.0f, 20290.0f, 20291.0f, 20292.0f, 20293.0f, 20294.0f, 20295.0f, 20296.0f, 20297.0f, 20298.0f, 20299.0f, 20300.0f, 
20301.0f, 20302.0f, 20303.0f, 20304.0f, 20305.0f, 20306.0f, 20307.0f, 20308.0f, 20309.0f, 20310.0f, 20311.0f, 20312.0f, 20313.0f, 20314.0f, 20315.0f, 20316.0f, 20317.0f, 20318.0f, 20319.0f, 20320.0f, 20321.0f, 20322.0f, 20323.0f, 20324.0f, 20325.0f, 20326.0f, 20327.0f, 20328.0f, 20329.0f, 20330.0f, 20331.0f, 20332.0f, 20333.0f, 20334.0f, 20335.0f, 20336.0f, 20337.0f, 20338.0f, 20339.0f, 20340.0f, 20341.0f, 20342.0f, 20343.0f, 20344.0f, 20345.0f, 20346.0f, 20347.0f, 20348.0f, 20349.0f, 20350.0f, 20351.0f, 20352.0f, 20353.0f, 20354.0f, 20355.0f, 20356.0f, 20357.0f, 20358.0f, 20359.0f, 20360.0f, 20361.0f, 20362.0f, 20363.0f, 20364.0f, 20365.0f, 20366.0f, 20367.0f, 20368.0f, 20369.0f, 20370.0f, 20371.0f, 20372.0f, 20373.0f, 20374.0f, 20375.0f, 20376.0f, 20377.0f, 20378.0f, 20379.0f, 20380.0f, 20381.0f, 20382.0f, 20383.0f, 20384.0f, 20385.0f, 20386.0f, 20387.0f, 20388.0f, 20389.0f, 20390.0f, 20391.0f, 20392.0f, 20393.0f, 20394.0f, 20395.0f, 20396.0f, 20397.0f, 20398.0f, 20399.0f, 20400.0f, 20401.0f, 20402.0f, 20403.0f, 20404.0f, 20405.0f, 20406.0f, 20407.0f, 20408.0f, 20409.0f, 20410.0f, 20411.0f, 20412.0f, 20413.0f, 20414.0f, 20415.0f, 20416.0f, 20417.0f, 20418.0f, 20419.0f, 20420.0f, 20421.0f, 20422.0f, 20423.0f, 20424.0f, 20425.0f, 20426.0f, 20427.0f, 20428.0f, 20429.0f, 20430.0f, 20431.0f, 20432.0f, 20433.0f, 20434.0f, 20435.0f, 20436.0f, 20437.0f, 20438.0f, 20439.0f, 20440.0f, 20441.0f, 20442.0f, 20443.0f, 20444.0f, 20445.0f, 20446.0f, 20447.0f, 20448.0f, 20449.0f, 20450.0f, 20451.0f, 20452.0f, 20453.0f, 20454.0f, 20455.0f, 20456.0f, 20457.0f, 20458.0f, 20459.0f, 20460.0f, 20461.0f, 20462.0f, 20463.0f, 20464.0f, 20465.0f, 20466.0f, 20467.0f, 20468.0f, 20469.0f, 20470.0f, 20471.0f, 20472.0f, 20473.0f, 20474.0f, 20475.0f, 20476.0f, 20477.0f, 20478.0f, 20479.0f, 20480.0f, 20481.0f, 20482.0f, 20483.0f, 20484.0f, 20485.0f, 20486.0f, 20487.0f, 20488.0f, 20489.0f, 20490.0f, 20491.0f, 20492.0f, 20493.0f, 20494.0f, 20495.0f, 20496.0f, 20497.0f, 20498.0f, 20499.0f, 20500.0f, 
20501.0f, 20502.0f, 20503.0f, 20504.0f, 20505.0f, 20506.0f, 20507.0f, 20508.0f, 20509.0f, 20510.0f, 20511.0f, 20512.0f, 20513.0f, 20514.0f, 20515.0f, 20516.0f, 20517.0f, 20518.0f, 20519.0f, 20520.0f, 20521.0f, 20522.0f, 20523.0f, 20524.0f, 20525.0f, 20526.0f, 20527.0f, 20528.0f, 20529.0f, 20530.0f, 20531.0f, 20532.0f, 20533.0f, 20534.0f, 20535.0f, 20536.0f, 20537.0f, 20538.0f, 20539.0f, 20540.0f, 20541.0f, 20542.0f, 20543.0f, 20544.0f, 20545.0f, 20546.0f, 20547.0f, 20548.0f, 20549.0f, 20550.0f, 20551.0f, 20552.0f, 20553.0f, 20554.0f, 20555.0f, 20556.0f, 20557.0f, 20558.0f, 20559.0f, 20560.0f, 20561.0f, 20562.0f, 20563.0f, 20564.0f, 20565.0f, 20566.0f, 20567.0f, 20568.0f, 20569.0f, 20570.0f, 20571.0f, 20572.0f, 20573.0f, 20574.0f, 20575.0f, 20576.0f, 20577.0f, 20578.0f, 20579.0f, 20580.0f, 20581.0f, 20582.0f, 20583.0f, 20584.0f, 20585.0f, 20586.0f, 20587.0f, 20588.0f, 20589.0f, 20590.0f, 20591.0f, 20592.0f, 20593.0f, 20594.0f, 20595.0f, 20596.0f, 20597.0f, 20598.0f, 20599.0f, 20600.0f, 20601.0f, 20602.0f, 20603.0f, 20604.0f, 20605.0f, 20606.0f, 20607.0f, 20608.0f, 20609.0f, 20610.0f, 20611.0f, 20612.0f, 20613.0f, 20614.0f, 20615.0f, 20616.0f, 20617.0f, 20618.0f, 20619.0f, 20620.0f, 20621.0f, 20622.0f, 20623.0f, 20624.0f, 20625.0f, 20626.0f, 20627.0f, 20628.0f, 20629.0f, 20630.0f, 20631.0f, 20632.0f, 20633.0f, 20634.0f, 20635.0f, 20636.0f, 20637.0f, 20638.0f, 20639.0f, 20640.0f, 20641.0f, 20642.0f, 20643.0f, 20644.0f, 20645.0f, 20646.0f, 20647.0f, 20648.0f, 20649.0f, 20650.0f, 20651.0f, 20652.0f, 20653.0f, 20654.0f, 20655.0f, 20656.0f, 20657.0f, 20658.0f, 20659.0f, 20660.0f, 20661.0f, 20662.0f, 20663.0f, 20664.0f, 20665.0f, 20666.0f, 20667.0f, 20668.0f, 20669.0f, 20670.0f, 20671.0f, 20672.0f, 20673.0f, 20674.0f, 20675.0f, 20676.0f, 20677.0f, 20678.0f, 20679.0f, 20680.0f, 20681.0f, 20682.0f, 20683.0f, 20684.0f, 20685.0f, 20686.0f, 20687.0f, 20688.0f, 20689.0f, 20690.0f, 20691.0f, 20692.0f, 20693.0f, 20694.0f, 20695.0f, 20696.0f, 20697.0f, 20698.0f, 20699.0f, 20700.0f, 
20701.0f, 20702.0f, 20703.0f, 20704.0f, 20705.0f, 20706.0f, 20707.0f, 20708.0f, 20709.0f, 20710.0f, 20711.0f, 20712.0f, 20713.0f, 20714.0f, 20715.0f, 20716.0f, 20717.0f, 20718.0f, 20719.0f, 20720.0f, 20721.0f, 20722.0f, 20723.0f, 20724.0f, 20725.0f, 20726.0f, 20727.0f, 20728.0f, 20729.0f, 20730.0f, 20731.0f, 20732.0f, 20733.0f, 20734.0f, 20735.0f, 20736.0f, 20737.0f, 20738.0f, 20739.0f, 20740.0f, 20741.0f, 20742.0f, 20743.0f, 20744.0f, 20745.0f, 20746.0f, 20747.0f, 20748.0f, 20749.0f, 20750.0f, 20751.0f, 20752.0f, 20753.0f, 20754.0f, 20755.0f, 20756.0f, 20757.0f, 20758.0f, 20759.0f, 20760.0f, 20761.0f, 20762.0f, 20763.0f, 20764.0f, 20765.0f, 20766.0f, 20767.0f, 20768.0f, 20769.0f, 20770.0f, 20771.0f, 20772.0f, 20773.0f, 20774.0f, 20775.0f, 20776.0f, 20777.0f, 20778.0f, 20779.0f, 20780.0f, 20781.0f, 20782.0f, 20783.0f, 20784.0f, 20785.0f, 20786.0f, 20787.0f, 20788.0f, 20789.0f, 20790.0f, 20791.0f, 20792.0f, 20793.0f, 20794.0f, 20795.0f, 20796.0f, 20797.0f, 20798.0f, 20799.0f, 20800.0f, 20801.0f, 20802.0f, 20803.0f, 20804.0f, 20805.0f, 20806.0f, 20807.0f, 20808.0f, 20809.0f, 20810.0f, 20811.0f, 20812.0f, 20813.0f, 20814.0f, 20815.0f, 20816.0f, 20817.0f, 20818.0f, 20819.0f, 20820.0f, 20821.0f, 20822.0f, 20823.0f, 20824.0f, 20825.0f, 20826.0f, 20827.0f, 20828.0f, 20829.0f, 20830.0f, 20831.0f, 20832.0f, 20833.0f, 20834.0f, 20835.0f, 20836.0f, 20837.0f, 20838.0f, 20839.0f, 20840.0f, 20841.0f, 20842.0f, 20843.0f, 20844.0f, 20845.0f, 20846.0f, 20847.0f, 20848.0f, 20849.0f, 20850.0f, 20851.0f, 20852.0f, 20853.0f, 20854.0f, 20855.0f, 20856.0f, 20857.0f, 20858.0f, 20859.0f, 20860.0f, 20861.0f, 20862.0f, 20863.0f, 20864.0f, 20865.0f, 20866.0f, 20867.0f, 20868.0f, 20869.0f, 20870.0f, 20871.0f, 20872.0f, 20873.0f, 20874.0f, 20875.0f, 20876.0f, 20877.0f, 20878.0f, 20879.0f, 20880.0f, 20881.0f, 20882.0f, 20883.0f, 20884.0f, 20885.0f, 20886.0f, 20887.0f, 20888.0f, 20889.0f, 20890.0f, 20891.0f, 20892.0f, 20893.0f, 20894.0f, 20895.0f, 20896.0f, 20897.0f, 20898.0f, 20899.0f, 20900.0f, 
20901.0f, 20902.0f, 20903.0f, 20904.0f, 20905.0f, 20906.0f, 20907.0f, 20908.0f, 20909.0f, 20910.0f, 20911.0f, 20912.0f, 20913.0f, 20914.0f, 20915.0f, 20916.0f, 20917.0f, 20918.0f, 20919.0f, 20920.0f, 20921.0f, 20922.0f, 20923.0f, 20924.0f, 20925.0f, 20926.0f, 20927.0f, 20928.0f, 20929.0f, 20930.0f, 20931.0f, 20932.0f, 20933.0f, 20934.0f, 20935.0f, 20936.0f, 20937.0f, 20938.0f, 20939.0f, 20940.0f, 20941.0f, 20942.0f, 20943.0f, 20944.0f, 20945.0f, 20946.0f, 20947.0f, 20948.0f, 20949.0f, 20950.0f, 20951.0f, 20952.0f, 20953.0f, 20954.0f, 20955.0f, 20956.0f, 20957.0f, 20958.0f, 20959.0f, 20960.0f, 20961.0f, 20962.0f, 20963.0f, 20964.0f, 20965.0f, 20966.0f, 20967.0f, 20968.0f, 20969.0f, 20970.0f, 20971.0f, 20972.0f, 20973.0f, 20974.0f, 20975.0f, 20976.0f, 20977.0f, 20978.0f, 20979.0f, 20980.0f, 20981.0f, 20982.0f, 20983.0f, 20984.0f, 20985.0f, 20986.0f, 20987.0f, 20988.0f, 20989.0f, 20990.0f, 20991.0f, 20992.0f, 20993.0f, 20994.0f, 20995.0f, 20996.0f, 20997.0f, 20998.0f, 20999.0f, 21000.0f, 21001.0f, 21002.0f, 21003.0f, 21004.0f, 21005.0f, 21006.0f, 21007.0f, 21008.0f, 21009.0f, 21010.0f, 21011.0f, 21012.0f, 21013.0f, 21014.0f, 21015.0f, 21016.0f, 21017.0f, 21018.0f, 21019.0f, 21020.0f, 21021.0f, 21022.0f, 21023.0f, 21024.0f, 21025.0f, 21026.0f, 21027.0f, 21028.0f, 21029.0f, 21030.0f, 21031.0f, 21032.0f, 21033.0f, 21034.0f, 21035.0f, 21036.0f, 21037.0f, 21038.0f, 21039.0f, 21040.0f, 21041.0f, 21042.0f, 21043.0f, 21044.0f, 21045.0f, 21046.0f, 21047.0f, 21048.0f, 21049.0f, 21050.0f, 21051.0f, 21052.0f, 21053.0f, 21054.0f, 21055.0f, 21056.0f, 21057.0f, 21058.0f, 21059.0f, 21060.0f, 21061.0f, 21062.0f, 21063.0f, 21064.0f, 21065.0f, 21066.0f, 21067.0f, 21068.0f, 21069.0f, 21070.0f, 21071.0f, 21072.0f, 21073.0f, 21074.0f, 21075.0f, 21076.0f, 21077.0f, 21078.0f, 21079.0f, 21080.0f, 21081.0f, 21082.0f, 21083.0f, 21084.0f, 21085.0f, 21086.0f, 21087.0f, 21088.0f, 21089.0f, 21090.0f, 21091.0f, 21092.0f, 21093.0f, 21094.0f, 21095.0f, 21096.0f, 21097.0f, 21098.0f, 21099.0f, 21100.0f, 
21101.0f, 21102.0f, 21103.0f, 21104.0f, 21105.0f, 21106.0f, 21107.0f, 21108.0f, 21109.0f, 21110.0f, 21111.0f, 21112.0f, 21113.0f, 21114.0f, 21115.0f, 21116.0f, 21117.0f, 21118.0f, 21119.0f, 21120.0f, 21121.0f, 21122.0f, 21123.0f, 21124.0f, 21125.0f, 21126.0f, 21127.0f, 21128.0f, 21129.0f, 21130.0f, 21131.0f, 21132.0f, 21133.0f, 21134.0f, 21135.0f, 21136.0f, 21137.0f, 21138.0f, 21139.0f, 21140.0f, 21141.0f, 21142.0f, 21143.0f, 21144.0f, 21145.0f, 21146.0f, 21147.0f, 21148.0f, 21149.0f, 21150.0f, 21151.0f, 21152.0f, 21153.0f, 21154.0f, 21155.0f, 21156.0f, 21157.0f, 21158.0f, 21159.0f}}},
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/concat_float16_3.example.cpp b/nn/runtime/test/generated/examples/concat_float16_3.example.cpp
index b4476c8..651079b 100644
--- a/nn/runtime/test/generated/examples/concat_float16_3.example.cpp
+++ b/nn/runtime/test/generated/examples/concat_float16_3.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: concat_float16_3.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -12,7 +13,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, 31.0f, 32.0f, 33.0f, 34.0f, 35.0f, 36.0f, 37.0f, 38.0f, 39.0f, 40.0f, 41.0f, 42.0f, 43.0f, 44.0f, 45.0f, 46.0f, 47.0f, 48.0f, 49.0f, 50.0f, 51.0f, 52.0f, 53.0f, 54.0f, 55.0f, 56.0f, 57.0f, 58.0f, 59.0f, 60.0f, 61.0f, 62.0f, 63.0f, 64.0f, 65.0f, 66.0f, 67.0f, 68.0f, 69.0f, 70.0f, 71.0f, 72.0f, 73.0f, 74.0f, 75.0f, 76.0f, 77.0f, 78.0f, 79.0f, 80.0f, 81.0f, 82.0f, 83.0f, 84.0f, 85.0f, 86.0f, 87.0f, 88.0f, 89.0f, 90.0f, 91.0f, 92.0f, 93.0f, 94.0f, 95.0f, 96.0f, 97.0f, 98.0f, 99.0f, 100.0f, 101.0f, 102.0f, 103.0f, 104.0f, 105.0f, 106.0f, 107.0f, 108.0f, 109.0f, 110.0f, 111.0f, 112.0f, 113.0f, 114.0f, 115.0f, 116.0f, 117.0f, 118.0f, 119.0f, 120.0f, 121.0f, 122.0f, 123.0f, 124.0f, 125.0f, 126.0f, 127.0f, 128.0f, 129.0f, 130.0f, 131.0f, 132.0f, 133.0f, 134.0f, 135.0f, 136.0f, 137.0f, 138.0f, 139.0f, 140.0f, 141.0f, 142.0f, 143.0f, 144.0f, 145.0f, 146.0f, 147.0f, 148.0f, 149.0f, 150.0f, 151.0f, 152.0f, 153.0f, 154.0f, 155.0f, 156.0f, 157.0f, 158.0f, 159.0f, 160.0f, 161.0f, 162.0f, 163.0f, 164.0f, 165.0f, 166.0f, 167.0f, 168.0f, 169.0f, 170.0f, 171.0f, 172.0f, 173.0f, 174.0f, 175.0f, 176.0f, 177.0f, 178.0f, 179.0f, 180.0f, 181.0f, 182.0f, 183.0f, 184.0f, 185.0f, 186.0f, 187.0f, 188.0f, 189.0f, 190.0f, 191.0f, 192.0f, 193.0f, 194.0f, 195.0f, 196.0f, 197.0f, 198.0f, 199.0f, 200.0f, 201.0f, 202.0f, 203.0f, 204.0f, 205.0f, 206.0f, 207.0f, 208.0f, 209.0f, 210.0f, 211.0f, 212.0f, 213.0f, 214.0f, 215.0f, 216.0f, 217.0f, 218.0f, 219.0f, 220.0f, 221.0f, 222.0f, 223.0f, 224.0f, 225.0f, 226.0f, 227.0f, 228.0f, 229.0f, 230.0f, 231.0f, 232.0f, 233.0f, 234.0f, 235.0f, 236.0f, 237.0f, 238.0f, 239.0f, 240.0f, 241.0f, 242.0f, 243.0f, 244.0f, 245.0f, 246.0f, 247.0f, 248.0f, 249.0f, 250.0f, 251.0f, 252.0f, 253.0f, 254.0f, 255.0f, 256.0f, 257.0f, 258.0f, 259.0f, 260.0f, 261.0f, 
262.0f, 263.0f, 264.0f, 265.0f, 266.0f, 267.0f, 268.0f, 269.0f, 270.0f, 271.0f, 272.0f, 273.0f, 274.0f, 275.0f, 276.0f, 277.0f, 278.0f, 279.0f, 280.0f, 281.0f, 282.0f, 283.0f, 284.0f, 285.0f, 286.0f, 287.0f, 288.0f, 289.0f, 290.0f, 291.0f, 292.0f, 293.0f, 294.0f, 295.0f, 296.0f, 297.0f, 298.0f, 299.0f, 300.0f, 301.0f, 302.0f, 303.0f, 304.0f, 305.0f, 306.0f, 307.0f, 308.0f, 309.0f, 310.0f, 311.0f, 312.0f, 313.0f, 314.0f, 315.0f, 316.0f, 317.0f, 318.0f, 319.0f, 320.0f, 321.0f, 322.0f, 323.0f, 324.0f, 325.0f, 326.0f, 327.0f, 328.0f, 329.0f, 330.0f, 331.0f, 332.0f, 333.0f, 334.0f, 335.0f, 336.0f, 337.0f, 338.0f, 339.0f, 340.0f, 341.0f, 342.0f, 343.0f, 344.0f, 345.0f, 346.0f, 347.0f, 348.0f, 349.0f, 350.0f, 351.0f, 352.0f, 353.0f, 354.0f, 355.0f, 356.0f, 357.0f, 358.0f, 359.0f, 360.0f, 361.0f, 362.0f, 363.0f, 364.0f, 365.0f, 366.0f, 367.0f, 368.0f, 369.0f, 370.0f, 371.0f, 372.0f, 373.0f, 374.0f, 375.0f, 376.0f, 377.0f, 378.0f, 379.0f, 380.0f, 381.0f, 382.0f, 383.0f, 384.0f, 385.0f, 386.0f, 387.0f, 388.0f, 389.0f, 390.0f, 391.0f, 392.0f, 393.0f, 394.0f, 395.0f, 396.0f, 397.0f, 398.0f, 399.0f, 400.0f, 401.0f, 402.0f, 403.0f, 404.0f, 405.0f, 406.0f, 407.0f, 408.0f, 409.0f, 410.0f, 411.0f, 412.0f, 413.0f, 414.0f, 415.0f, 416.0f, 417.0f, 418.0f, 419.0f, 420.0f, 421.0f, 422.0f, 423.0f, 424.0f, 425.0f, 426.0f, 427.0f, 428.0f, 429.0f, 430.0f, 431.0f, 432.0f, 433.0f, 434.0f, 435.0f, 436.0f, 437.0f, 438.0f, 439.0f, 440.0f, 441.0f, 442.0f, 443.0f, 444.0f, 445.0f, 446.0f, 447.0f, 448.0f, 449.0f, 450.0f, 451.0f, 452.0f, 453.0f, 454.0f, 455.0f, 456.0f, 457.0f, 458.0f, 459.0f, 460.0f, 461.0f, 462.0f, 463.0f, 464.0f, 465.0f, 466.0f, 467.0f, 468.0f, 469.0f, 470.0f, 471.0f, 472.0f, 473.0f, 474.0f, 475.0f, 476.0f, 477.0f, 478.0f, 479.0f, 480.0f, 481.0f, 482.0f, 483.0f, 484.0f, 485.0f, 486.0f, 487.0f, 488.0f, 489.0f, 490.0f, 491.0f, 492.0f, 493.0f, 494.0f, 495.0f, 496.0f, 497.0f, 498.0f, 499.0f, 500.0f, 501.0f, 502.0f, 503.0f, 504.0f, 505.0f, 506.0f, 507.0f, 508.0f, 509.0f, 510.0f, 511.0f, 
512.0f, 513.0f, 514.0f, 515.0f, 516.0f, 517.0f, 518.0f, 519.0f, 520.0f, 521.0f, 522.0f, 523.0f, 524.0f, 525.0f, 526.0f, 527.0f, 528.0f, 529.0f, 530.0f, 531.0f, 532.0f, 533.0f, 534.0f, 535.0f, 536.0f, 537.0f, 538.0f, 539.0f, 540.0f, 541.0f, 542.0f, 543.0f, 544.0f, 545.0f, 546.0f, 547.0f, 548.0f, 549.0f, 550.0f, 551.0f, 552.0f, 553.0f, 554.0f, 555.0f, 556.0f, 557.0f, 558.0f, 559.0f, 560.0f, 561.0f, 562.0f, 563.0f, 564.0f, 565.0f, 566.0f, 567.0f, 568.0f, 569.0f, 570.0f, 571.0f, 572.0f, 573.0f, 574.0f, 575.0f, 576.0f, 577.0f, 578.0f, 579.0f, 580.0f, 581.0f, 582.0f, 583.0f, 584.0f, 585.0f, 586.0f, 587.0f, 588.0f, 589.0f, 590.0f, 591.0f, 592.0f, 593.0f, 594.0f, 595.0f, 596.0f, 597.0f, 598.0f, 599.0f, 600.0f, 601.0f, 602.0f, 603.0f, 604.0f, 605.0f, 606.0f, 607.0f, 608.0f, 609.0f, 610.0f, 611.0f, 612.0f, 613.0f, 614.0f, 615.0f, 616.0f, 617.0f, 618.0f, 619.0f, 620.0f, 621.0f, 622.0f, 623.0f, 624.0f, 625.0f, 626.0f, 627.0f, 628.0f, 629.0f, 630.0f, 631.0f, 632.0f, 633.0f, 634.0f, 635.0f, 636.0f, 637.0f, 638.0f, 639.0f, 640.0f, 641.0f, 642.0f, 643.0f, 644.0f, 645.0f, 646.0f, 647.0f, 648.0f, 649.0f, 650.0f, 651.0f, 652.0f, 653.0f, 654.0f, 655.0f, 656.0f, 657.0f, 658.0f, 659.0f, 660.0f, 661.0f, 662.0f, 663.0f, 664.0f, 665.0f, 666.0f, 667.0f, 668.0f, 669.0f, 670.0f, 671.0f, 672.0f, 673.0f, 674.0f, 675.0f, 676.0f, 677.0f, 678.0f, 679.0f, 680.0f, 681.0f, 682.0f, 683.0f, 684.0f, 685.0f, 686.0f, 687.0f, 688.0f, 689.0f, 690.0f, 691.0f, 692.0f, 693.0f, 694.0f, 695.0f, 696.0f, 697.0f, 698.0f, 699.0f, 700.0f, 701.0f, 702.0f, 703.0f, 704.0f, 705.0f, 706.0f, 707.0f, 708.0f, 709.0f, 710.0f, 711.0f, 712.0f, 713.0f, 714.0f, 715.0f, 716.0f, 717.0f, 718.0f, 719.0f, 720.0f, 721.0f, 722.0f, 723.0f, 724.0f, 725.0f, 726.0f, 727.0f, 728.0f, 729.0f, 730.0f, 731.0f, 732.0f, 733.0f, 734.0f, 735.0f, 736.0f, 737.0f, 738.0f, 739.0f, 740.0f, 741.0f, 742.0f, 743.0f, 744.0f, 745.0f, 746.0f, 747.0f, 748.0f, 749.0f, 750.0f, 751.0f, 752.0f, 753.0f, 754.0f, 755.0f, 756.0f, 757.0f, 758.0f, 759.0f, 760.0f, 761.0f, 
762.0f, 763.0f, 764.0f, 765.0f, 766.0f, 767.0f, 768.0f, 769.0f, 770.0f, 771.0f, 772.0f, 773.0f, 774.0f, 775.0f, 776.0f, 777.0f, 778.0f, 779.0f, 780.0f, 781.0f, 782.0f, 783.0f, 784.0f, 785.0f, 786.0f, 787.0f, 788.0f, 789.0f, 790.0f, 791.0f, 792.0f, 793.0f, 794.0f, 795.0f, 796.0f, 797.0f, 798.0f, 799.0f, 800.0f, 801.0f, 802.0f, 803.0f, 804.0f, 805.0f, 806.0f, 807.0f, 808.0f, 809.0f, 810.0f, 811.0f, 812.0f, 813.0f, 814.0f, 815.0f, 816.0f, 817.0f, 818.0f, 819.0f, 820.0f, 821.0f, 822.0f, 823.0f, 824.0f, 825.0f, 826.0f, 827.0f, 828.0f, 829.0f, 830.0f, 831.0f, 832.0f, 833.0f, 834.0f, 835.0f, 836.0f, 837.0f, 838.0f, 839.0f, 840.0f, 841.0f, 842.0f, 843.0f, 844.0f, 845.0f, 846.0f, 847.0f, 848.0f, 849.0f, 850.0f, 851.0f, 852.0f, 853.0f, 854.0f, 855.0f, 856.0f, 857.0f, 858.0f, 859.0f, 860.0f, 861.0f, 862.0f, 863.0f, 864.0f, 865.0f, 866.0f, 867.0f, 868.0f, 869.0f, 870.0f, 871.0f, 872.0f, 873.0f, 874.0f, 875.0f, 876.0f, 877.0f, 878.0f, 879.0f, 880.0f, 881.0f, 882.0f, 883.0f, 884.0f, 885.0f, 886.0f, 887.0f, 888.0f, 889.0f, 890.0f, 891.0f, 892.0f, 893.0f, 894.0f, 895.0f, 896.0f, 897.0f, 898.0f, 899.0f, 900.0f, 901.0f, 902.0f, 903.0f, 904.0f, 905.0f, 906.0f, 907.0f, 908.0f, 909.0f, 910.0f, 911.0f, 912.0f, 913.0f, 914.0f, 915.0f, 916.0f, 917.0f, 918.0f, 919.0f, 920.0f, 921.0f, 922.0f, 923.0f, 924.0f, 925.0f, 926.0f, 927.0f, 928.0f, 929.0f, 930.0f, 931.0f, 932.0f, 933.0f, 934.0f, 935.0f, 936.0f, 937.0f, 938.0f, 939.0f, 940.0f, 941.0f, 942.0f, 943.0f, 944.0f, 945.0f, 946.0f, 947.0f, 948.0f, 949.0f, 950.0f, 951.0f, 952.0f, 953.0f, 954.0f, 955.0f, 956.0f, 957.0f, 958.0f, 959.0f, 960.0f, 961.0f, 962.0f, 963.0f, 964.0f, 965.0f, 966.0f, 967.0f, 968.0f, 969.0f, 970.0f, 971.0f, 972.0f, 973.0f, 974.0f, 975.0f, 976.0f, 977.0f, 978.0f, 979.0f, 980.0f, 981.0f, 982.0f, 983.0f, 984.0f, 985.0f, 986.0f, 987.0f, 988.0f, 989.0f, 990.0f, 991.0f, 992.0f, 993.0f, 994.0f, 995.0f, 996.0f, 997.0f, 998.0f, 999.0f, 1000.0f, 1001.0f, 1002.0f, 1003.0f, 1004.0f, 1005.0f, 1006.0f, 1007.0f, 1008.0f, 1009.0f, 
1010.0f, 1011.0f, 1012.0f, 1013.0f, 1014.0f, 1015.0f, 1016.0f, 1017.0f, 1018.0f, 1019.0f, 1020.0f, 1021.0f, 1022.0f, 1023.0f, 1024.0f, 1025.0f, 1026.0f, 1027.0f, 1028.0f, 1029.0f, 1030.0f, 1031.0f, 1032.0f, 1033.0f, 1034.0f, 1035.0f, 1036.0f, 1037.0f, 1038.0f, 1039.0f, 1040.0f, 1041.0f, 1042.0f, 1043.0f, 1044.0f, 1045.0f, 1046.0f, 1047.0f, 1048.0f, 1049.0f, 1050.0f, 1051.0f, 1052.0f, 1053.0f, 1054.0f, 1055.0f, 1056.0f, 1057.0f, 1058.0f, 1059.0f, 1060.0f, 1061.0f, 1062.0f, 1063.0f, 1064.0f, 1065.0f, 1066.0f, 1067.0f, 1068.0f, 1069.0f, 1070.0f, 1071.0f, 1072.0f, 1073.0f, 1074.0f, 1075.0f, 1076.0f, 1077.0f, 1078.0f, 1079.0f, 1080.0f, 1081.0f, 1082.0f, 1083.0f, 1084.0f, 1085.0f, 1086.0f, 1087.0f, 1088.0f, 1089.0f, 1090.0f, 1091.0f, 1092.0f, 1093.0f, 1094.0f, 1095.0f, 1096.0f, 1097.0f, 1098.0f, 1099.0f, 1100.0f, 1101.0f, 1102.0f, 1103.0f, 1104.0f, 1105.0f, 1106.0f, 1107.0f, 1108.0f, 1109.0f, 1110.0f, 1111.0f, 1112.0f, 1113.0f, 1114.0f, 1115.0f, 1116.0f, 1117.0f, 1118.0f, 1119.0f, 1120.0f, 1121.0f, 1122.0f, 1123.0f, 1124.0f, 1125.0f, 1126.0f, 1127.0f, 1128.0f, 1129.0f, 1130.0f, 1131.0f, 1132.0f, 1133.0f, 1134.0f, 1135.0f, 1136.0f, 1137.0f, 1138.0f, 1139.0f, 1140.0f, 1141.0f, 1142.0f, 1143.0f, 1144.0f, 1145.0f, 1146.0f, 1147.0f, 1148.0f, 1149.0f, 1150.0f, 1151.0f, 1152.0f, 1153.0f, 1154.0f, 1155.0f, 1156.0f, 1157.0f, 1158.0f, 1159.0f, 1160.0f, 1161.0f, 1162.0f, 1163.0f, 1164.0f, 1165.0f, 1166.0f, 1167.0f, 1168.0f, 1169.0f, 1170.0f, 1171.0f, 1172.0f, 1173.0f, 1174.0f, 1175.0f, 1176.0f, 1177.0f, 1178.0f, 1179.0f, 1180.0f, 1181.0f, 1182.0f, 1183.0f, 1184.0f, 1185.0f, 1186.0f, 1187.0f, 1188.0f, 1189.0f, 1190.0f, 1191.0f, 1192.0f, 1193.0f, 1194.0f, 1195.0f, 1196.0f, 1197.0f, 1198.0f, 1199.0f, 1200.0f, 1201.0f, 1202.0f, 1203.0f, 1204.0f, 1205.0f, 1206.0f, 1207.0f, 1208.0f, 1209.0f, 1210.0f, 1211.0f, 1212.0f, 1213.0f, 1214.0f, 1215.0f, 1216.0f, 1217.0f, 1218.0f, 1219.0f, 1220.0f, 1221.0f, 1222.0f, 1223.0f, 1224.0f, 1225.0f, 1226.0f, 1227.0f, 1228.0f, 1229.0f, 1230.0f, 1231.0f, 
1232.0f, 1233.0f, 1234.0f, 1235.0f, 1236.0f, 1237.0f, 1238.0f, 1239.0f, 1240.0f, 1241.0f, 1242.0f, 1243.0f, 1244.0f, 1245.0f, 1246.0f, 1247.0f, 1248.0f, 1249.0f, 1250.0f, 1251.0f, 1252.0f, 1253.0f, 1254.0f, 1255.0f, 1256.0f, 1257.0f, 1258.0f, 1259.0f, 1260.0f, 1261.0f, 1262.0f, 1263.0f, 1264.0f, 1265.0f, 1266.0f, 1267.0f, 1268.0f, 1269.0f, 1270.0f, 1271.0f, 1272.0f, 1273.0f, 1274.0f, 1275.0f, 1276.0f, 1277.0f, 1278.0f, 1279.0f, 1280.0f, 1281.0f, 1282.0f, 1283.0f, 1284.0f, 1285.0f, 1286.0f, 1287.0f, 1288.0f, 1289.0f, 1290.0f, 1291.0f, 1292.0f, 1293.0f, 1294.0f, 1295.0f, 1296.0f, 1297.0f, 1298.0f, 1299.0f, 1300.0f, 1301.0f, 1302.0f, 1303.0f, 1304.0f, 1305.0f, 1306.0f, 1307.0f, 1308.0f, 1309.0f, 1310.0f, 1311.0f, 1312.0f, 1313.0f, 1314.0f, 1315.0f, 1316.0f, 1317.0f, 1318.0f, 1319.0f, 1320.0f, 1321.0f, 1322.0f, 1323.0f, 1324.0f, 1325.0f, 1326.0f, 1327.0f, 1328.0f, 1329.0f, 1330.0f, 1331.0f, 1332.0f, 1333.0f, 1334.0f, 1335.0f, 1336.0f, 1337.0f, 1338.0f, 1339.0f, 1340.0f, 1341.0f, 1342.0f, 1343.0f, 1344.0f, 1345.0f, 1346.0f, 1347.0f, 1348.0f, 1349.0f, 1350.0f, 1351.0f, 1352.0f, 1353.0f, 1354.0f, 1355.0f, 1356.0f, 1357.0f, 1358.0f, 1359.0f, 1360.0f, 1361.0f, 1362.0f, 1363.0f, 1364.0f, 1365.0f, 1366.0f, 1367.0f, 1368.0f, 1369.0f, 1370.0f, 1371.0f, 1372.0f, 1373.0f, 1374.0f, 1375.0f, 1376.0f, 1377.0f, 1378.0f, 1379.0f, 1380.0f, 1381.0f, 1382.0f, 1383.0f, 1384.0f, 1385.0f, 1386.0f, 1387.0f, 1388.0f, 1389.0f, 1390.0f, 1391.0f, 1392.0f, 1393.0f, 1394.0f, 1395.0f, 1396.0f, 1397.0f, 1398.0f, 1399.0f, 1400.0f, 1401.0f, 1402.0f, 1403.0f, 1404.0f, 1405.0f, 1406.0f, 1407.0f, 1408.0f, 1409.0f, 1410.0f, 1411.0f, 1412.0f, 1413.0f, 1414.0f, 1415.0f, 1416.0f, 1417.0f, 1418.0f, 1419.0f, 1420.0f, 1421.0f, 1422.0f, 1423.0f, 1424.0f, 1425.0f, 1426.0f, 1427.0f, 1428.0f, 1429.0f, 1430.0f, 1431.0f, 1432.0f, 1433.0f, 1434.0f, 1435.0f, 1436.0f, 1437.0f, 1438.0f, 1439.0f, 1440.0f, 1441.0f, 1442.0f, 1443.0f, 1444.0f, 1445.0f, 1446.0f, 1447.0f, 1448.0f, 1449.0f, 1450.0f, 1451.0f, 1452.0f, 1453.0f, 
1454.0f, 1455.0f, 1456.0f, 1457.0f, 1458.0f, 1459.0f, 1460.0f, 1461.0f, 1462.0f, 1463.0f, 1464.0f, 1465.0f, 1466.0f, 1467.0f, 1468.0f, 1469.0f, 1470.0f, 1471.0f, 1472.0f, 1473.0f, 1474.0f, 1475.0f, 1476.0f, 1477.0f, 1478.0f, 1479.0f, 1480.0f, 1481.0f, 1482.0f, 1483.0f, 1484.0f, 1485.0f, 1486.0f, 1487.0f, 1488.0f, 1489.0f, 1490.0f, 1491.0f, 1492.0f, 1493.0f, 1494.0f, 1495.0f, 1496.0f, 1497.0f, 1498.0f, 1499.0f, 1500.0f, 1501.0f, 1502.0f, 1503.0f, 1504.0f, 1505.0f, 1506.0f, 1507.0f, 1508.0f, 1509.0f, 1510.0f, 1511.0f, 1512.0f, 1513.0f, 1514.0f, 1515.0f, 1516.0f, 1517.0f, 1518.0f, 1519.0f, 1520.0f, 1521.0f, 1522.0f, 1523.0f, 1524.0f, 1525.0f, 1526.0f, 1527.0f, 1528.0f, 1529.0f, 1530.0f, 1531.0f, 1532.0f, 1533.0f, 1534.0f, 1535.0f, 1536.0f, 1537.0f, 1538.0f, 1539.0f, 1540.0f, 1541.0f, 1542.0f, 1543.0f, 1544.0f, 1545.0f, 1546.0f, 1547.0f, 1548.0f, 1549.0f, 1550.0f, 1551.0f, 1552.0f, 1553.0f, 1554.0f, 1555.0f, 1556.0f, 1557.0f, 1558.0f, 1559.0f, 1560.0f, 1561.0f, 1562.0f, 1563.0f, 1564.0f, 1565.0f, 1566.0f, 1567.0f, 1568.0f, 1569.0f, 1570.0f, 1571.0f, 1572.0f, 1573.0f, 1574.0f, 1575.0f, 1576.0f, 1577.0f, 1578.0f, 1579.0f, 1580.0f, 1581.0f, 1582.0f, 1583.0f, 1584.0f, 1585.0f, 1586.0f, 1587.0f, 1588.0f, 1589.0f, 1590.0f, 1591.0f, 1592.0f, 1593.0f, 1594.0f, 1595.0f, 1596.0f, 1597.0f, 1598.0f, 1599.0f, 1600.0f, 1601.0f, 1602.0f, 1603.0f, 1604.0f, 1605.0f, 1606.0f, 1607.0f, 1608.0f, 1609.0f, 1610.0f, 1611.0f, 1612.0f, 1613.0f, 1614.0f, 1615.0f, 1616.0f, 1617.0f, 1618.0f, 1619.0f, 1620.0f, 1621.0f, 1622.0f, 1623.0f, 1624.0f, 1625.0f, 1626.0f, 1627.0f, 1628.0f, 1629.0f, 1630.0f, 1631.0f, 1632.0f, 1633.0f, 1634.0f, 1635.0f, 1636.0f, 1637.0f, 1638.0f, 1639.0f, 1640.0f, 1641.0f, 1642.0f, 1643.0f, 1644.0f, 1645.0f, 1646.0f, 1647.0f, 1648.0f, 1649.0f, 1650.0f, 1651.0f, 1652.0f, 1653.0f, 1654.0f, 1655.0f, 1656.0f, 1657.0f, 1658.0f, 1659.0f, 1660.0f, 1661.0f, 1662.0f, 1663.0f, 1664.0f, 1665.0f, 1666.0f, 1667.0f, 1668.0f, 1669.0f, 1670.0f, 1671.0f, 1672.0f, 1673.0f, 1674.0f, 1675.0f, 
1676.0f, 1677.0f, 1678.0f, 1679.0f, 1680.0f, 1681.0f, 1682.0f, 1683.0f, 1684.0f, 1685.0f, 1686.0f, 1687.0f, 1688.0f, 1689.0f, 1690.0f, 1691.0f, 1692.0f, 1693.0f, 1694.0f, 1695.0f, 1696.0f, 1697.0f, 1698.0f, 1699.0f, 1700.0f, 1701.0f, 1702.0f, 1703.0f, 1704.0f, 1705.0f, 1706.0f, 1707.0f, 1708.0f, 1709.0f, 1710.0f, 1711.0f, 1712.0f, 1713.0f, 1714.0f, 1715.0f, 1716.0f, 1717.0f, 1718.0f, 1719.0f, 1720.0f, 1721.0f, 1722.0f, 1723.0f, 1724.0f, 1725.0f, 1726.0f, 1727.0f, 1728.0f, 1729.0f, 1730.0f, 1731.0f, 1732.0f, 1733.0f, 1734.0f, 1735.0f, 1736.0f, 1737.0f, 1738.0f, 1739.0f, 1740.0f, 1741.0f, 1742.0f, 1743.0f, 1744.0f, 1745.0f, 1746.0f, 1747.0f, 1748.0f, 1749.0f, 1750.0f, 1751.0f, 1752.0f, 1753.0f, 1754.0f, 1755.0f, 1756.0f, 1757.0f, 1758.0f, 1759.0f, 1760.0f, 1761.0f, 1762.0f, 1763.0f, 1764.0f, 1765.0f, 1766.0f, 1767.0f, 1768.0f, 1769.0f, 1770.0f, 1771.0f, 1772.0f, 1773.0f, 1774.0f, 1775.0f, 1776.0f, 1777.0f, 1778.0f, 1779.0f, 1780.0f, 1781.0f, 1782.0f, 1783.0f, 1784.0f, 1785.0f, 1786.0f, 1787.0f, 1788.0f, 1789.0f, 1790.0f, 1791.0f, 1792.0f, 1793.0f, 1794.0f, 1795.0f, 1796.0f, 1797.0f, 1798.0f, 1799.0f, 1800.0f, 1801.0f, 1802.0f, 1803.0f, 1804.0f, 1805.0f, 1806.0f, 1807.0f, 1808.0f, 1809.0f, 1810.0f, 1811.0f, 1812.0f, 1813.0f, 1814.0f, 1815.0f, 1816.0f, 1817.0f, 1818.0f, 1819.0f, 1820.0f, 1821.0f, 1822.0f, 1823.0f, 1824.0f, 1825.0f, 1826.0f, 1827.0f, 1828.0f, 1829.0f, 1830.0f, 1831.0f, 1832.0f, 1833.0f, 1834.0f, 1835.0f, 1836.0f, 1837.0f, 1838.0f, 1839.0f, 1840.0f, 1841.0f, 1842.0f, 1843.0f, 1844.0f, 1845.0f, 1846.0f, 1847.0f, 1848.0f, 1849.0f, 1850.0f, 1851.0f, 1852.0f, 1853.0f, 1854.0f, 1855.0f, 1856.0f, 1857.0f, 1858.0f, 1859.0f, 1860.0f, 1861.0f, 1862.0f, 1863.0f, 1864.0f, 1865.0f, 1866.0f, 1867.0f, 1868.0f, 1869.0f, 1870.0f, 1871.0f, 1872.0f, 1873.0f, 1874.0f, 1875.0f, 1876.0f, 1877.0f, 1878.0f, 1879.0f, 1880.0f, 1881.0f, 1882.0f, 1883.0f, 1884.0f, 1885.0f, 1886.0f, 1887.0f, 1888.0f, 1889.0f, 1890.0f, 1891.0f, 1892.0f, 1893.0f, 1894.0f, 1895.0f, 1896.0f, 1897.0f, 
1898.0f, 1899.0f, 1900.0f, 1901.0f, 1902.0f, 1903.0f, 1904.0f, 1905.0f, 1906.0f, 1907.0f, 1908.0f, 1909.0f, 1910.0f, 1911.0f, 1912.0f, 1913.0f, 1914.0f, 1915.0f, 1916.0f, 1917.0f, 1918.0f, 1919.0f, 1920.0f, 1921.0f, 1922.0f, 1923.0f, 1924.0f, 1925.0f, 1926.0f, 1927.0f, 1928.0f, 1929.0f, 1930.0f, 1931.0f, 1932.0f, 1933.0f, 1934.0f, 1935.0f, 1936.0f, 1937.0f, 1938.0f, 1939.0f, 1940.0f, 1941.0f, 1942.0f, 1943.0f, 1944.0f, 1945.0f, 1946.0f, 1947.0f, 1948.0f, 1949.0f, 1950.0f, 1951.0f, 1952.0f, 1953.0f, 1954.0f, 1955.0f, 1956.0f, 1957.0f, 1958.0f, 1959.0f, 1960.0f, 1961.0f, 1962.0f, 1963.0f, 1964.0f, 1965.0f, 1966.0f, 1967.0f, 1968.0f, 1969.0f, 1970.0f, 1971.0f, 1972.0f, 1973.0f, 1974.0f, 1975.0f, 1976.0f, 1977.0f, 1978.0f, 1979.0f, 1980.0f, 1981.0f, 1982.0f, 1983.0f, 1984.0f, 1985.0f, 1986.0f, 1987.0f, 1988.0f, 1989.0f, 1990.0f, 1991.0f, 1992.0f, 1993.0f, 1994.0f, 1995.0f, 1996.0f, 1997.0f, 1998.0f, 1999.0f, 2000.0f, 2001.0f, 2002.0f, 2003.0f, 2004.0f, 2005.0f, 2006.0f, 2007.0f, 2008.0f, 2009.0f, 2010.0f, 2011.0f, 2012.0f, 2013.0f, 2014.0f, 2015.0f, 2016.0f, 2017.0f, 2018.0f, 2019.0f, 2020.0f, 2021.0f, 2022.0f, 2023.0f, 2024.0f, 2025.0f, 2026.0f, 2027.0f, 2028.0f, 2029.0f, 2030.0f, 2031.0f, 2032.0f, 2033.0f, 2034.0f, 2035.0f, 2036.0f, 2037.0f, 2038.0f, 2039.0f, 2040.0f, 2041.0f, 2042.0f, 2043.0f, 2044.0f, 2045.0f, 2046.0f, 2047.0f, 2048.0f, 2049.0f, 2050.0f, 2051.0f, 2052.0f, 2053.0f, 2054.0f, 2055.0f, 2056.0f, 2057.0f, 2058.0f, 2059.0f, 2060.0f, 2061.0f, 2062.0f, 2063.0f, 2064.0f, 2065.0f, 2066.0f, 2067.0f, 2068.0f, 2069.0f, 2070.0f, 2071.0f, 2072.0f, 2073.0f, 2074.0f, 2075.0f, 2076.0f, 2077.0f, 2078.0f, 2079.0f, 2080.0f, 2081.0f, 2082.0f, 2083.0f, 2084.0f, 2085.0f, 2086.0f, 2087.0f, 2088.0f, 2089.0f, 2090.0f, 2091.0f, 2092.0f, 2093.0f, 2094.0f, 2095.0f, 2096.0f, 2097.0f, 2098.0f, 2099.0f, 2100.0f, 2101.0f, 2102.0f, 2103.0f, 2104.0f, 2105.0f, 2106.0f, 2107.0f, 2108.0f, 2109.0f, 2110.0f, 2111.0f, 2112.0f, 2113.0f, 2114.0f, 2115.0f, 2116.0f, 2117.0f, 2118.0f, 2119.0f, 
2120.0f, 2121.0f, 2122.0f, 2123.0f, 2124.0f, 2125.0f, 2126.0f, 2127.0f, 2128.0f, 2129.0f, 2130.0f, 2131.0f, 2132.0f, 2133.0f, 2134.0f, 2135.0f, 2136.0f, 2137.0f, 2138.0f, 2139.0f, 2140.0f, 2141.0f, 2142.0f, 2143.0f, 2144.0f, 2145.0f, 2146.0f, 2147.0f, 2148.0f, 2149.0f, 2150.0f, 2151.0f, 2152.0f, 2153.0f, 2154.0f, 2155.0f, 2156.0f, 2157.0f, 2158.0f, 2159.0f, 2160.0f, 2161.0f, 2162.0f, 2163.0f, 2164.0f, 2165.0f, 2166.0f, 2167.0f, 2168.0f, 2169.0f, 2170.0f, 2171.0f, 2172.0f, 2173.0f, 2174.0f, 2175.0f, 2176.0f, 2177.0f, 2178.0f, 2179.0f, 2180.0f, 2181.0f, 2182.0f, 2183.0f, 2184.0f, 2185.0f, 2186.0f, 2187.0f, 2188.0f, 2189.0f, 2190.0f, 2191.0f, 2192.0f, 2193.0f, 2194.0f, 2195.0f, 2196.0f, 2197.0f, 2198.0f, 2199.0f, 2200.0f, 2201.0f, 2202.0f, 2203.0f, 2204.0f, 2205.0f, 2206.0f, 2207.0f, 2208.0f, 2209.0f, 2210.0f, 2211.0f, 2212.0f, 2213.0f, 2214.0f, 2215.0f, 2216.0f, 2217.0f, 2218.0f, 2219.0f, 2220.0f, 2221.0f, 2222.0f, 2223.0f, 2224.0f, 2225.0f, 2226.0f, 2227.0f, 2228.0f, 2229.0f, 2230.0f, 2231.0f, 2232.0f, 2233.0f, 2234.0f, 2235.0f, 2236.0f, 2237.0f, 2238.0f, 2239.0f, 2240.0f, 2241.0f, 2242.0f, 2243.0f, 2244.0f, 2245.0f, 2246.0f, 2247.0f, 2248.0f, 2249.0f, 2250.0f, 2251.0f, 2252.0f, 2253.0f, 2254.0f, 2255.0f, 2256.0f, 2257.0f, 2258.0f, 2259.0f, 2260.0f, 2261.0f, 2262.0f, 2263.0f, 2264.0f, 2265.0f, 2266.0f, 2267.0f, 2268.0f, 2269.0f, 2270.0f, 2271.0f, 2272.0f, 2273.0f, 2274.0f, 2275.0f, 2276.0f, 2277.0f, 2278.0f, 2279.0f, 2280.0f, 2281.0f, 2282.0f, 2283.0f, 2284.0f, 2285.0f, 2286.0f, 2287.0f, 2288.0f, 2289.0f, 2290.0f, 2291.0f, 2292.0f, 2293.0f, 2294.0f, 2295.0f, 2296.0f, 2297.0f, 2298.0f, 2299.0f, 2300.0f, 2301.0f, 2302.0f, 2303.0f, 2304.0f, 2305.0f, 2306.0f, 2307.0f, 2308.0f, 2309.0f, 2310.0f, 2311.0f, 2312.0f, 2313.0f, 2314.0f, 2315.0f, 2316.0f, 2317.0f, 2318.0f, 2319.0f, 2320.0f, 2321.0f, 2322.0f, 2323.0f, 2324.0f, 2325.0f, 2326.0f, 2327.0f, 2328.0f, 2329.0f, 2330.0f, 2331.0f, 2332.0f, 2333.0f, 2334.0f, 2335.0f, 2336.0f, 2337.0f, 2338.0f, 2339.0f, 2340.0f, 2341.0f, 
2342.0f, 2343.0f, 2344.0f, 2345.0f, 2346.0f, 2347.0f, 2348.0f, 2349.0f, 2350.0f, 2351.0f, 2352.0f, 2353.0f, 2354.0f, 2355.0f, 2356.0f, 2357.0f, 2358.0f, 2359.0f, 2360.0f, 2361.0f, 2362.0f, 2363.0f, 2364.0f, 2365.0f, 2366.0f, 2367.0f, 2368.0f, 2369.0f, 2370.0f, 2371.0f, 2372.0f, 2373.0f, 2374.0f, 2375.0f, 2376.0f, 2377.0f, 2378.0f, 2379.0f, 2380.0f, 2381.0f, 2382.0f, 2383.0f, 2384.0f, 2385.0f, 2386.0f, 2387.0f, 2388.0f, 2389.0f, 2390.0f, 2391.0f, 2392.0f, 2393.0f, 2394.0f, 2395.0f, 2396.0f, 2397.0f, 2398.0f, 2399.0f, 2400.0f, 2401.0f, 2402.0f, 2403.0f, 2404.0f, 2405.0f, 2406.0f, 2407.0f, 2408.0f, 2409.0f, 2410.0f, 2411.0f, 2412.0f, 2413.0f, 2414.0f, 2415.0f, 2416.0f, 2417.0f, 2418.0f, 2419.0f, 2420.0f, 2421.0f, 2422.0f, 2423.0f, 2424.0f, 2425.0f, 2426.0f, 2427.0f, 2428.0f, 2429.0f, 2430.0f, 2431.0f, 2432.0f, 2433.0f, 2434.0f, 2435.0f, 2436.0f, 2437.0f, 2438.0f, 2439.0f, 2440.0f, 2441.0f, 2442.0f, 2443.0f, 2444.0f, 2445.0f, 2446.0f, 2447.0f, 2448.0f, 2449.0f, 2450.0f, 2451.0f, 2452.0f, 2453.0f, 2454.0f, 2455.0f, 2456.0f, 2457.0f, 2458.0f, 2459.0f, 2460.0f, 2461.0f, 2462.0f, 2463.0f, 2464.0f, 2465.0f, 2466.0f, 2467.0f, 2468.0f, 2469.0f, 2470.0f, 2471.0f, 2472.0f, 2473.0f, 2474.0f, 2475.0f, 2476.0f, 2477.0f, 2478.0f, 2479.0f, 2480.0f, 2481.0f, 2482.0f, 2483.0f, 2484.0f, 2485.0f, 2486.0f, 2487.0f, 2488.0f, 2489.0f, 2490.0f, 2491.0f, 2492.0f, 2493.0f, 2494.0f, 2495.0f, 2496.0f, 2497.0f, 2498.0f, 2499.0f, 2500.0f, 2501.0f, 2502.0f, 2503.0f, 2504.0f, 2505.0f, 2506.0f, 2507.0f, 2508.0f, 2509.0f, 2510.0f, 2511.0f, 2512.0f, 2513.0f, 2514.0f, 2515.0f, 2516.0f, 2517.0f, 2518.0f, 2519.0f, 2520.0f, 2521.0f, 2522.0f, 2523.0f, 2524.0f, 2525.0f, 2526.0f, 2527.0f, 2528.0f, 2529.0f, 2530.0f, 2531.0f, 2532.0f, 2533.0f, 2534.0f, 2535.0f, 2536.0f, 2537.0f, 2538.0f, 2539.0f, 2540.0f, 2541.0f, 2542.0f, 2543.0f, 2544.0f, 2545.0f, 2546.0f, 2547.0f, 2548.0f, 2549.0f, 2550.0f, 2551.0f, 2552.0f, 2553.0f, 2554.0f, 2555.0f, 2556.0f, 2557.0f, 2558.0f, 2559.0f, 2560.0f, 2561.0f, 2562.0f, 2563.0f, 
2564.0f, 2565.0f, 2566.0f, 2567.0f, 2568.0f, 2569.0f, 2570.0f, 2571.0f, 2572.0f, 2573.0f, 2574.0f, 2575.0f, 2576.0f, 2577.0f, 2578.0f, 2579.0f, 2580.0f, 2581.0f, 2582.0f, 2583.0f, 2584.0f, 2585.0f, 2586.0f, 2587.0f, 2588.0f, 2589.0f, 2590.0f, 2591.0f, 2592.0f, 2593.0f, 2594.0f, 2595.0f, 2596.0f, 2597.0f, 2598.0f, 2599.0f, 2600.0f, 2601.0f, 2602.0f, 2603.0f, 2604.0f, 2605.0f, 2606.0f, 2607.0f, 2608.0f, 2609.0f, 2610.0f, 2611.0f, 2612.0f, 2613.0f, 2614.0f, 2615.0f, 2616.0f, 2617.0f, 2618.0f, 2619.0f, 2620.0f, 2621.0f, 2622.0f, 2623.0f, 2624.0f, 2625.0f, 2626.0f, 2627.0f, 2628.0f, 2629.0f, 2630.0f, 2631.0f, 2632.0f, 2633.0f, 2634.0f, 2635.0f, 2636.0f, 2637.0f, 2638.0f, 2639.0f, 2640.0f, 2641.0f, 2642.0f, 2643.0f, 2644.0f, 2645.0f, 2646.0f, 2647.0f, 2648.0f, 2649.0f, 2650.0f, 2651.0f, 2652.0f, 2653.0f, 2654.0f, 2655.0f, 2656.0f, 2657.0f, 2658.0f, 2659.0f, 2660.0f, 2661.0f, 2662.0f, 2663.0f, 2664.0f, 2665.0f, 2666.0f, 2667.0f, 2668.0f, 2669.0f, 2670.0f, 2671.0f, 2672.0f, 2673.0f, 2674.0f, 2675.0f, 2676.0f, 2677.0f, 2678.0f, 2679.0f, 2680.0f, 2681.0f, 2682.0f, 2683.0f, 2684.0f, 2685.0f, 2686.0f, 2687.0f, 2688.0f, 2689.0f, 2690.0f, 2691.0f, 2692.0f, 2693.0f, 2694.0f, 2695.0f, 2696.0f, 2697.0f, 2698.0f, 2699.0f, 2700.0f, 2701.0f, 2702.0f, 2703.0f, 2704.0f, 2705.0f, 2706.0f, 2707.0f, 2708.0f, 2709.0f, 2710.0f, 2711.0f, 2712.0f, 2713.0f, 2714.0f, 2715.0f, 2716.0f, 2717.0f, 2718.0f, 2719.0f, 2720.0f, 2721.0f, 2722.0f, 2723.0f, 2724.0f, 2725.0f, 2726.0f, 2727.0f, 2728.0f, 2729.0f, 2730.0f, 2731.0f, 2732.0f, 2733.0f, 2734.0f, 2735.0f, 2736.0f, 2737.0f, 2738.0f, 2739.0f, 2740.0f, 2741.0f, 2742.0f, 2743.0f, 2744.0f, 2745.0f, 2746.0f, 2747.0f, 2748.0f, 2749.0f, 2750.0f, 2751.0f, 2752.0f, 2753.0f, 2754.0f, 2755.0f, 2756.0f, 2757.0f, 2758.0f, 2759.0f, 2760.0f, 2761.0f, 2762.0f, 2763.0f, 2764.0f, 2765.0f, 2766.0f, 2767.0f, 2768.0f, 2769.0f, 2770.0f, 2771.0f, 2772.0f, 2773.0f, 2774.0f, 2775.0f, 2776.0f, 2777.0f, 2778.0f, 2779.0f, 2780.0f, 2781.0f, 2782.0f, 2783.0f, 2784.0f, 2785.0f, 
2786.0f, 2787.0f, 2788.0f, 2789.0f, 2790.0f, 2791.0f, 2792.0f, 2793.0f, 2794.0f, 2795.0f, 2796.0f, 2797.0f, 2798.0f, 2799.0f, 2800.0f, 2801.0f, 2802.0f, 2803.0f, 2804.0f, 2805.0f, 2806.0f, 2807.0f, 2808.0f, 2809.0f, 2810.0f, 2811.0f, 2812.0f, 2813.0f, 2814.0f, 2815.0f, 2816.0f, 2817.0f, 2818.0f, 2819.0f, 2820.0f, 2821.0f, 2822.0f, 2823.0f, 2824.0f, 2825.0f, 2826.0f, 2827.0f, 2828.0f, 2829.0f, 2830.0f, 2831.0f, 2832.0f, 2833.0f, 2834.0f, 2835.0f, 2836.0f, 2837.0f, 2838.0f, 2839.0f, 2840.0f, 2841.0f, 2842.0f, 2843.0f, 2844.0f, 2845.0f, 2846.0f, 2847.0f, 2848.0f, 2849.0f, 2850.0f, 2851.0f, 2852.0f, 2853.0f, 2854.0f, 2855.0f, 2856.0f, 2857.0f, 2858.0f, 2859.0f, 2860.0f, 2861.0f, 2862.0f, 2863.0f, 2864.0f, 2865.0f, 2866.0f, 2867.0f, 2868.0f, 2869.0f, 2870.0f, 2871.0f, 2872.0f, 2873.0f, 2874.0f, 2875.0f, 2876.0f, 2877.0f, 2878.0f, 2879.0f, 2880.0f, 2881.0f, 2882.0f, 2883.0f, 2884.0f, 2885.0f, 2886.0f, 2887.0f, 2888.0f, 2889.0f, 2890.0f, 2891.0f, 2892.0f, 2893.0f, 2894.0f, 2895.0f, 2896.0f, 2897.0f, 2898.0f, 2899.0f, 2900.0f, 2901.0f, 2902.0f, 2903.0f, 2904.0f, 2905.0f, 2906.0f, 2907.0f, 2908.0f, 2909.0f, 2910.0f, 2911.0f, 2912.0f, 2913.0f, 2914.0f, 2915.0f, 2916.0f, 2917.0f, 2918.0f, 2919.0f, 2920.0f, 2921.0f, 2922.0f, 2923.0f, 2924.0f, 2925.0f, 2926.0f, 2927.0f, 2928.0f, 2929.0f, 2930.0f, 2931.0f, 2932.0f, 2933.0f, 2934.0f, 2935.0f, 2936.0f, 2937.0f, 2938.0f, 2939.0f, 2940.0f, 2941.0f, 2942.0f, 2943.0f, 2944.0f, 2945.0f, 2946.0f, 2947.0f, 2948.0f, 2949.0f, 2950.0f, 2951.0f, 2952.0f, 2953.0f, 2954.0f, 2955.0f, 2956.0f, 2957.0f, 2958.0f, 2959.0f, 2960.0f, 2961.0f, 2962.0f, 2963.0f, 2964.0f, 2965.0f, 2966.0f, 2967.0f, 2968.0f, 2969.0f, 2970.0f, 2971.0f, 2972.0f, 2973.0f, 2974.0f, 2975.0f, 2976.0f, 2977.0f, 2978.0f, 2979.0f, 2980.0f, 2981.0f, 2982.0f, 2983.0f, 2984.0f, 2985.0f, 2986.0f, 2987.0f, 2988.0f, 2989.0f, 2990.0f, 2991.0f, 2992.0f, 2993.0f, 2994.0f, 2995.0f, 2996.0f, 2997.0f, 2998.0f, 2999.0f, 3000.0f, 3001.0f, 3002.0f, 3003.0f, 3004.0f, 3005.0f, 3006.0f, 3007.0f, 
3008.0f, 3009.0f, 3010.0f, 3011.0f, 3012.0f, 3013.0f, 3014.0f, 3015.0f, 3016.0f, 3017.0f, 3018.0f, 3019.0f, 3020.0f, 3021.0f, 3022.0f, 3023.0f, 3024.0f, 3025.0f, 3026.0f, 3027.0f, 3028.0f, 3029.0f, 3030.0f, 3031.0f, 3032.0f, 3033.0f, 3034.0f, 3035.0f, 3036.0f, 3037.0f, 3038.0f, 3039.0f, 3040.0f, 3041.0f, 3042.0f, 3043.0f, 3044.0f, 3045.0f, 3046.0f, 3047.0f, 3048.0f, 3049.0f, 3050.0f, 3051.0f, 3052.0f, 3053.0f, 3054.0f, 3055.0f, 3056.0f, 3057.0f, 3058.0f, 3059.0f, 3060.0f, 3061.0f, 3062.0f, 3063.0f, 3064.0f, 3065.0f, 3066.0f, 3067.0f, 3068.0f, 3069.0f, 3070.0f, 3071.0f, 3072.0f, 3073.0f, 3074.0f, 3075.0f, 3076.0f, 3077.0f, 3078.0f, 3079.0f, 3080.0f, 3081.0f, 3082.0f, 3083.0f, 3084.0f, 3085.0f, 3086.0f, 3087.0f, 3088.0f, 3089.0f, 3090.0f, 3091.0f, 3092.0f, 3093.0f, 3094.0f, 3095.0f, 3096.0f, 3097.0f, 3098.0f, 3099.0f, 3100.0f, 3101.0f, 3102.0f, 3103.0f, 3104.0f, 3105.0f, 3106.0f, 3107.0f, 3108.0f, 3109.0f, 3110.0f, 3111.0f, 3112.0f, 3113.0f, 3114.0f, 3115.0f, 3116.0f, 3117.0f, 3118.0f, 3119.0f, 3120.0f, 3121.0f, 3122.0f, 3123.0f, 3124.0f, 3125.0f, 3126.0f, 3127.0f, 3128.0f, 3129.0f, 3130.0f, 3131.0f, 3132.0f, 3133.0f, 3134.0f, 3135.0f, 3136.0f, 3137.0f, 3138.0f, 3139.0f, 3140.0f, 3141.0f, 3142.0f, 3143.0f, 3144.0f, 3145.0f, 3146.0f, 3147.0f, 3148.0f, 3149.0f, 3150.0f, 3151.0f, 3152.0f, 3153.0f, 3154.0f, 3155.0f, 3156.0f, 3157.0f, 3158.0f, 3159.0f, 3160.0f, 3161.0f, 3162.0f, 3163.0f, 3164.0f, 3165.0f, 3166.0f, 3167.0f, 3168.0f, 3169.0f, 3170.0f, 3171.0f, 3172.0f, 3173.0f, 3174.0f, 3175.0f, 3176.0f, 3177.0f, 3178.0f, 3179.0f, 3180.0f, 3181.0f, 3182.0f, 3183.0f, 3184.0f, 3185.0f, 3186.0f, 3187.0f, 3188.0f, 3189.0f, 3190.0f, 3191.0f, 3192.0f, 3193.0f, 3194.0f, 3195.0f, 3196.0f, 3197.0f, 3198.0f, 3199.0f, 3200.0f, 3201.0f, 3202.0f, 3203.0f, 3204.0f, 3205.0f, 3206.0f, 3207.0f, 3208.0f, 3209.0f, 3210.0f, 3211.0f, 3212.0f, 3213.0f, 3214.0f, 3215.0f, 3216.0f, 3217.0f, 3218.0f, 3219.0f, 3220.0f, 3221.0f, 3222.0f, 3223.0f, 3224.0f, 3225.0f, 3226.0f, 3227.0f, 3228.0f, 3229.0f, 
3230.0f, 3231.0f, 3232.0f, 3233.0f, 3234.0f, 3235.0f, 3236.0f, 3237.0f, 3238.0f, 3239.0f, 3240.0f, 3241.0f, 3242.0f, 3243.0f, 3244.0f, 3245.0f, 3246.0f, 3247.0f, 3248.0f, 3249.0f, 3250.0f, 3251.0f, 3252.0f, 3253.0f, 3254.0f, 3255.0f, 3256.0f, 3257.0f, 3258.0f, 3259.0f, 3260.0f, 3261.0f, 3262.0f, 3263.0f, 3264.0f, 3265.0f, 3266.0f, 3267.0f, 3268.0f, 3269.0f, 3270.0f, 3271.0f, 3272.0f, 3273.0f, 3274.0f, 3275.0f, 3276.0f, 3277.0f, 3278.0f, 3279.0f, 3280.0f, 3281.0f, 3282.0f, 3283.0f, 3284.0f, 3285.0f, 3286.0f, 3287.0f, 3288.0f, 3289.0f, 3290.0f, 3291.0f, 3292.0f, 3293.0f, 3294.0f, 3295.0f, 3296.0f, 3297.0f, 3298.0f, 3299.0f, 3300.0f, 3301.0f, 3302.0f, 3303.0f, 3304.0f, 3305.0f, 3306.0f, 3307.0f, 3308.0f, 3309.0f, 3310.0f, 3311.0f, 3312.0f, 3313.0f, 3314.0f, 3315.0f, 3316.0f, 3317.0f, 3318.0f, 3319.0f, 3320.0f, 3321.0f, 3322.0f, 3323.0f, 3324.0f, 3325.0f, 3326.0f, 3327.0f, 3328.0f, 3329.0f, 3330.0f, 3331.0f, 3332.0f, 3333.0f, 3334.0f, 3335.0f, 3336.0f, 3337.0f, 3338.0f, 3339.0f, 3340.0f, 3341.0f, 3342.0f, 3343.0f, 3344.0f, 3345.0f, 3346.0f, 3347.0f, 3348.0f, 3349.0f, 3350.0f, 3351.0f, 3352.0f, 3353.0f, 3354.0f, 3355.0f, 3356.0f, 3357.0f, 3358.0f, 3359.0f, 3360.0f, 3361.0f, 3362.0f, 3363.0f, 3364.0f, 3365.0f, 3366.0f, 3367.0f, 3368.0f, 3369.0f, 3370.0f, 3371.0f, 3372.0f, 3373.0f, 3374.0f, 3375.0f, 3376.0f, 3377.0f, 3378.0f, 3379.0f, 3380.0f, 3381.0f, 3382.0f, 3383.0f, 3384.0f, 3385.0f, 3386.0f, 3387.0f, 3388.0f, 3389.0f, 3390.0f, 3391.0f, 3392.0f, 3393.0f, 3394.0f, 3395.0f, 3396.0f, 3397.0f, 3398.0f, 3399.0f, 3400.0f, 3401.0f, 3402.0f, 3403.0f, 3404.0f, 3405.0f, 3406.0f, 3407.0f, 3408.0f, 3409.0f, 3410.0f, 3411.0f, 3412.0f, 3413.0f, 3414.0f, 3415.0f, 3416.0f, 3417.0f, 3418.0f, 3419.0f, 3420.0f, 3421.0f, 3422.0f, 3423.0f, 3424.0f, 3425.0f, 3426.0f, 3427.0f, 3428.0f, 3429.0f, 3430.0f, 3431.0f, 3432.0f, 3433.0f, 3434.0f, 3435.0f, 3436.0f, 3437.0f, 3438.0f, 3439.0f, 3440.0f, 3441.0f, 3442.0f, 3443.0f, 3444.0f, 3445.0f, 3446.0f, 3447.0f, 3448.0f, 3449.0f, 3450.0f, 3451.0f, 
3452.0f, 3453.0f, 3454.0f, 3455.0f, 3456.0f, 3457.0f, 3458.0f, 3459.0f, 3460.0f, 3461.0f, 3462.0f, 3463.0f, 3464.0f, 3465.0f, 3466.0f, 3467.0f, 3468.0f, 3469.0f, 3470.0f, 3471.0f, 3472.0f, 3473.0f, 3474.0f, 3475.0f, 3476.0f, 3477.0f, 3478.0f, 3479.0f, 3480.0f, 3481.0f, 3482.0f, 3483.0f, 3484.0f, 3485.0f, 3486.0f, 3487.0f, 3488.0f, 3489.0f, 3490.0f, 3491.0f, 3492.0f, 3493.0f, 3494.0f, 3495.0f, 3496.0f, 3497.0f, 3498.0f, 3499.0f, 3500.0f, 3501.0f, 3502.0f, 3503.0f, 3504.0f, 3505.0f, 3506.0f, 3507.0f, 3508.0f, 3509.0f, 3510.0f, 3511.0f, 3512.0f, 3513.0f, 3514.0f, 3515.0f, 3516.0f, 3517.0f, 3518.0f, 3519.0f, 3520.0f, 3521.0f, 3522.0f, 3523.0f, 3524.0f, 3525.0f, 3526.0f, 3527.0f, 3528.0f, 3529.0f, 3530.0f, 3531.0f, 3532.0f, 3533.0f, 3534.0f, 3535.0f, 3536.0f, 3537.0f, 3538.0f, 3539.0f, 3540.0f, 3541.0f, 3542.0f, 3543.0f, 3544.0f, 3545.0f, 3546.0f, 3547.0f, 3548.0f, 3549.0f, 3550.0f, 3551.0f, 3552.0f, 3553.0f, 3554.0f, 3555.0f, 3556.0f, 3557.0f, 3558.0f, 3559.0f, 3560.0f, 3561.0f, 3562.0f, 3563.0f, 3564.0f, 3565.0f, 3566.0f, 3567.0f, 3568.0f, 3569.0f, 3570.0f, 3571.0f, 3572.0f, 3573.0f, 3574.0f, 3575.0f, 3576.0f, 3577.0f, 3578.0f, 3579.0f, 3580.0f, 3581.0f, 3582.0f, 3583.0f, 3584.0f, 3585.0f, 3586.0f, 3587.0f, 3588.0f, 3589.0f, 3590.0f, 3591.0f, 3592.0f, 3593.0f, 3594.0f, 3595.0f, 3596.0f, 3597.0f, 3598.0f, 3599.0f, 3600.0f, 3601.0f, 3602.0f, 3603.0f, 3604.0f, 3605.0f, 3606.0f, 3607.0f, 3608.0f, 3609.0f, 3610.0f, 3611.0f, 3612.0f, 3613.0f, 3614.0f, 3615.0f, 3616.0f, 3617.0f, 3618.0f, 3619.0f, 3620.0f, 3621.0f, 3622.0f, 3623.0f, 3624.0f, 3625.0f, 3626.0f, 3627.0f, 3628.0f, 3629.0f, 3630.0f, 3631.0f, 3632.0f, 3633.0f, 3634.0f, 3635.0f, 3636.0f, 3637.0f, 3638.0f, 3639.0f, 3640.0f, 3641.0f, 3642.0f, 3643.0f, 3644.0f, 3645.0f, 3646.0f, 3647.0f, 3648.0f, 3649.0f, 3650.0f, 3651.0f, 3652.0f, 3653.0f, 3654.0f, 3655.0f, 3656.0f, 3657.0f, 3658.0f, 3659.0f, 3660.0f, 3661.0f, 3662.0f, 3663.0f, 3664.0f, 3665.0f, 3666.0f, 3667.0f, 3668.0f, 3669.0f, 3670.0f, 3671.0f, 3672.0f, 3673.0f, 
3674.0f, 3675.0f, 3676.0f, 3677.0f, 3678.0f, 3679.0f, 3680.0f, 3681.0f, 3682.0f, 3683.0f, 3684.0f, 3685.0f, 3686.0f, 3687.0f, 3688.0f, 3689.0f, 3690.0f, 3691.0f, 3692.0f, 3693.0f, 3694.0f, 3695.0f, 3696.0f, 3697.0f, 3698.0f, 3699.0f, 3700.0f, 3701.0f, 3702.0f, 3703.0f, 3704.0f, 3705.0f, 3706.0f, 3707.0f, 3708.0f, 3709.0f, 3710.0f, 3711.0f, 3712.0f, 3713.0f, 3714.0f, 3715.0f, 3716.0f, 3717.0f, 3718.0f, 3719.0f, 3720.0f, 3721.0f, 3722.0f, 3723.0f, 3724.0f, 3725.0f, 3726.0f, 3727.0f, 3728.0f, 3729.0f, 3730.0f, 3731.0f, 3732.0f, 3733.0f, 3734.0f, 3735.0f, 3736.0f, 3737.0f, 3738.0f, 3739.0f, 3740.0f, 3741.0f, 3742.0f, 3743.0f, 3744.0f, 3745.0f, 3746.0f, 3747.0f, 3748.0f, 3749.0f, 3750.0f, 3751.0f, 3752.0f, 3753.0f, 3754.0f, 3755.0f, 3756.0f, 3757.0f, 3758.0f, 3759.0f, 3760.0f, 3761.0f, 3762.0f, 3763.0f, 3764.0f, 3765.0f, 3766.0f, 3767.0f, 3768.0f, 3769.0f, 3770.0f, 3771.0f, 3772.0f, 3773.0f, 3774.0f, 3775.0f, 3776.0f, 3777.0f, 3778.0f, 3779.0f, 3780.0f, 3781.0f, 3782.0f, 3783.0f, 3784.0f, 3785.0f, 3786.0f, 3787.0f, 3788.0f, 3789.0f, 3790.0f, 3791.0f, 3792.0f, 3793.0f, 3794.0f, 3795.0f, 3796.0f, 3797.0f, 3798.0f, 3799.0f, 3800.0f, 3801.0f, 3802.0f, 3803.0f, 3804.0f, 3805.0f, 3806.0f, 3807.0f, 3808.0f, 3809.0f, 3810.0f, 3811.0f, 3812.0f, 3813.0f, 3814.0f, 3815.0f, 3816.0f, 3817.0f, 3818.0f, 3819.0f, 3820.0f, 3821.0f, 3822.0f, 3823.0f, 3824.0f, 3825.0f, 3826.0f, 3827.0f, 3828.0f, 3829.0f, 3830.0f, 3831.0f, 3832.0f, 3833.0f, 3834.0f, 3835.0f, 3836.0f, 3837.0f, 3838.0f, 3839.0f, 3840.0f, 3841.0f, 3842.0f, 3843.0f, 3844.0f, 3845.0f, 3846.0f, 3847.0f, 3848.0f, 3849.0f, 3850.0f, 3851.0f, 3852.0f, 3853.0f, 3854.0f, 3855.0f, 3856.0f, 3857.0f, 3858.0f, 3859.0f, 3860.0f, 3861.0f, 3862.0f, 3863.0f, 3864.0f, 3865.0f, 3866.0f, 3867.0f, 3868.0f, 3869.0f, 3870.0f, 3871.0f, 3872.0f, 3873.0f, 3874.0f, 3875.0f, 3876.0f, 3877.0f, 3878.0f, 3879.0f, 3880.0f, 3881.0f, 3882.0f, 3883.0f, 3884.0f, 3885.0f, 3886.0f, 3887.0f, 3888.0f, 3889.0f, 3890.0f, 3891.0f, 3892.0f, 3893.0f, 3894.0f, 3895.0f, 
3896.0f, 3897.0f, 3898.0f, 3899.0f, 3900.0f, 3901.0f, 3902.0f, 3903.0f, 3904.0f, 3905.0f, 3906.0f, 3907.0f, 3908.0f, 3909.0f, 3910.0f, 3911.0f, 3912.0f, 3913.0f, 3914.0f, 3915.0f, 3916.0f, 3917.0f, 3918.0f, 3919.0f, 3920.0f, 3921.0f, 3922.0f, 3923.0f, 3924.0f, 3925.0f, 3926.0f, 3927.0f, 3928.0f, 3929.0f, 3930.0f, 3931.0f, 3932.0f, 3933.0f, 3934.0f, 3935.0f, 3936.0f, 3937.0f, 3938.0f, 3939.0f, 3940.0f, 3941.0f, 3942.0f, 3943.0f, 3944.0f, 3945.0f, 3946.0f, 3947.0f, 3948.0f, 3949.0f, 3950.0f, 3951.0f, 3952.0f, 3953.0f, 3954.0f, 3955.0f, 3956.0f, 3957.0f, 3958.0f, 3959.0f, 3960.0f, 3961.0f, 3962.0f, 3963.0f, 3964.0f, 3965.0f, 3966.0f, 3967.0f, 3968.0f, 3969.0f, 3970.0f, 3971.0f, 3972.0f, 3973.0f, 3974.0f, 3975.0f, 3976.0f, 3977.0f, 3978.0f, 3979.0f, 3980.0f, 3981.0f, 3982.0f, 3983.0f, 3984.0f, 3985.0f, 3986.0f, 3987.0f, 3988.0f, 3989.0f, 3990.0f, 3991.0f, 3992.0f, 3993.0f, 3994.0f, 3995.0f, 3996.0f, 3997.0f, 3998.0f, 3999.0f, 4000.0f, 4001.0f, 4002.0f, 4003.0f, 4004.0f, 4005.0f, 4006.0f, 4007.0f, 4008.0f, 4009.0f, 4010.0f, 4011.0f, 4012.0f, 4013.0f, 4014.0f, 4015.0f, 4016.0f, 4017.0f, 4018.0f, 4019.0f, 4020.0f, 4021.0f, 4022.0f, 4023.0f, 4024.0f, 4025.0f, 4026.0f, 4027.0f, 4028.0f, 4029.0f, 4030.0f, 4031.0f, 4032.0f, 4033.0f, 4034.0f, 4035.0f, 4036.0f, 4037.0f, 4038.0f, 4039.0f, 4040.0f, 4041.0f, 4042.0f, 4043.0f, 4044.0f, 4045.0f, 4046.0f, 4047.0f, 4048.0f, 4049.0f, 4050.0f, 4051.0f, 4052.0f, 4053.0f, 4054.0f, 4055.0f, 4056.0f, 4057.0f, 4058.0f, 4059.0f, 4060.0f, 4061.0f, 4062.0f, 4063.0f, 4064.0f, 4065.0f, 4066.0f, 4067.0f, 4068.0f, 4069.0f, 4070.0f, 4071.0f, 4072.0f, 4073.0f, 4074.0f, 4075.0f, 4076.0f, 4077.0f, 4078.0f, 4079.0f, 4080.0f, 4081.0f, 4082.0f, 4083.0f, 4084.0f, 4085.0f, 4086.0f, 4087.0f, 4088.0f, 4089.0f, 4090.0f, 4091.0f, 4092.0f, 4093.0f, 4094.0f, 4095.0f, 4096.0f, 4097.0f, 4098.0f, 4099.0f, 4100.0f, 4101.0f, 4102.0f, 4103.0f, 4104.0f, 4105.0f, 4106.0f, 4107.0f, 4108.0f, 4109.0f, 4110.0f, 4111.0f, 4112.0f, 4113.0f, 4114.0f, 4115.0f, 4116.0f, 4117.0f, 
4118.0f, 4119.0f, 4120.0f, 4121.0f, 4122.0f, 4123.0f, 4124.0f, 4125.0f, 4126.0f, 4127.0f, 4128.0f, 4129.0f, 4130.0f, 4131.0f, 4132.0f, 4133.0f, 4134.0f, 4135.0f, 4136.0f, 4137.0f, 4138.0f, 4139.0f, 4140.0f, 4141.0f, 4142.0f, 4143.0f, 4144.0f, 4145.0f, 4146.0f, 4147.0f, 4148.0f, 4149.0f, 4150.0f, 4151.0f, 4152.0f, 4153.0f, 4154.0f, 4155.0f, 4156.0f, 4157.0f, 4158.0f, 4159.0f, 4160.0f, 4161.0f, 4162.0f, 4163.0f, 4164.0f, 4165.0f, 4166.0f, 4167.0f, 4168.0f, 4169.0f, 4170.0f, 4171.0f, 4172.0f, 4173.0f, 4174.0f, 4175.0f, 4176.0f, 4177.0f, 4178.0f, 4179.0f, 4180.0f, 4181.0f, 4182.0f, 4183.0f, 4184.0f, 4185.0f, 4186.0f, 4187.0f, 4188.0f, 4189.0f, 4190.0f, 4191.0f, 4192.0f, 4193.0f, 4194.0f, 4195.0f, 4196.0f, 4197.0f, 4198.0f, 4199.0f, 4200.0f, 4201.0f, 4202.0f, 4203.0f, 4204.0f, 4205.0f, 4206.0f, 4207.0f, 4208.0f, 4209.0f, 4210.0f, 4211.0f, 4212.0f, 4213.0f, 4214.0f, 4215.0f, 4216.0f, 4217.0f, 4218.0f, 4219.0f, 4220.0f, 4221.0f, 4222.0f, 4223.0f, 4224.0f, 4225.0f, 4226.0f, 4227.0f, 4228.0f, 4229.0f, 4230.0f, 4231.0f, 4232.0f, 4233.0f, 4234.0f, 4235.0f, 4236.0f, 4237.0f, 4238.0f, 4239.0f, 4240.0f, 4241.0f, 4242.0f, 4243.0f, 4244.0f, 4245.0f, 4246.0f, 4247.0f, 4248.0f, 4249.0f, 4250.0f, 4251.0f, 4252.0f, 4253.0f, 4254.0f, 4255.0f, 4256.0f, 4257.0f, 4258.0f, 4259.0f, 4260.0f, 4261.0f, 4262.0f, 4263.0f, 4264.0f, 4265.0f, 4266.0f, 4267.0f, 4268.0f, 4269.0f, 4270.0f, 4271.0f, 4272.0f, 4273.0f, 4274.0f, 4275.0f, 4276.0f, 4277.0f, 4278.0f, 4279.0f, 4280.0f, 4281.0f, 4282.0f, 4283.0f, 4284.0f, 4285.0f, 4286.0f, 4287.0f, 4288.0f, 4289.0f, 4290.0f, 4291.0f, 4292.0f, 4293.0f, 4294.0f, 4295.0f, 4296.0f, 4297.0f, 4298.0f, 4299.0f, 4300.0f, 4301.0f, 4302.0f, 4303.0f, 4304.0f, 4305.0f, 4306.0f, 4307.0f, 4308.0f, 4309.0f, 4310.0f, 4311.0f, 4312.0f, 4313.0f, 4314.0f, 4315.0f, 4316.0f, 4317.0f, 4318.0f, 4319.0f, 4320.0f, 4321.0f, 4322.0f, 4323.0f, 4324.0f, 4325.0f, 4326.0f, 4327.0f, 4328.0f, 4329.0f, 4330.0f, 4331.0f, 4332.0f, 4333.0f, 4334.0f, 4335.0f, 4336.0f, 4337.0f, 4338.0f, 4339.0f, 
4340.0f, 4341.0f, 4342.0f, 4343.0f, 4344.0f, 4345.0f, 4346.0f, 4347.0f, 4348.0f, 4349.0f, 4350.0f, 4351.0f, 4352.0f, 4353.0f, 4354.0f, 4355.0f, 4356.0f, 4357.0f, 4358.0f, 4359.0f, 4360.0f, 4361.0f, 4362.0f, 4363.0f, 4364.0f, 4365.0f, 4366.0f, 4367.0f, 4368.0f, 4369.0f, 4370.0f, 4371.0f, 4372.0f, 4373.0f, 4374.0f, 4375.0f, 4376.0f, 4377.0f, 4378.0f, 4379.0f, 4380.0f, 4381.0f, 4382.0f, 4383.0f, 4384.0f, 4385.0f, 4386.0f, 4387.0f, 4388.0f, 4389.0f, 4390.0f, 4391.0f, 4392.0f, 4393.0f, 4394.0f, 4395.0f, 4396.0f, 4397.0f, 4398.0f, 4399.0f, 4400.0f, 4401.0f, 4402.0f, 4403.0f, 4404.0f, 4405.0f, 4406.0f, 4407.0f, 4408.0f, 4409.0f, 4410.0f, 4411.0f, 4412.0f, 4413.0f, 4414.0f, 4415.0f, 4416.0f, 4417.0f, 4418.0f, 4419.0f, 4420.0f, 4421.0f, 4422.0f, 4423.0f, 4424.0f, 4425.0f, 4426.0f, 4427.0f, 4428.0f, 4429.0f, 4430.0f, 4431.0f, 4432.0f, 4433.0f, 4434.0f, 4435.0f, 4436.0f, 4437.0f, 4438.0f, 4439.0f, 4440.0f, 4441.0f, 4442.0f, 4443.0f, 4444.0f, 4445.0f, 4446.0f, 4447.0f, 4448.0f, 4449.0f, 4450.0f, 4451.0f, 4452.0f, 4453.0f, 4454.0f, 4455.0f, 4456.0f, 4457.0f, 4458.0f, 4459.0f, 4460.0f, 4461.0f, 4462.0f, 4463.0f, 4464.0f, 4465.0f, 4466.0f, 4467.0f, 4468.0f, 4469.0f, 4470.0f, 4471.0f, 4472.0f, 4473.0f, 4474.0f, 4475.0f, 4476.0f, 4477.0f, 4478.0f, 4479.0f, 4480.0f, 4481.0f, 4482.0f, 4483.0f, 4484.0f, 4485.0f, 4486.0f, 4487.0f, 4488.0f, 4489.0f, 4490.0f, 4491.0f, 4492.0f, 4493.0f, 4494.0f, 4495.0f, 4496.0f, 4497.0f, 4498.0f, 4499.0f, 4500.0f, 4501.0f, 4502.0f, 4503.0f, 4504.0f, 4505.0f, 4506.0f, 4507.0f, 4508.0f, 4509.0f, 4510.0f, 4511.0f, 4512.0f, 4513.0f, 4514.0f, 4515.0f, 4516.0f, 4517.0f, 4518.0f, 4519.0f, 4520.0f, 4521.0f, 4522.0f, 4523.0f, 4524.0f, 4525.0f, 4526.0f, 4527.0f, 4528.0f, 4529.0f, 4530.0f, 4531.0f, 4532.0f, 4533.0f, 4534.0f, 4535.0f, 4536.0f, 4537.0f, 4538.0f, 4539.0f, 4540.0f, 4541.0f, 4542.0f, 4543.0f, 4544.0f, 4545.0f, 4546.0f, 4547.0f, 4548.0f, 4549.0f, 4550.0f, 4551.0f, 4552.0f, 4553.0f, 4554.0f, 4555.0f, 4556.0f, 4557.0f, 4558.0f, 4559.0f, 4560.0f, 4561.0f, 
4562.0f, 4563.0f, 4564.0f, 4565.0f, 4566.0f, 4567.0f, 4568.0f, 4569.0f, 4570.0f, 4571.0f, 4572.0f, 4573.0f, 4574.0f, 4575.0f, 4576.0f, 4577.0f, 4578.0f, 4579.0f, 4580.0f, 4581.0f, 4582.0f, 4583.0f, 4584.0f, 4585.0f, 4586.0f, 4587.0f, 4588.0f, 4589.0f, 4590.0f, 4591.0f, 4592.0f, 4593.0f, 4594.0f, 4595.0f, 4596.0f, 4597.0f, 4598.0f, 4599.0f, 4600.0f, 4601.0f, 4602.0f, 4603.0f, 4604.0f, 4605.0f, 4606.0f, 4607.0f, 4608.0f, 4609.0f, 4610.0f, 4611.0f, 4612.0f, 4613.0f, 4614.0f, 4615.0f, 4616.0f, 4617.0f, 4618.0f, 4619.0f, 4620.0f, 4621.0f, 4622.0f, 4623.0f, 4624.0f, 4625.0f, 4626.0f, 4627.0f, 4628.0f, 4629.0f, 4630.0f, 4631.0f, 4632.0f, 4633.0f, 4634.0f, 4635.0f, 4636.0f, 4637.0f, 4638.0f, 4639.0f, 4640.0f, 4641.0f, 4642.0f, 4643.0f, 4644.0f, 4645.0f, 4646.0f, 4647.0f, 4648.0f, 4649.0f, 4650.0f, 4651.0f, 4652.0f, 4653.0f, 4654.0f, 4655.0f, 4656.0f, 4657.0f, 4658.0f, 4659.0f, 4660.0f, 4661.0f, 4662.0f, 4663.0f, 4664.0f, 4665.0f, 4666.0f, 4667.0f, 4668.0f, 4669.0f, 4670.0f, 4671.0f, 4672.0f, 4673.0f, 4674.0f, 4675.0f, 4676.0f, 4677.0f, 4678.0f, 4679.0f, 4680.0f, 4681.0f, 4682.0f, 4683.0f, 4684.0f, 4685.0f, 4686.0f, 4687.0f, 4688.0f, 4689.0f, 4690.0f, 4691.0f, 4692.0f, 4693.0f, 4694.0f, 4695.0f, 4696.0f, 4697.0f, 4698.0f, 4699.0f, 4700.0f, 4701.0f, 4702.0f, 4703.0f, 4704.0f, 4705.0f, 4706.0f, 4707.0f, 4708.0f, 4709.0f, 4710.0f, 4711.0f, 4712.0f, 4713.0f, 4714.0f, 4715.0f, 4716.0f, 4717.0f, 4718.0f, 4719.0f, 4720.0f, 4721.0f, 4722.0f, 4723.0f, 4724.0f, 4725.0f, 4726.0f, 4727.0f, 4728.0f, 4729.0f, 4730.0f, 4731.0f, 4732.0f, 4733.0f, 4734.0f, 4735.0f, 4736.0f, 4737.0f, 4738.0f, 4739.0f, 4740.0f, 4741.0f, 4742.0f, 4743.0f, 4744.0f, 4745.0f, 4746.0f, 4747.0f, 4748.0f, 4749.0f, 4750.0f, 4751.0f, 4752.0f, 4753.0f, 4754.0f, 4755.0f, 4756.0f, 4757.0f, 4758.0f, 4759.0f, 4760.0f, 4761.0f, 4762.0f, 4763.0f, 4764.0f, 4765.0f, 4766.0f, 4767.0f, 4768.0f, 4769.0f, 4770.0f, 4771.0f, 4772.0f, 4773.0f, 4774.0f, 4775.0f, 4776.0f, 4777.0f, 4778.0f, 4779.0f, 4780.0f, 4781.0f, 4782.0f, 4783.0f, 
4784.0f, 4785.0f, 4786.0f, 4787.0f, 4788.0f, 4789.0f, 4790.0f, 4791.0f, 4792.0f, 4793.0f, 4794.0f, 4795.0f, 4796.0f, 4797.0f, 4798.0f, 4799.0f, 4800.0f, 4801.0f, 4802.0f, 4803.0f, 4804.0f, 4805.0f, 4806.0f, 4807.0f, 4808.0f, 4809.0f, 4810.0f, 4811.0f, 4812.0f, 4813.0f, 4814.0f, 4815.0f, 4816.0f, 4817.0f, 4818.0f, 4819.0f, 4820.0f, 4821.0f, 4822.0f, 4823.0f, 4824.0f, 4825.0f, 4826.0f, 4827.0f, 4828.0f, 4829.0f, 4830.0f, 4831.0f, 4832.0f, 4833.0f, 4834.0f, 4835.0f, 4836.0f, 4837.0f, 4838.0f, 4839.0f, 4840.0f, 4841.0f, 4842.0f, 4843.0f, 4844.0f, 4845.0f, 4846.0f, 4847.0f, 4848.0f, 4849.0f, 4850.0f, 4851.0f, 4852.0f, 4853.0f, 4854.0f, 4855.0f, 4856.0f, 4857.0f, 4858.0f, 4859.0f, 4860.0f, 4861.0f, 4862.0f, 4863.0f, 4864.0f, 4865.0f, 4866.0f, 4867.0f, 4868.0f, 4869.0f, 4870.0f, 4871.0f, 4872.0f, 4873.0f, 4874.0f, 4875.0f, 4876.0f, 4877.0f, 4878.0f, 4879.0f, 4880.0f, 4881.0f, 4882.0f, 4883.0f, 4884.0f, 4885.0f, 4886.0f, 4887.0f, 4888.0f, 4889.0f, 4890.0f, 4891.0f, 4892.0f, 4893.0f, 4894.0f, 4895.0f, 4896.0f, 4897.0f, 4898.0f, 4899.0f, 4900.0f, 4901.0f, 4902.0f, 4903.0f, 4904.0f, 4905.0f, 4906.0f, 4907.0f, 4908.0f, 4909.0f, 4910.0f, 4911.0f, 4912.0f, 4913.0f, 4914.0f, 4915.0f, 4916.0f, 4917.0f, 4918.0f, 4919.0f, 4920.0f, 4921.0f, 4922.0f, 4923.0f, 4924.0f, 4925.0f, 4926.0f, 4927.0f, 4928.0f, 4929.0f, 4930.0f, 4931.0f, 4932.0f, 4933.0f, 4934.0f, 4935.0f, 4936.0f, 4937.0f, 4938.0f, 4939.0f, 4940.0f, 4941.0f, 4942.0f, 4943.0f, 4944.0f, 4945.0f, 4946.0f, 4947.0f, 4948.0f, 4949.0f, 4950.0f, 4951.0f, 4952.0f, 4953.0f, 4954.0f, 4955.0f, 4956.0f, 4957.0f, 4958.0f, 4959.0f, 4960.0f, 4961.0f, 4962.0f, 4963.0f, 4964.0f, 4965.0f, 4966.0f, 4967.0f, 4968.0f, 4969.0f, 4970.0f, 4971.0f, 4972.0f, 4973.0f, 4974.0f, 4975.0f, 4976.0f, 4977.0f, 4978.0f, 4979.0f, 4980.0f, 4981.0f, 4982.0f, 4983.0f, 4984.0f, 4985.0f, 4986.0f, 4987.0f, 4988.0f, 4989.0f, 4990.0f, 4991.0f, 4992.0f, 4993.0f, 4994.0f, 4995.0f, 4996.0f, 4997.0f, 4998.0f, 4999.0f, 5000.0f, 5001.0f, 5002.0f, 5003.0f, 5004.0f, 5005.0f, 
5006.0f, 5007.0f, 5008.0f, 5009.0f, 5010.0f, 5011.0f, 5012.0f, 5013.0f, 5014.0f, 5015.0f, 5016.0f, 5017.0f, 5018.0f, 5019.0f, 5020.0f, 5021.0f, 5022.0f, 5023.0f, 5024.0f, 5025.0f, 5026.0f, 5027.0f, 5028.0f, 5029.0f, 5030.0f, 5031.0f, 5032.0f, 5033.0f, 5034.0f, 5035.0f, 5036.0f, 5037.0f, 5038.0f, 5039.0f, 5040.0f, 5041.0f, 5042.0f, 5043.0f, 5044.0f, 5045.0f, 5046.0f, 5047.0f, 5048.0f, 5049.0f, 5050.0f, 5051.0f, 5052.0f, 5053.0f, 5054.0f, 5055.0f, 5056.0f, 5057.0f, 5058.0f, 5059.0f, 5060.0f, 5061.0f, 5062.0f, 5063.0f, 5064.0f, 5065.0f, 5066.0f, 5067.0f, 5068.0f, 5069.0f, 5070.0f, 5071.0f, 5072.0f, 5073.0f, 5074.0f, 5075.0f, 5076.0f, 5077.0f, 5078.0f, 5079.0f, 5080.0f, 5081.0f, 5082.0f, 5083.0f, 5084.0f, 5085.0f, 5086.0f, 5087.0f, 5088.0f, 5089.0f, 5090.0f, 5091.0f, 5092.0f, 5093.0f, 5094.0f, 5095.0f, 5096.0f, 5097.0f, 5098.0f, 5099.0f, 5100.0f, 5101.0f, 5102.0f, 5103.0f, 5104.0f, 5105.0f, 5106.0f, 5107.0f, 5108.0f, 5109.0f, 5110.0f, 5111.0f, 5112.0f, 5113.0f, 5114.0f, 5115.0f, 5116.0f, 5117.0f, 5118.0f, 5119.0f, 5120.0f, 5121.0f, 5122.0f, 5123.0f, 5124.0f, 5125.0f, 5126.0f, 5127.0f, 5128.0f, 5129.0f, 5130.0f, 5131.0f, 5132.0f, 5133.0f, 5134.0f, 5135.0f, 5136.0f, 5137.0f, 5138.0f, 5139.0f, 5140.0f, 5141.0f, 5142.0f, 5143.0f, 5144.0f, 5145.0f, 5146.0f, 5147.0f, 5148.0f, 5149.0f, 5150.0f, 5151.0f, 5152.0f, 5153.0f, 5154.0f, 5155.0f, 5156.0f, 5157.0f, 5158.0f, 5159.0f, 5160.0f, 5161.0f, 5162.0f, 5163.0f, 5164.0f, 5165.0f, 5166.0f, 5167.0f, 5168.0f, 5169.0f, 5170.0f, 5171.0f, 5172.0f, 5173.0f, 5174.0f, 5175.0f, 5176.0f, 5177.0f, 5178.0f, 5179.0f, 5180.0f, 5181.0f, 5182.0f, 5183.0f, 5184.0f, 5185.0f, 5186.0f, 5187.0f, 5188.0f, 5189.0f, 5190.0f, 5191.0f, 5192.0f, 5193.0f, 5194.0f, 5195.0f, 5196.0f, 5197.0f, 5198.0f, 5199.0f, 5200.0f, 5201.0f, 5202.0f, 5203.0f, 5204.0f, 5205.0f, 5206.0f, 5207.0f, 5208.0f, 5209.0f, 5210.0f, 5211.0f, 5212.0f, 5213.0f, 5214.0f, 5215.0f, 5216.0f, 5217.0f, 5218.0f, 5219.0f, 5220.0f, 5221.0f, 5222.0f, 5223.0f, 5224.0f, 5225.0f, 5226.0f, 5227.0f, 
5228.0f, 5229.0f, 5230.0f, 5231.0f, 5232.0f, 5233.0f, 5234.0f, 5235.0f, 5236.0f, 5237.0f, 5238.0f, 5239.0f, 5240.0f, 5241.0f, 5242.0f, 5243.0f, 5244.0f, 5245.0f, 5246.0f, 5247.0f, 5248.0f, 5249.0f, 5250.0f, 5251.0f, 5252.0f, 5253.0f, 5254.0f, 5255.0f, 5256.0f, 5257.0f, 5258.0f, 5259.0f, 5260.0f, 5261.0f, 5262.0f, 5263.0f, 5264.0f, 5265.0f, 5266.0f, 5267.0f, 5268.0f, 5269.0f, 5270.0f, 5271.0f, 5272.0f, 5273.0f, 5274.0f, 5275.0f, 5276.0f, 5277.0f, 5278.0f, 5279.0f, 5280.0f, 5281.0f, 5282.0f, 5283.0f, 5284.0f, 5285.0f, 5286.0f, 5287.0f, 5288.0f, 5289.0f, 5290.0f, 5291.0f, 5292.0f, 5293.0f, 5294.0f, 5295.0f, 5296.0f, 5297.0f, 5298.0f, 5299.0f, 5300.0f, 5301.0f, 5302.0f, 5303.0f, 5304.0f, 5305.0f, 5306.0f, 5307.0f, 5308.0f, 5309.0f, 5310.0f, 5311.0f, 5312.0f, 5313.0f, 5314.0f, 5315.0f, 5316.0f, 5317.0f, 5318.0f, 5319.0f, 5320.0f, 5321.0f, 5322.0f, 5323.0f, 5324.0f, 5325.0f, 5326.0f, 5327.0f, 5328.0f, 5329.0f, 5330.0f, 5331.0f, 5332.0f, 5333.0f, 5334.0f, 5335.0f, 5336.0f, 5337.0f, 5338.0f, 5339.0f, 5340.0f, 5341.0f, 5342.0f, 5343.0f, 5344.0f, 5345.0f, 5346.0f, 5347.0f, 5348.0f, 5349.0f, 5350.0f, 5351.0f, 5352.0f, 5353.0f, 5354.0f, 5355.0f, 5356.0f, 5357.0f, 5358.0f, 5359.0f, 5360.0f, 5361.0f, 5362.0f, 5363.0f, 5364.0f, 5365.0f, 5366.0f, 5367.0f, 5368.0f, 5369.0f, 5370.0f, 5371.0f, 5372.0f, 5373.0f, 5374.0f, 5375.0f, 5376.0f, 5377.0f, 5378.0f, 5379.0f, 5380.0f, 5381.0f, 5382.0f, 5383.0f, 5384.0f, 5385.0f, 5386.0f, 5387.0f, 5388.0f, 5389.0f, 5390.0f, 5391.0f, 5392.0f, 5393.0f, 5394.0f, 5395.0f, 5396.0f, 5397.0f, 5398.0f, 5399.0f, 5400.0f, 5401.0f, 5402.0f, 5403.0f, 5404.0f, 5405.0f, 5406.0f, 5407.0f, 5408.0f, 5409.0f, 5410.0f, 5411.0f, 5412.0f, 5413.0f, 5414.0f, 5415.0f, 5416.0f, 5417.0f, 5418.0f, 5419.0f, 5420.0f, 5421.0f, 5422.0f, 5423.0f, 5424.0f, 5425.0f, 5426.0f, 5427.0f, 5428.0f, 5429.0f, 5430.0f, 5431.0f, 5432.0f, 5433.0f, 5434.0f, 5435.0f, 5436.0f, 5437.0f, 5438.0f, 5439.0f, 5440.0f, 5441.0f, 5442.0f, 5443.0f, 5444.0f, 5445.0f, 5446.0f, 5447.0f, 5448.0f, 5449.0f, 
5450.0f, 5451.0f, 5452.0f, 5453.0f, 5454.0f, 5455.0f, 5456.0f, 5457.0f, 5458.0f, 5459.0f, 5460.0f, 5461.0f, 5462.0f, 5463.0f, 5464.0f, 5465.0f, 5466.0f, 5467.0f, 5468.0f, 5469.0f, 5470.0f, 5471.0f, 5472.0f, 5473.0f, 5474.0f, 5475.0f, 5476.0f, 5477.0f, 5478.0f, 5479.0f, 5480.0f, 5481.0f, 5482.0f, 5483.0f, 5484.0f, 5485.0f, 5486.0f, 5487.0f, 5488.0f, 5489.0f, 5490.0f, 5491.0f, 5492.0f, 5493.0f, 5494.0f, 5495.0f, 5496.0f, 5497.0f, 5498.0f, 5499.0f, 5500.0f, 5501.0f, 5502.0f, 5503.0f, 5504.0f, 5505.0f, 5506.0f, 5507.0f, 5508.0f, 5509.0f, 5510.0f, 5511.0f, 5512.0f, 5513.0f, 5514.0f, 5515.0f, 5516.0f, 5517.0f, 5518.0f, 5519.0f, 5520.0f, 5521.0f, 5522.0f, 5523.0f, 5524.0f, 5525.0f, 5526.0f, 5527.0f, 5528.0f, 5529.0f, 5530.0f, 5531.0f, 5532.0f, 5533.0f, 5534.0f, 5535.0f, 5536.0f, 5537.0f, 5538.0f, 5539.0f, 5540.0f, 5541.0f, 5542.0f, 5543.0f, 5544.0f, 5545.0f, 5546.0f, 5547.0f, 5548.0f, 5549.0f, 5550.0f, 5551.0f, 5552.0f, 5553.0f, 5554.0f, 5555.0f, 5556.0f, 5557.0f, 5558.0f, 5559.0f, 5560.0f, 5561.0f, 5562.0f, 5563.0f, 5564.0f, 5565.0f, 5566.0f, 5567.0f, 5568.0f, 5569.0f, 5570.0f, 5571.0f, 5572.0f, 5573.0f, 5574.0f, 5575.0f, 5576.0f, 5577.0f, 5578.0f, 5579.0f, 5580.0f, 5581.0f, 5582.0f, 5583.0f, 5584.0f, 5585.0f, 5586.0f, 5587.0f, 5588.0f, 5589.0f, 5590.0f, 5591.0f, 5592.0f, 5593.0f, 5594.0f, 5595.0f, 5596.0f, 5597.0f, 5598.0f, 5599.0f, 5600.0f, 5601.0f, 5602.0f, 5603.0f, 5604.0f, 5605.0f, 5606.0f, 5607.0f, 5608.0f, 5609.0f, 5610.0f, 5611.0f, 5612.0f, 5613.0f, 5614.0f, 5615.0f, 5616.0f, 5617.0f, 5618.0f, 5619.0f, 5620.0f, 5621.0f, 5622.0f, 5623.0f, 5624.0f, 5625.0f, 5626.0f, 5627.0f, 5628.0f, 5629.0f, 5630.0f, 5631.0f, 5632.0f, 5633.0f, 5634.0f, 5635.0f, 5636.0f, 5637.0f, 5638.0f, 5639.0f, 5640.0f, 5641.0f, 5642.0f, 5643.0f, 5644.0f, 5645.0f, 5646.0f, 5647.0f, 5648.0f, 5649.0f, 5650.0f, 5651.0f, 5652.0f, 5653.0f, 5654.0f, 5655.0f, 5656.0f, 5657.0f, 5658.0f, 5659.0f, 5660.0f, 5661.0f, 5662.0f, 5663.0f, 5664.0f, 5665.0f, 5666.0f, 5667.0f, 5668.0f, 5669.0f, 5670.0f, 5671.0f, 
5672.0f, 5673.0f, 5674.0f, 5675.0f, 5676.0f, 5677.0f, 5678.0f, 5679.0f, 5680.0f, 5681.0f, 5682.0f, 5683.0f, 5684.0f, 5685.0f, 5686.0f, 5687.0f, 5688.0f, 5689.0f, 5690.0f, 5691.0f, 5692.0f, 5693.0f, 5694.0f, 5695.0f, 5696.0f, 5697.0f, 5698.0f, 5699.0f, 5700.0f, 5701.0f, 5702.0f, 5703.0f, 5704.0f, 5705.0f, 5706.0f, 5707.0f, 5708.0f, 5709.0f, 5710.0f, 5711.0f, 5712.0f, 5713.0f, 5714.0f, 5715.0f, 5716.0f, 5717.0f, 5718.0f, 5719.0f, 5720.0f, 5721.0f, 5722.0f, 5723.0f, 5724.0f, 5725.0f, 5726.0f, 5727.0f, 5728.0f, 5729.0f, 5730.0f, 5731.0f, 5732.0f, 5733.0f, 5734.0f, 5735.0f, 5736.0f, 5737.0f, 5738.0f, 5739.0f, 5740.0f, 5741.0f, 5742.0f, 5743.0f, 5744.0f, 5745.0f, 5746.0f, 5747.0f, 5748.0f, 5749.0f, 5750.0f, 5751.0f, 5752.0f, 5753.0f, 5754.0f, 5755.0f, 5756.0f, 5757.0f, 5758.0f, 5759.0f, 5760.0f, 5761.0f, 5762.0f, 5763.0f, 5764.0f, 5765.0f, 5766.0f, 5767.0f, 5768.0f, 5769.0f, 5770.0f, 5771.0f, 5772.0f, 5773.0f, 5774.0f, 5775.0f, 5776.0f, 5777.0f, 5778.0f, 5779.0f, 5780.0f, 5781.0f, 5782.0f, 5783.0f, 5784.0f, 5785.0f, 5786.0f, 5787.0f, 5788.0f, 5789.0f, 5790.0f, 5791.0f, 5792.0f, 5793.0f, 5794.0f, 5795.0f, 5796.0f, 5797.0f, 5798.0f, 5799.0f, 5800.0f, 5801.0f, 5802.0f, 5803.0f, 5804.0f, 5805.0f, 5806.0f, 5807.0f, 5808.0f, 5809.0f, 5810.0f, 5811.0f, 5812.0f, 5813.0f, 5814.0f, 5815.0f, 5816.0f, 5817.0f, 5818.0f, 5819.0f, 5820.0f, 5821.0f, 5822.0f, 5823.0f, 5824.0f, 5825.0f, 5826.0f, 5827.0f, 5828.0f, 5829.0f, 5830.0f, 5831.0f, 5832.0f, 5833.0f, 5834.0f, 5835.0f, 5836.0f, 5837.0f, 5838.0f, 5839.0f, 5840.0f, 5841.0f, 5842.0f, 5843.0f, 5844.0f, 5845.0f, 5846.0f, 5847.0f, 5848.0f, 5849.0f, 5850.0f, 5851.0f, 5852.0f, 5853.0f, 5854.0f, 5855.0f, 5856.0f, 5857.0f, 5858.0f, 5859.0f, 5860.0f, 5861.0f, 5862.0f, 5863.0f, 5864.0f, 5865.0f, 5866.0f, 5867.0f, 5868.0f, 5869.0f, 5870.0f, 5871.0f, 5872.0f, 5873.0f, 5874.0f, 5875.0f, 5876.0f, 5877.0f, 5878.0f, 5879.0f, 5880.0f, 5881.0f, 5882.0f, 5883.0f, 5884.0f, 5885.0f, 5886.0f, 5887.0f, 5888.0f, 5889.0f, 5890.0f, 5891.0f, 5892.0f, 5893.0f, 
5894.0f, 5895.0f, 5896.0f, 5897.0f, 5898.0f, 5899.0f, 5900.0f, 5901.0f, 5902.0f, 5903.0f, 5904.0f, 5905.0f, 5906.0f, 5907.0f, 5908.0f, 5909.0f, 5910.0f, 5911.0f, 5912.0f, 5913.0f, 5914.0f, 5915.0f, 5916.0f, 5917.0f, 5918.0f, 5919.0f, 5920.0f, 5921.0f, 5922.0f, 5923.0f, 5924.0f, 5925.0f, 5926.0f, 5927.0f, 5928.0f, 5929.0f, 5930.0f, 5931.0f, 5932.0f, 5933.0f, 5934.0f, 5935.0f, 5936.0f, 5937.0f, 5938.0f, 5939.0f, 5940.0f, 5941.0f, 5942.0f, 5943.0f, 5944.0f, 5945.0f, 5946.0f, 5947.0f, 5948.0f, 5949.0f, 5950.0f, 5951.0f, 5952.0f, 5953.0f, 5954.0f, 5955.0f, 5956.0f, 5957.0f, 5958.0f, 5959.0f, 5960.0f, 5961.0f, 5962.0f, 5963.0f, 5964.0f, 5965.0f, 5966.0f, 5967.0f, 5968.0f, 5969.0f, 5970.0f, 5971.0f, 5972.0f, 5973.0f, 5974.0f, 5975.0f, 5976.0f, 5977.0f, 5978.0f, 5979.0f, 5980.0f, 5981.0f, 5982.0f, 5983.0f, 5984.0f, 5985.0f, 5986.0f, 5987.0f, 5988.0f, 5989.0f, 5990.0f, 5991.0f, 5992.0f, 5993.0f, 5994.0f, 5995.0f, 5996.0f, 5997.0f, 5998.0f, 5999.0f, 6000.0f, 6001.0f, 6002.0f, 6003.0f, 6004.0f, 6005.0f, 6006.0f, 6007.0f, 6008.0f, 6009.0f, 6010.0f, 6011.0f, 6012.0f, 6013.0f, 6014.0f, 6015.0f, 6016.0f, 6017.0f, 6018.0f, 6019.0f, 6020.0f, 6021.0f, 6022.0f, 6023.0f, 6024.0f, 6025.0f, 6026.0f, 6027.0f, 6028.0f, 6029.0f, 6030.0f, 6031.0f, 6032.0f, 6033.0f, 6034.0f, 6035.0f, 6036.0f, 6037.0f, 6038.0f, 6039.0f, 6040.0f, 6041.0f, 6042.0f, 6043.0f, 6044.0f, 6045.0f, 6046.0f, 6047.0f, 6048.0f, 6049.0f, 6050.0f, 6051.0f, 6052.0f, 6053.0f, 6054.0f, 6055.0f, 6056.0f, 6057.0f, 6058.0f, 6059.0f, 6060.0f, 6061.0f, 6062.0f, 6063.0f, 6064.0f, 6065.0f, 6066.0f, 6067.0f, 6068.0f, 6069.0f, 6070.0f, 6071.0f, 6072.0f, 6073.0f, 6074.0f, 6075.0f, 6076.0f, 6077.0f, 6078.0f, 6079.0f, 6080.0f, 6081.0f, 6082.0f, 6083.0f, 6084.0f, 6085.0f, 6086.0f, 6087.0f, 6088.0f, 6089.0f, 6090.0f, 6091.0f, 6092.0f, 6093.0f, 6094.0f, 6095.0f, 6096.0f, 6097.0f, 6098.0f, 6099.0f, 6100.0f, 6101.0f, 6102.0f, 6103.0f, 6104.0f, 6105.0f, 6106.0f, 6107.0f, 6108.0f, 6109.0f, 6110.0f, 6111.0f, 6112.0f, 6113.0f, 6114.0f, 6115.0f, 
6116.0f, 6117.0f, 6118.0f, 6119.0f, 6120.0f, 6121.0f, 6122.0f, 6123.0f, 6124.0f, 6125.0f, 6126.0f, 6127.0f, 6128.0f, 6129.0f, 6130.0f, 6131.0f, 6132.0f, 6133.0f, 6134.0f, 6135.0f, 6136.0f, 6137.0f, 6138.0f, 6139.0f, 6140.0f, 6141.0f, 6142.0f, 6143.0f, 6144.0f, 6145.0f, 6146.0f, 6147.0f, 6148.0f, 6149.0f, 6150.0f, 6151.0f, 6152.0f, 6153.0f, 6154.0f, 6155.0f, 6156.0f, 6157.0f, 6158.0f, 6159.0f, 6160.0f, 6161.0f, 6162.0f, 6163.0f, 6164.0f, 6165.0f, 6166.0f, 6167.0f, 6168.0f, 6169.0f, 6170.0f, 6171.0f, 6172.0f, 6173.0f, 6174.0f, 6175.0f, 6176.0f, 6177.0f, 6178.0f, 6179.0f, 6180.0f, 6181.0f, 6182.0f, 6183.0f, 6184.0f, 6185.0f, 6186.0f, 6187.0f, 6188.0f, 6189.0f, 6190.0f, 6191.0f, 6192.0f, 6193.0f, 6194.0f, 6195.0f, 6196.0f, 6197.0f, 6198.0f, 6199.0f, 6200.0f, 6201.0f, 6202.0f, 6203.0f, 6204.0f, 6205.0f, 6206.0f, 6207.0f, 6208.0f, 6209.0f, 6210.0f, 6211.0f, 6212.0f, 6213.0f, 6214.0f, 6215.0f, 6216.0f, 6217.0f, 6218.0f, 6219.0f, 6220.0f, 6221.0f, 6222.0f, 6223.0f, 6224.0f, 6225.0f, 6226.0f, 6227.0f, 6228.0f, 6229.0f, 6230.0f, 6231.0f, 6232.0f, 6233.0f, 6234.0f, 6235.0f, 6236.0f, 6237.0f, 6238.0f, 6239.0f, 6240.0f, 6241.0f, 6242.0f, 6243.0f, 6244.0f, 6245.0f, 6246.0f, 6247.0f, 6248.0f, 6249.0f, 6250.0f, 6251.0f, 6252.0f, 6253.0f, 6254.0f, 6255.0f, 6256.0f, 6257.0f, 6258.0f, 6259.0f, 6260.0f, 6261.0f, 6262.0f, 6263.0f, 6264.0f, 6265.0f, 6266.0f, 6267.0f, 6268.0f, 6269.0f, 6270.0f, 6271.0f, 6272.0f, 6273.0f, 6274.0f, 6275.0f, 6276.0f, 6277.0f, 6278.0f, 6279.0f, 6280.0f, 6281.0f, 6282.0f, 6283.0f, 6284.0f, 6285.0f, 6286.0f, 6287.0f, 6288.0f, 6289.0f, 6290.0f, 6291.0f, 6292.0f, 6293.0f, 6294.0f, 6295.0f, 6296.0f, 6297.0f, 6298.0f, 6299.0f, 6300.0f, 6301.0f, 6302.0f, 6303.0f, 6304.0f, 6305.0f, 6306.0f, 6307.0f, 6308.0f, 6309.0f, 6310.0f, 6311.0f, 6312.0f, 6313.0f, 6314.0f, 6315.0f, 6316.0f, 6317.0f, 6318.0f, 6319.0f, 6320.0f, 6321.0f, 6322.0f, 6323.0f, 6324.0f, 6325.0f, 6326.0f, 6327.0f, 6328.0f, 6329.0f, 6330.0f, 6331.0f, 6332.0f, 6333.0f, 6334.0f, 6335.0f, 6336.0f, 6337.0f, 
6338.0f, 6339.0f, 6340.0f, 6341.0f, 6342.0f, 6343.0f, 6344.0f, 6345.0f, 6346.0f, 6347.0f, 6348.0f, 6349.0f, 6350.0f, 6351.0f, 6352.0f, 6353.0f, 6354.0f, 6355.0f, 6356.0f, 6357.0f, 6358.0f, 6359.0f, 6360.0f, 6361.0f, 6362.0f, 6363.0f, 6364.0f, 6365.0f, 6366.0f, 6367.0f, 6368.0f, 6369.0f, 6370.0f, 6371.0f, 6372.0f, 6373.0f, 6374.0f, 6375.0f, 6376.0f, 6377.0f, 6378.0f, 6379.0f, 6380.0f, 6381.0f, 6382.0f, 6383.0f, 6384.0f, 6385.0f, 6386.0f, 6387.0f, 6388.0f, 6389.0f, 6390.0f, 6391.0f, 6392.0f, 6393.0f, 6394.0f, 6395.0f, 6396.0f, 6397.0f, 6398.0f, 6399.0f, 6400.0f, 6401.0f, 6402.0f, 6403.0f, 6404.0f, 6405.0f, 6406.0f, 6407.0f, 6408.0f, 6409.0f, 6410.0f, 6411.0f, 6412.0f, 6413.0f, 6414.0f, 6415.0f, 6416.0f, 6417.0f, 6418.0f, 6419.0f, 6420.0f, 6421.0f, 6422.0f, 6423.0f, 6424.0f, 6425.0f, 6426.0f, 6427.0f, 6428.0f, 6429.0f, 6430.0f, 6431.0f, 6432.0f, 6433.0f, 6434.0f, 6435.0f, 6436.0f, 6437.0f, 6438.0f, 6439.0f, 6440.0f, 6441.0f, 6442.0f, 6443.0f, 6444.0f, 6445.0f, 6446.0f, 6447.0f, 6448.0f, 6449.0f, 6450.0f, 6451.0f, 6452.0f, 6453.0f, 6454.0f, 6455.0f, 6456.0f, 6457.0f, 6458.0f, 6459.0f, 6460.0f, 6461.0f, 6462.0f, 6463.0f, 6464.0f, 6465.0f, 6466.0f, 6467.0f, 6468.0f, 6469.0f, 6470.0f, 6471.0f, 6472.0f, 6473.0f, 6474.0f, 6475.0f, 6476.0f, 6477.0f, 6478.0f, 6479.0f, 6480.0f, 6481.0f, 6482.0f, 6483.0f, 6484.0f, 6485.0f, 6486.0f, 6487.0f, 6488.0f, 6489.0f, 6490.0f, 6491.0f, 6492.0f, 6493.0f, 6494.0f, 6495.0f, 6496.0f, 6497.0f, 6498.0f, 6499.0f, 6500.0f, 6501.0f, 6502.0f, 6503.0f, 6504.0f, 6505.0f, 6506.0f, 6507.0f, 6508.0f, 6509.0f, 6510.0f, 6511.0f, 6512.0f, 6513.0f, 6514.0f, 6515.0f, 6516.0f, 6517.0f, 6518.0f, 6519.0f, 6520.0f, 6521.0f, 6522.0f, 6523.0f, 6524.0f, 6525.0f, 6526.0f, 6527.0f, 6528.0f, 6529.0f, 6530.0f, 6531.0f, 6532.0f, 6533.0f, 6534.0f, 6535.0f, 6536.0f, 6537.0f, 6538.0f, 6539.0f, 6540.0f, 6541.0f, 6542.0f, 6543.0f, 6544.0f, 6545.0f, 6546.0f, 6547.0f, 6548.0f, 6549.0f, 6550.0f, 6551.0f, 6552.0f, 6553.0f, 6554.0f, 6555.0f, 6556.0f, 6557.0f, 6558.0f, 6559.0f, 
6560.0f, 6561.0f, 6562.0f, 6563.0f, 6564.0f, 6565.0f, 6566.0f, 6567.0f, 6568.0f, 6569.0f, 6570.0f, 6571.0f, 6572.0f, 6573.0f, 6574.0f, 6575.0f, 6576.0f, 6577.0f, 6578.0f, 6579.0f, 6580.0f, 6581.0f, 6582.0f, 6583.0f, 6584.0f, 6585.0f, 6586.0f, 6587.0f, 6588.0f, 6589.0f, 6590.0f, 6591.0f, 6592.0f, 6593.0f, 6594.0f, 6595.0f, 6596.0f, 6597.0f, 6598.0f, 6599.0f, 6600.0f, 6601.0f, 6602.0f, 6603.0f, 6604.0f, 6605.0f, 6606.0f, 6607.0f, 6608.0f, 6609.0f, 6610.0f, 6611.0f, 6612.0f, 6613.0f, 6614.0f, 6615.0f, 6616.0f, 6617.0f, 6618.0f, 6619.0f, 6620.0f, 6621.0f, 6622.0f, 6623.0f, 6624.0f, 6625.0f, 6626.0f, 6627.0f, 6628.0f, 6629.0f, 6630.0f, 6631.0f, 6632.0f, 6633.0f, 6634.0f, 6635.0f, 6636.0f, 6637.0f, 6638.0f, 6639.0f, 6640.0f, 6641.0f, 6642.0f, 6643.0f, 6644.0f, 6645.0f, 6646.0f, 6647.0f, 6648.0f, 6649.0f, 6650.0f, 6651.0f, 6652.0f, 6653.0f, 6654.0f, 6655.0f, 6656.0f, 6657.0f, 6658.0f, 6659.0f, 6660.0f, 6661.0f, 6662.0f, 6663.0f, 6664.0f, 6665.0f, 6666.0f, 6667.0f, 6668.0f, 6669.0f, 6670.0f, 6671.0f, 6672.0f, 6673.0f, 6674.0f, 6675.0f, 6676.0f, 6677.0f, 6678.0f, 6679.0f, 6680.0f, 6681.0f, 6682.0f, 6683.0f, 6684.0f, 6685.0f, 6686.0f, 6687.0f, 6688.0f, 6689.0f, 6690.0f, 6691.0f, 6692.0f, 6693.0f, 6694.0f, 6695.0f, 6696.0f, 6697.0f, 6698.0f, 6699.0f, 6700.0f, 6701.0f, 6702.0f, 6703.0f, 6704.0f, 6705.0f, 6706.0f, 6707.0f, 6708.0f, 6709.0f, 6710.0f, 6711.0f, 6712.0f, 6713.0f, 6714.0f, 6715.0f, 6716.0f, 6717.0f, 6718.0f, 6719.0f, 6720.0f, 6721.0f, 6722.0f, 6723.0f, 6724.0f, 6725.0f, 6726.0f, 6727.0f, 6728.0f, 6729.0f, 6730.0f, 6731.0f, 6732.0f, 6733.0f, 6734.0f, 6735.0f, 6736.0f, 6737.0f, 6738.0f, 6739.0f, 6740.0f, 6741.0f, 6742.0f, 6743.0f, 6744.0f, 6745.0f, 6746.0f, 6747.0f, 6748.0f, 6749.0f, 6750.0f, 6751.0f, 6752.0f, 6753.0f, 6754.0f, 6755.0f, 6756.0f, 6757.0f, 6758.0f, 6759.0f, 6760.0f, 6761.0f, 6762.0f, 6763.0f, 6764.0f, 6765.0f, 6766.0f, 6767.0f, 6768.0f, 6769.0f, 6770.0f, 6771.0f, 6772.0f, 6773.0f, 6774.0f, 6775.0f, 6776.0f, 6777.0f, 6778.0f, 6779.0f, 6780.0f, 6781.0f, 
6782.0f, 6783.0f, 6784.0f, 6785.0f, 6786.0f, 6787.0f, 6788.0f, 6789.0f, 6790.0f, 6791.0f, 6792.0f, 6793.0f, 6794.0f, 6795.0f, 6796.0f, 6797.0f, 6798.0f, 6799.0f, 6800.0f, 6801.0f, 6802.0f, 6803.0f, 6804.0f, 6805.0f, 6806.0f, 6807.0f, 6808.0f, 6809.0f, 6810.0f, 6811.0f, 6812.0f, 6813.0f, 6814.0f, 6815.0f, 6816.0f, 6817.0f, 6818.0f, 6819.0f, 6820.0f, 6821.0f, 6822.0f, 6823.0f, 6824.0f, 6825.0f, 6826.0f, 6827.0f, 6828.0f, 6829.0f, 6830.0f, 6831.0f, 6832.0f, 6833.0f, 6834.0f, 6835.0f, 6836.0f, 6837.0f, 6838.0f, 6839.0f, 6840.0f, 6841.0f, 6842.0f, 6843.0f, 6844.0f, 6845.0f, 6846.0f, 6847.0f, 6848.0f, 6849.0f, 6850.0f, 6851.0f, 6852.0f, 6853.0f, 6854.0f, 6855.0f, 6856.0f, 6857.0f, 6858.0f, 6859.0f, 6860.0f, 6861.0f, 6862.0f, 6863.0f, 6864.0f, 6865.0f, 6866.0f, 6867.0f, 6868.0f, 6869.0f, 6870.0f, 6871.0f, 6872.0f, 6873.0f, 6874.0f, 6875.0f, 6876.0f, 6877.0f, 6878.0f, 6879.0f, 6880.0f, 6881.0f, 6882.0f, 6883.0f, 6884.0f, 6885.0f, 6886.0f, 6887.0f, 6888.0f, 6889.0f, 6890.0f, 6891.0f, 6892.0f, 6893.0f, 6894.0f, 6895.0f, 6896.0f, 6897.0f, 6898.0f, 6899.0f, 6900.0f, 6901.0f, 6902.0f, 6903.0f, 6904.0f, 6905.0f, 6906.0f, 6907.0f, 6908.0f, 6909.0f, 6910.0f, 6911.0f, 6912.0f, 6913.0f, 6914.0f, 6915.0f, 6916.0f, 6917.0f, 6918.0f, 6919.0f, 6920.0f, 6921.0f, 6922.0f, 6923.0f, 6924.0f, 6925.0f, 6926.0f, 6927.0f, 6928.0f, 6929.0f, 6930.0f, 6931.0f, 6932.0f, 6933.0f, 6934.0f, 6935.0f, 6936.0f, 6937.0f, 6938.0f, 6939.0f, 6940.0f, 6941.0f, 6942.0f, 6943.0f, 6944.0f, 6945.0f, 6946.0f, 6947.0f, 6948.0f, 6949.0f, 6950.0f, 6951.0f, 6952.0f, 6953.0f, 6954.0f, 6955.0f, 6956.0f, 6957.0f, 6958.0f, 6959.0f, 6960.0f, 6961.0f, 6962.0f, 6963.0f, 6964.0f, 6965.0f, 6966.0f, 6967.0f, 6968.0f, 6969.0f, 6970.0f, 6971.0f, 6972.0f, 6973.0f, 6974.0f, 6975.0f, 6976.0f, 6977.0f, 6978.0f, 6979.0f, 6980.0f, 6981.0f, 6982.0f, 6983.0f, 6984.0f, 6985.0f, 6986.0f, 6987.0f, 6988.0f, 6989.0f, 6990.0f, 6991.0f, 6992.0f, 6993.0f, 6994.0f, 6995.0f, 6996.0f, 6997.0f, 6998.0f, 6999.0f, 7000.0f, 7001.0f, 7002.0f, 7003.0f, 
7004.0f, 7005.0f, 7006.0f, 7007.0f, 7008.0f, 7009.0f, 7010.0f, 7011.0f, 7012.0f, 7013.0f, 7014.0f, 7015.0f, 7016.0f, 7017.0f, 7018.0f, 7019.0f, 7020.0f, 7021.0f, 7022.0f, 7023.0f, 7024.0f, 7025.0f, 7026.0f, 7027.0f, 7028.0f, 7029.0f, 7030.0f, 7031.0f, 7032.0f, 7033.0f, 7034.0f, 7035.0f, 7036.0f, 7037.0f, 7038.0f, 7039.0f, 7040.0f, 7041.0f, 7042.0f, 7043.0f, 7044.0f, 7045.0f, 7046.0f, 7047.0f, 7048.0f, 7049.0f, 7050.0f, 7051.0f, 7052.0f, 7053.0f, 7054.0f, 7055.0f, 7056.0f, 7057.0f, 7058.0f, 7059.0f, 7060.0f, 7061.0f, 7062.0f, 7063.0f, 7064.0f, 7065.0f, 7066.0f, 7067.0f, 7068.0f, 7069.0f, 7070.0f, 7071.0f, 7072.0f, 7073.0f, 7074.0f, 7075.0f, 7076.0f, 7077.0f, 7078.0f, 7079.0f, 7080.0f, 7081.0f, 7082.0f, 7083.0f, 7084.0f, 7085.0f, 7086.0f, 7087.0f, 7088.0f, 7089.0f, 7090.0f, 7091.0f, 7092.0f, 7093.0f, 7094.0f, 7095.0f, 7096.0f, 7097.0f, 7098.0f, 7099.0f, 7100.0f, 7101.0f, 7102.0f, 7103.0f, 7104.0f, 7105.0f, 7106.0f, 7107.0f, 7108.0f, 7109.0f, 7110.0f, 7111.0f, 7112.0f, 7113.0f, 7114.0f, 7115.0f, 7116.0f, 7117.0f, 7118.0f, 7119.0f, 7120.0f, 7121.0f, 7122.0f, 7123.0f, 7124.0f, 7125.0f, 7126.0f, 7127.0f, 7128.0f, 7129.0f, 7130.0f, 7131.0f, 7132.0f, 7133.0f, 7134.0f, 7135.0f, 7136.0f, 7137.0f, 7138.0f, 7139.0f, 7140.0f, 7141.0f, 7142.0f, 7143.0f, 7144.0f, 7145.0f, 7146.0f, 7147.0f, 7148.0f, 7149.0f, 7150.0f, 7151.0f, 7152.0f, 7153.0f, 7154.0f, 7155.0f, 7156.0f, 7157.0f, 7158.0f, 7159.0f, 7160.0f, 7161.0f, 7162.0f, 7163.0f, 7164.0f, 7165.0f, 7166.0f, 7167.0f, 7168.0f, 7169.0f, 7170.0f, 7171.0f, 7172.0f, 7173.0f, 7174.0f, 7175.0f, 7176.0f, 7177.0f, 7178.0f, 7179.0f, 7180.0f, 7181.0f, 7182.0f, 7183.0f, 7184.0f, 7185.0f, 7186.0f, 7187.0f, 7188.0f, 7189.0f, 7190.0f, 7191.0f, 7192.0f, 7193.0f, 7194.0f, 7195.0f, 7196.0f, 7197.0f, 7198.0f, 7199.0f, 7200.0f, 7201.0f, 7202.0f, 7203.0f, 7204.0f, 7205.0f, 7206.0f, 7207.0f, 7208.0f, 7209.0f, 7210.0f, 7211.0f, 7212.0f, 7213.0f, 7214.0f, 7215.0f, 7216.0f, 7217.0f, 7218.0f, 7219.0f, 7220.0f, 7221.0f, 7222.0f, 7223.0f, 7224.0f, 7225.0f, 
7226.0f, 7227.0f, 7228.0f, 7229.0f, 7230.0f, 7231.0f, 7232.0f, 7233.0f, 7234.0f, 7235.0f, 7236.0f, 7237.0f, 7238.0f, 7239.0f, 7240.0f, 7241.0f, 7242.0f, 7243.0f, 7244.0f, 7245.0f, 7246.0f, 7247.0f, 7248.0f, 7249.0f, 7250.0f, 7251.0f, 7252.0f, 7253.0f, 7254.0f, 7255.0f, 7256.0f, 7257.0f, 7258.0f, 7259.0f, 7260.0f, 7261.0f, 7262.0f, 7263.0f, 7264.0f, 7265.0f, 7266.0f, 7267.0f, 7268.0f, 7269.0f, 7270.0f, 7271.0f, 7272.0f, 7273.0f, 7274.0f, 7275.0f, 7276.0f, 7277.0f, 7278.0f, 7279.0f, 7280.0f, 7281.0f, 7282.0f, 7283.0f, 7284.0f, 7285.0f, 7286.0f, 7287.0f, 7288.0f, 7289.0f, 7290.0f, 7291.0f, 7292.0f, 7293.0f, 7294.0f, 7295.0f, 7296.0f, 7297.0f, 7298.0f, 7299.0f, 7300.0f, 7301.0f, 7302.0f, 7303.0f, 7304.0f, 7305.0f, 7306.0f, 7307.0f, 7308.0f, 7309.0f, 7310.0f, 7311.0f, 7312.0f, 7313.0f, 7314.0f, 7315.0f, 7316.0f, 7317.0f, 7318.0f, 7319.0f, 7320.0f, 7321.0f, 7322.0f, 7323.0f, 7324.0f, 7325.0f, 7326.0f, 7327.0f, 7328.0f, 7329.0f, 7330.0f, 7331.0f, 7332.0f, 7333.0f, 7334.0f, 7335.0f, 7336.0f, 7337.0f, 7338.0f, 7339.0f, 7340.0f, 7341.0f, 7342.0f, 7343.0f, 7344.0f, 7345.0f, 7346.0f, 7347.0f, 7348.0f, 7349.0f, 7350.0f, 7351.0f, 7352.0f, 7353.0f, 7354.0f, 7355.0f, 7356.0f, 7357.0f, 7358.0f, 7359.0f, 7360.0f, 7361.0f, 7362.0f, 7363.0f, 7364.0f, 7365.0f, 7366.0f, 7367.0f, 7368.0f, 7369.0f, 7370.0f, 7371.0f, 7372.0f, 7373.0f, 7374.0f, 7375.0f, 7376.0f, 7377.0f, 7378.0f, 7379.0f, 7380.0f, 7381.0f, 7382.0f, 7383.0f, 7384.0f, 7385.0f, 7386.0f, 7387.0f, 7388.0f, 7389.0f, 7390.0f, 7391.0f, 7392.0f, 7393.0f, 7394.0f, 7395.0f, 7396.0f, 7397.0f, 7398.0f, 7399.0f, 7400.0f, 7401.0f, 7402.0f, 7403.0f, 7404.0f, 7405.0f, 7406.0f, 7407.0f, 7408.0f, 7409.0f, 7410.0f, 7411.0f, 7412.0f, 7413.0f, 7414.0f, 7415.0f, 7416.0f, 7417.0f, 7418.0f, 7419.0f, 7420.0f, 7421.0f, 7422.0f, 7423.0f, 7424.0f, 7425.0f, 7426.0f, 7427.0f, 7428.0f, 7429.0f, 7430.0f, 7431.0f, 7432.0f, 7433.0f, 7434.0f, 7435.0f, 7436.0f, 7437.0f, 7438.0f, 7439.0f, 7440.0f, 7441.0f, 7442.0f, 7443.0f, 7444.0f, 7445.0f, 7446.0f, 7447.0f, 
7448.0f, 7449.0f, 7450.0f, 7451.0f, 7452.0f, 7453.0f, 7454.0f, 7455.0f, 7456.0f, 7457.0f, 7458.0f, 7459.0f, 7460.0f, 7461.0f, 7462.0f, 7463.0f, 7464.0f, 7465.0f, 7466.0f, 7467.0f, 7468.0f, 7469.0f, 7470.0f, 7471.0f, 7472.0f, 7473.0f, 7474.0f, 7475.0f, 7476.0f, 7477.0f, 7478.0f, 7479.0f, 7480.0f, 7481.0f, 7482.0f, 7483.0f, 7484.0f, 7485.0f, 7486.0f, 7487.0f, 7488.0f, 7489.0f, 7490.0f, 7491.0f, 7492.0f, 7493.0f, 7494.0f, 7495.0f, 7496.0f, 7497.0f, 7498.0f, 7499.0f, 7500.0f, 7501.0f, 7502.0f, 7503.0f, 7504.0f, 7505.0f, 7506.0f, 7507.0f, 7508.0f, 7509.0f, 7510.0f, 7511.0f, 7512.0f, 7513.0f, 7514.0f, 7515.0f, 7516.0f, 7517.0f, 7518.0f, 7519.0f, 7520.0f, 7521.0f, 7522.0f, 7523.0f, 7524.0f, 7525.0f, 7526.0f, 7527.0f, 7528.0f, 7529.0f, 7530.0f, 7531.0f, 7532.0f, 7533.0f, 7534.0f, 7535.0f, 7536.0f, 7537.0f, 7538.0f, 7539.0f, 7540.0f, 7541.0f, 7542.0f, 7543.0f, 7544.0f, 7545.0f, 7546.0f, 7547.0f, 7548.0f, 7549.0f, 7550.0f, 7551.0f, 7552.0f, 7553.0f, 7554.0f, 7555.0f, 7556.0f, 7557.0f, 7558.0f, 7559.0f, 7560.0f, 7561.0f, 7562.0f, 7563.0f, 7564.0f, 7565.0f, 7566.0f, 7567.0f, 7568.0f, 7569.0f, 7570.0f, 7571.0f, 7572.0f, 7573.0f, 7574.0f, 7575.0f, 7576.0f, 7577.0f, 7578.0f, 7579.0f, 7580.0f, 7581.0f, 7582.0f, 7583.0f, 7584.0f, 7585.0f, 7586.0f, 7587.0f, 7588.0f, 7589.0f, 7590.0f, 7591.0f, 7592.0f, 7593.0f, 7594.0f, 7595.0f, 7596.0f, 7597.0f, 7598.0f, 7599.0f, 7600.0f, 7601.0f, 7602.0f, 7603.0f, 7604.0f, 7605.0f, 7606.0f, 7607.0f, 7608.0f, 7609.0f, 7610.0f, 7611.0f, 7612.0f, 7613.0f, 7614.0f, 7615.0f, 7616.0f, 7617.0f, 7618.0f, 7619.0f, 7620.0f, 7621.0f, 7622.0f, 7623.0f, 7624.0f, 7625.0f, 7626.0f, 7627.0f, 7628.0f, 7629.0f, 7630.0f, 7631.0f, 7632.0f, 7633.0f, 7634.0f, 7635.0f, 7636.0f, 7637.0f, 7638.0f, 7639.0f, 7640.0f, 7641.0f, 7642.0f, 7643.0f, 7644.0f, 7645.0f, 7646.0f, 7647.0f, 7648.0f, 7649.0f, 7650.0f, 7651.0f, 7652.0f, 7653.0f, 7654.0f, 7655.0f, 7656.0f, 7657.0f, 7658.0f, 7659.0f, 7660.0f, 7661.0f, 7662.0f, 7663.0f, 7664.0f, 7665.0f, 7666.0f, 7667.0f, 7668.0f, 7669.0f, 
7670.0f, 7671.0f, 7672.0f, 7673.0f, 7674.0f, 7675.0f, 7676.0f, 7677.0f, 7678.0f, 7679.0f, 7680.0f, 7681.0f, 7682.0f, 7683.0f, 7684.0f, 7685.0f, 7686.0f, 7687.0f, 7688.0f, 7689.0f, 7690.0f, 7691.0f, 7692.0f, 7693.0f, 7694.0f, 7695.0f, 7696.0f, 7697.0f, 7698.0f, 7699.0f, 7700.0f, 7701.0f, 7702.0f, 7703.0f, 7704.0f, 7705.0f, 7706.0f, 7707.0f, 7708.0f, 7709.0f, 7710.0f, 7711.0f, 7712.0f, 7713.0f, 7714.0f, 7715.0f, 7716.0f, 7717.0f, 7718.0f, 7719.0f, 7720.0f, 7721.0f, 7722.0f, 7723.0f, 7724.0f, 7725.0f, 7726.0f, 7727.0f, 7728.0f, 7729.0f, 7730.0f, 7731.0f, 7732.0f, 7733.0f, 7734.0f, 7735.0f, 7736.0f, 7737.0f, 7738.0f, 7739.0f, 7740.0f, 7741.0f, 7742.0f, 7743.0f, 7744.0f, 7745.0f, 7746.0f, 7747.0f, 7748.0f, 7749.0f, 7750.0f, 7751.0f, 7752.0f, 7753.0f, 7754.0f, 7755.0f, 7756.0f, 7757.0f, 7758.0f, 7759.0f, 7760.0f, 7761.0f, 7762.0f, 7763.0f, 7764.0f, 7765.0f, 7766.0f, 7767.0f, 7768.0f, 7769.0f, 7770.0f, 7771.0f, 7772.0f, 7773.0f, 7774.0f, 7775.0f, 7776.0f, 7777.0f, 7778.0f, 7779.0f, 7780.0f, 7781.0f, 7782.0f, 7783.0f, 7784.0f, 7785.0f, 7786.0f, 7787.0f, 7788.0f, 7789.0f, 7790.0f, 7791.0f, 7792.0f, 7793.0f, 7794.0f, 7795.0f, 7796.0f, 7797.0f, 7798.0f, 7799.0f, 7800.0f, 7801.0f, 7802.0f, 7803.0f, 7804.0f, 7805.0f, 7806.0f, 7807.0f, 7808.0f, 7809.0f, 7810.0f, 7811.0f, 7812.0f, 7813.0f, 7814.0f, 7815.0f, 7816.0f, 7817.0f, 7818.0f, 7819.0f, 7820.0f, 7821.0f, 7822.0f, 7823.0f, 7824.0f, 7825.0f, 7826.0f, 7827.0f, 7828.0f, 7829.0f, 7830.0f, 7831.0f, 7832.0f, 7833.0f, 7834.0f, 7835.0f, 7836.0f, 7837.0f, 7838.0f, 7839.0f, 7840.0f, 7841.0f, 7842.0f, 7843.0f, 7844.0f, 7845.0f, 7846.0f, 7847.0f, 7848.0f, 7849.0f, 7850.0f, 7851.0f, 7852.0f, 7853.0f, 7854.0f, 7855.0f, 7856.0f, 7857.0f, 7858.0f, 7859.0f, 7860.0f, 7861.0f, 7862.0f, 7863.0f, 7864.0f, 7865.0f, 7866.0f, 7867.0f, 7868.0f, 7869.0f, 7870.0f, 7871.0f, 7872.0f, 7873.0f, 7874.0f, 7875.0f, 7876.0f, 7877.0f, 7878.0f, 7879.0f, 7880.0f, 7881.0f, 7882.0f, 7883.0f, 7884.0f, 7885.0f, 7886.0f, 7887.0f, 7888.0f, 7889.0f, 7890.0f, 7891.0f, 
7892.0f, 7893.0f, 7894.0f, 7895.0f, 7896.0f, 7897.0f, 7898.0f, 7899.0f, 7900.0f, 7901.0f, 7902.0f, 7903.0f, 7904.0f, 7905.0f, 7906.0f, 7907.0f, 7908.0f, 7909.0f, 7910.0f, 7911.0f, 7912.0f, 7913.0f, 7914.0f, 7915.0f, 7916.0f, 7917.0f, 7918.0f, 7919.0f, 7920.0f, 7921.0f, 7922.0f, 7923.0f, 7924.0f, 7925.0f, 7926.0f, 7927.0f, 7928.0f, 7929.0f, 7930.0f, 7931.0f, 7932.0f, 7933.0f, 7934.0f, 7935.0f, 7936.0f, 7937.0f, 7938.0f, 7939.0f, 7940.0f, 7941.0f, 7942.0f, 7943.0f, 7944.0f, 7945.0f, 7946.0f, 7947.0f, 7948.0f, 7949.0f, 7950.0f, 7951.0f, 7952.0f, 7953.0f, 7954.0f, 7955.0f, 7956.0f, 7957.0f, 7958.0f, 7959.0f, 7960.0f, 7961.0f, 7962.0f, 7963.0f, 7964.0f, 7965.0f, 7966.0f, 7967.0f, 7968.0f, 7969.0f, 7970.0f, 7971.0f, 7972.0f, 7973.0f, 7974.0f, 7975.0f, 7976.0f, 7977.0f, 7978.0f, 7979.0f, 7980.0f, 7981.0f, 7982.0f, 7983.0f, 7984.0f, 7985.0f, 7986.0f, 7987.0f, 7988.0f, 7989.0f, 7990.0f, 7991.0f, 7992.0f, 7993.0f, 7994.0f, 7995.0f, 7996.0f, 7997.0f, 7998.0f, 7999.0f, 8000.0f, 8001.0f, 8002.0f, 8003.0f, 8004.0f, 8005.0f, 8006.0f, 8007.0f, 8008.0f, 8009.0f, 8010.0f, 8011.0f, 8012.0f, 8013.0f, 8014.0f, 8015.0f, 8016.0f, 8017.0f, 8018.0f, 8019.0f, 8020.0f, 8021.0f, 8022.0f, 8023.0f, 8024.0f, 8025.0f, 8026.0f, 8027.0f, 8028.0f, 8029.0f, 8030.0f, 8031.0f, 8032.0f, 8033.0f, 8034.0f, 8035.0f, 8036.0f, 8037.0f, 8038.0f, 8039.0f, 8040.0f, 8041.0f, 8042.0f, 8043.0f, 8044.0f, 8045.0f, 8046.0f, 8047.0f, 8048.0f, 8049.0f, 8050.0f, 8051.0f, 8052.0f, 8053.0f, 8054.0f, 8055.0f, 8056.0f, 8057.0f, 8058.0f, 8059.0f, 8060.0f, 8061.0f, 8062.0f, 8063.0f, 8064.0f, 8065.0f, 8066.0f, 8067.0f, 8068.0f, 8069.0f, 8070.0f, 8071.0f, 8072.0f, 8073.0f, 8074.0f, 8075.0f, 8076.0f, 8077.0f, 8078.0f, 8079.0f, 8080.0f, 8081.0f, 8082.0f, 8083.0f, 8084.0f, 8085.0f, 8086.0f, 8087.0f, 8088.0f, 8089.0f, 8090.0f, 8091.0f, 8092.0f, 8093.0f, 8094.0f, 8095.0f, 8096.0f, 8097.0f, 8098.0f, 8099.0f, 8100.0f, 8101.0f, 8102.0f, 8103.0f, 8104.0f, 8105.0f, 8106.0f, 8107.0f, 8108.0f, 8109.0f, 8110.0f, 8111.0f, 8112.0f, 8113.0f, 
8114.0f, 8115.0f, 8116.0f, 8117.0f, 8118.0f, 8119.0f, 8120.0f, 8121.0f, 8122.0f, 8123.0f, 8124.0f, 8125.0f, 8126.0f, 8127.0f, 8128.0f, 8129.0f, 8130.0f, 8131.0f, 8132.0f, 8133.0f, 8134.0f, 8135.0f, 8136.0f, 8137.0f, 8138.0f, 8139.0f, 8140.0f, 8141.0f, 8142.0f, 8143.0f, 8144.0f, 8145.0f, 8146.0f, 8147.0f, 8148.0f, 8149.0f, 8150.0f, 8151.0f, 8152.0f, 8153.0f, 8154.0f, 8155.0f, 8156.0f, 8157.0f, 8158.0f, 8159.0f, 8160.0f, 8161.0f, 8162.0f, 8163.0f, 8164.0f, 8165.0f, 8166.0f, 8167.0f, 8168.0f, 8169.0f, 8170.0f, 8171.0f, 8172.0f, 8173.0f, 8174.0f, 8175.0f, 8176.0f, 8177.0f, 8178.0f, 8179.0f, 8180.0f, 8181.0f, 8182.0f, 8183.0f, 8184.0f, 8185.0f, 8186.0f, 8187.0f, 8188.0f, 8189.0f, 8190.0f, 8191.0f, 8192.0f, 8193.0f, 8194.0f, 8195.0f, 8196.0f, 8197.0f, 8198.0f, 8199.0f, 8200.0f, 8201.0f, 8202.0f, 8203.0f, 8204.0f, 8205.0f, 8206.0f, 8207.0f, 8208.0f, 8209.0f, 8210.0f, 8211.0f, 8212.0f, 8213.0f, 8214.0f, 8215.0f, 8216.0f, 8217.0f, 8218.0f, 8219.0f, 8220.0f, 8221.0f, 8222.0f, 8223.0f, 8224.0f, 8225.0f, 8226.0f, 8227.0f, 8228.0f, 8229.0f, 8230.0f, 8231.0f, 8232.0f, 8233.0f, 8234.0f, 8235.0f, 8236.0f, 8237.0f, 8238.0f, 8239.0f, 8240.0f, 8241.0f, 8242.0f, 8243.0f, 8244.0f, 8245.0f, 8246.0f, 8247.0f, 8248.0f, 8249.0f, 8250.0f, 8251.0f, 8252.0f, 8253.0f, 8254.0f, 8255.0f, 8256.0f, 8257.0f, 8258.0f, 8259.0f, 8260.0f, 8261.0f, 8262.0f, 8263.0f, 8264.0f, 8265.0f, 8266.0f, 8267.0f, 8268.0f, 8269.0f, 8270.0f, 8271.0f, 8272.0f, 8273.0f, 8274.0f, 8275.0f, 8276.0f, 8277.0f, 8278.0f, 8279.0f, 8280.0f, 8281.0f, 8282.0f, 8283.0f, 8284.0f, 8285.0f, 8286.0f, 8287.0f, 8288.0f, 8289.0f, 8290.0f, 8291.0f, 8292.0f, 8293.0f, 8294.0f, 8295.0f, 8296.0f, 8297.0f, 8298.0f, 8299.0f, 8300.0f, 8301.0f, 8302.0f, 8303.0f, 8304.0f, 8305.0f, 8306.0f, 8307.0f, 8308.0f, 8309.0f, 8310.0f, 8311.0f, 8312.0f, 8313.0f, 8314.0f, 8315.0f, 8316.0f, 8317.0f, 8318.0f, 8319.0f, 8320.0f, 8321.0f, 8322.0f, 8323.0f, 8324.0f, 8325.0f, 8326.0f, 8327.0f, 8328.0f, 8329.0f, 8330.0f, 8331.0f, 8332.0f, 8333.0f, 8334.0f, 8335.0f, 
8336.0f, 8337.0f, 8338.0f, 8339.0f, 8340.0f, 8341.0f, 8342.0f, 8343.0f, 8344.0f, 8345.0f, 8346.0f, 8347.0f, 8348.0f, 8349.0f, 8350.0f, 8351.0f, 8352.0f, 8353.0f, 8354.0f, 8355.0f, 8356.0f, 8357.0f, 8358.0f, 8359.0f, 8360.0f, 8361.0f, 8362.0f, 8363.0f, 8364.0f, 8365.0f, 8366.0f, 8367.0f, 8368.0f, 8369.0f, 8370.0f, 8371.0f, 8372.0f, 8373.0f, 8374.0f, 8375.0f, 8376.0f, 8377.0f, 8378.0f, 8379.0f, 8380.0f, 8381.0f, 8382.0f, 8383.0f, 8384.0f, 8385.0f, 8386.0f, 8387.0f, 8388.0f, 8389.0f, 8390.0f, 8391.0f, 8392.0f, 8393.0f, 8394.0f, 8395.0f, 8396.0f, 8397.0f, 8398.0f, 8399.0f, 8400.0f, 8401.0f, 8402.0f, 8403.0f, 8404.0f, 8405.0f, 8406.0f, 8407.0f, 8408.0f, 8409.0f, 8410.0f, 8411.0f, 8412.0f, 8413.0f, 8414.0f, 8415.0f, 8416.0f, 8417.0f, 8418.0f, 8419.0f, 8420.0f, 8421.0f, 8422.0f, 8423.0f, 8424.0f, 8425.0f, 8426.0f, 8427.0f, 8428.0f, 8429.0f, 8430.0f, 8431.0f, 8432.0f, 8433.0f, 8434.0f, 8435.0f, 8436.0f, 8437.0f, 8438.0f, 8439.0f, 8440.0f, 8441.0f, 8442.0f, 8443.0f, 8444.0f, 8445.0f, 8446.0f, 8447.0f, 8448.0f, 8449.0f, 8450.0f, 8451.0f, 8452.0f, 8453.0f, 8454.0f, 8455.0f, 8456.0f, 8457.0f, 8458.0f, 8459.0f, 8460.0f, 8461.0f, 8462.0f, 8463.0f, 8464.0f, 8465.0f, 8466.0f, 8467.0f, 8468.0f, 8469.0f, 8470.0f, 8471.0f, 8472.0f, 8473.0f, 8474.0f, 8475.0f, 8476.0f, 8477.0f, 8478.0f, 8479.0f, 8480.0f, 8481.0f, 8482.0f, 8483.0f, 8484.0f, 8485.0f, 8486.0f, 8487.0f, 8488.0f, 8489.0f, 8490.0f, 8491.0f, 8492.0f, 8493.0f, 8494.0f, 8495.0f, 8496.0f, 8497.0f, 8498.0f, 8499.0f, 8500.0f, 8501.0f, 8502.0f, 8503.0f, 8504.0f, 8505.0f, 8506.0f, 8507.0f, 8508.0f, 8509.0f, 8510.0f, 8511.0f, 8512.0f, 8513.0f, 8514.0f, 8515.0f, 8516.0f, 8517.0f, 8518.0f, 8519.0f, 8520.0f, 8521.0f, 8522.0f, 8523.0f, 8524.0f, 8525.0f, 8526.0f, 8527.0f, 8528.0f, 8529.0f, 8530.0f, 8531.0f, 8532.0f, 8533.0f, 8534.0f, 8535.0f, 8536.0f, 8537.0f, 8538.0f, 8539.0f, 8540.0f, 8541.0f, 8542.0f, 8543.0f, 8544.0f, 8545.0f, 8546.0f, 8547.0f, 8548.0f, 8549.0f, 8550.0f, 8551.0f, 8552.0f, 8553.0f, 8554.0f, 8555.0f, 8556.0f, 8557.0f, 
8558.0f, 8559.0f, 8560.0f, 8561.0f, 8562.0f, 8563.0f, 8564.0f, 8565.0f, 8566.0f, 8567.0f, 8568.0f, 8569.0f, 8570.0f, 8571.0f, 8572.0f, 8573.0f, 8574.0f, 8575.0f, 8576.0f, 8577.0f, 8578.0f, 8579.0f, 8580.0f, 8581.0f, 8582.0f, 8583.0f, 8584.0f, 8585.0f, 8586.0f, 8587.0f, 8588.0f, 8589.0f, 8590.0f, 8591.0f, 8592.0f, 8593.0f, 8594.0f, 8595.0f, 8596.0f, 8597.0f, 8598.0f, 8599.0f, 8600.0f, 8601.0f, 8602.0f, 8603.0f, 8604.0f, 8605.0f, 8606.0f, 8607.0f, 8608.0f, 8609.0f, 8610.0f, 8611.0f, 8612.0f, 8613.0f, 8614.0f, 8615.0f, 8616.0f, 8617.0f, 8618.0f, 8619.0f, 8620.0f, 8621.0f, 8622.0f, 8623.0f, 8624.0f, 8625.0f, 8626.0f, 8627.0f, 8628.0f, 8629.0f, 8630.0f, 8631.0f, 8632.0f, 8633.0f, 8634.0f, 8635.0f, 8636.0f, 8637.0f, 8638.0f, 8639.0f, 8640.0f, 8641.0f, 8642.0f, 8643.0f, 8644.0f, 8645.0f, 8646.0f, 8647.0f, 8648.0f, 8649.0f, 8650.0f, 8651.0f, 8652.0f, 8653.0f, 8654.0f, 8655.0f, 8656.0f, 8657.0f, 8658.0f, 8659.0f, 8660.0f, 8661.0f, 8662.0f, 8663.0f, 8664.0f, 8665.0f, 8666.0f, 8667.0f, 8668.0f, 8669.0f, 8670.0f, 8671.0f, 8672.0f, 8673.0f, 8674.0f, 8675.0f, 8676.0f, 8677.0f, 8678.0f, 8679.0f, 8680.0f, 8681.0f, 8682.0f, 8683.0f, 8684.0f, 8685.0f, 8686.0f, 8687.0f, 8688.0f, 8689.0f, 8690.0f, 8691.0f, 8692.0f, 8693.0f, 8694.0f, 8695.0f, 8696.0f, 8697.0f, 8698.0f, 8699.0f, 8700.0f, 8701.0f, 8702.0f, 8703.0f, 8704.0f, 8705.0f, 8706.0f, 8707.0f, 8708.0f, 8709.0f, 8710.0f, 8711.0f, 8712.0f, 8713.0f, 8714.0f, 8715.0f, 8716.0f, 8717.0f, 8718.0f, 8719.0f, 8720.0f, 8721.0f, 8722.0f, 8723.0f, 8724.0f, 8725.0f, 8726.0f, 8727.0f, 8728.0f, 8729.0f, 8730.0f, 8731.0f, 8732.0f, 8733.0f, 8734.0f, 8735.0f, 8736.0f, 8737.0f, 8738.0f, 8739.0f, 8740.0f, 8741.0f, 8742.0f, 8743.0f, 8744.0f, 8745.0f, 8746.0f, 8747.0f, 8748.0f, 8749.0f, 8750.0f, 8751.0f, 8752.0f, 8753.0f, 8754.0f, 8755.0f, 8756.0f, 8757.0f, 8758.0f, 8759.0f, 8760.0f, 8761.0f, 8762.0f, 8763.0f, 8764.0f, 8765.0f, 8766.0f, 8767.0f, 8768.0f, 8769.0f, 8770.0f, 8771.0f, 8772.0f, 8773.0f, 8774.0f, 8775.0f, 8776.0f, 8777.0f, 8778.0f, 8779.0f, 
8780.0f, 8781.0f, 8782.0f, 8783.0f, 8784.0f, 8785.0f, 8786.0f, 8787.0f, 8788.0f, 8789.0f, 8790.0f, 8791.0f, 8792.0f, 8793.0f, 8794.0f, 8795.0f, 8796.0f, 8797.0f, 8798.0f, 8799.0f, 8800.0f, 8801.0f, 8802.0f, 8803.0f, 8804.0f, 8805.0f, 8806.0f, 8807.0f, 8808.0f, 8809.0f, 8810.0f, 8811.0f, 8812.0f, 8813.0f, 8814.0f, 8815.0f, 8816.0f, 8817.0f, 8818.0f, 8819.0f, 8820.0f, 8821.0f, 8822.0f, 8823.0f, 8824.0f, 8825.0f, 8826.0f, 8827.0f, 8828.0f, 8829.0f, 8830.0f, 8831.0f, 8832.0f, 8833.0f, 8834.0f, 8835.0f, 8836.0f, 8837.0f, 8838.0f, 8839.0f, 8840.0f, 8841.0f, 8842.0f, 8843.0f, 8844.0f, 8845.0f, 8846.0f, 8847.0f, 8848.0f, 8849.0f, 8850.0f, 8851.0f, 8852.0f, 8853.0f, 8854.0f, 8855.0f, 8856.0f, 8857.0f, 8858.0f, 8859.0f, 8860.0f, 8861.0f, 8862.0f, 8863.0f, 8864.0f, 8865.0f, 8866.0f, 8867.0f, 8868.0f, 8869.0f, 8870.0f, 8871.0f, 8872.0f, 8873.0f, 8874.0f, 8875.0f, 8876.0f, 8877.0f, 8878.0f, 8879.0f, 8880.0f, 8881.0f, 8882.0f, 8883.0f, 8884.0f, 8885.0f, 8886.0f, 8887.0f, 8888.0f, 8889.0f, 8890.0f, 8891.0f, 8892.0f, 8893.0f, 8894.0f, 8895.0f, 8896.0f, 8897.0f, 8898.0f, 8899.0f, 8900.0f, 8901.0f, 8902.0f, 8903.0f, 8904.0f, 8905.0f, 8906.0f, 8907.0f, 8908.0f, 8909.0f, 8910.0f, 8911.0f, 8912.0f, 8913.0f, 8914.0f, 8915.0f, 8916.0f, 8917.0f, 8918.0f, 8919.0f, 8920.0f, 8921.0f, 8922.0f, 8923.0f, 8924.0f, 8925.0f, 8926.0f, 8927.0f, 8928.0f, 8929.0f, 8930.0f, 8931.0f, 8932.0f, 8933.0f, 8934.0f, 8935.0f, 8936.0f, 8937.0f, 8938.0f, 8939.0f, 8940.0f, 8941.0f, 8942.0f, 8943.0f, 8944.0f, 8945.0f, 8946.0f, 8947.0f, 8948.0f, 8949.0f, 8950.0f, 8951.0f, 8952.0f, 8953.0f, 8954.0f, 8955.0f, 8956.0f, 8957.0f, 8958.0f, 8959.0f, 8960.0f, 8961.0f, 8962.0f, 8963.0f, 8964.0f, 8965.0f, 8966.0f, 8967.0f, 8968.0f, 8969.0f, 8970.0f, 8971.0f, 8972.0f, 8973.0f, 8974.0f, 8975.0f, 8976.0f, 8977.0f, 8978.0f, 8979.0f, 8980.0f, 8981.0f, 8982.0f, 8983.0f, 8984.0f, 8985.0f, 8986.0f, 8987.0f, 8988.0f, 8989.0f, 8990.0f, 8991.0f, 8992.0f, 8993.0f, 8994.0f, 8995.0f, 8996.0f, 8997.0f, 8998.0f, 8999.0f, 9000.0f, 9001.0f, 
9002.0f, 9003.0f, 9004.0f, 9005.0f, 9006.0f, 9007.0f, 9008.0f, 9009.0f, 9010.0f, 9011.0f, 9012.0f, 9013.0f, 9014.0f, 9015.0f, 9016.0f, 9017.0f, 9018.0f, 9019.0f, 9020.0f, 9021.0f, 9022.0f, 9023.0f, 9024.0f, 9025.0f, 9026.0f, 9027.0f, 9028.0f, 9029.0f, 9030.0f, 9031.0f, 9032.0f, 9033.0f, 9034.0f, 9035.0f, 9036.0f, 9037.0f, 9038.0f, 9039.0f, 9040.0f, 9041.0f, 9042.0f, 9043.0f, 9044.0f, 9045.0f, 9046.0f, 9047.0f, 9048.0f, 9049.0f, 9050.0f, 9051.0f, 9052.0f, 9053.0f, 9054.0f, 9055.0f, 9056.0f, 9057.0f, 9058.0f, 9059.0f, 9060.0f, 9061.0f, 9062.0f, 9063.0f, 9064.0f, 9065.0f, 9066.0f, 9067.0f, 9068.0f, 9069.0f, 9070.0f, 9071.0f, 9072.0f, 9073.0f, 9074.0f, 9075.0f, 9076.0f, 9077.0f, 9078.0f, 9079.0f, 9080.0f, 9081.0f, 9082.0f, 9083.0f, 9084.0f, 9085.0f, 9086.0f, 9087.0f, 9088.0f, 9089.0f, 9090.0f, 9091.0f, 9092.0f, 9093.0f, 9094.0f, 9095.0f, 9096.0f, 9097.0f, 9098.0f, 9099.0f, 9100.0f, 9101.0f, 9102.0f, 9103.0f, 9104.0f, 9105.0f, 9106.0f, 9107.0f, 9108.0f, 9109.0f, 9110.0f, 9111.0f, 9112.0f, 9113.0f, 9114.0f, 9115.0f, 9116.0f, 9117.0f, 9118.0f, 9119.0f, 9120.0f, 9121.0f, 9122.0f, 9123.0f, 9124.0f, 9125.0f, 9126.0f, 9127.0f, 9128.0f, 9129.0f, 9130.0f, 9131.0f, 9132.0f, 9133.0f, 9134.0f, 9135.0f, 9136.0f, 9137.0f, 9138.0f, 9139.0f, 9140.0f, 9141.0f, 9142.0f, 9143.0f, 9144.0f, 9145.0f, 9146.0f, 9147.0f, 9148.0f, 9149.0f, 9150.0f, 9151.0f, 9152.0f, 9153.0f, 9154.0f, 9155.0f, 9156.0f, 9157.0f, 9158.0f, 9159.0f, 9160.0f, 9161.0f, 9162.0f, 9163.0f, 9164.0f, 9165.0f, 9166.0f, 9167.0f, 9168.0f, 9169.0f, 9170.0f, 9171.0f, 9172.0f, 9173.0f, 9174.0f, 9175.0f, 9176.0f, 9177.0f, 9178.0f, 9179.0f, 9180.0f, 9181.0f, 9182.0f, 9183.0f, 9184.0f, 9185.0f, 9186.0f, 9187.0f, 9188.0f, 9189.0f, 9190.0f, 9191.0f, 9192.0f, 9193.0f, 9194.0f, 9195.0f, 9196.0f, 9197.0f, 9198.0f, 9199.0f, 9200.0f, 9201.0f, 9202.0f, 9203.0f, 9204.0f, 9205.0f, 9206.0f, 9207.0f, 9208.0f, 9209.0f, 9210.0f, 9211.0f, 9212.0f, 9213.0f, 9214.0f, 9215.0f, 9216.0f, 9217.0f, 9218.0f, 9219.0f, 9220.0f, 9221.0f, 9222.0f, 9223.0f, 
9224.0f, 9225.0f, 9226.0f, 9227.0f, 9228.0f, 9229.0f, 9230.0f, 9231.0f, 9232.0f, 9233.0f, 9234.0f, 9235.0f, 9236.0f, 9237.0f, 9238.0f, 9239.0f, 9240.0f, 9241.0f, 9242.0f, 9243.0f, 9244.0f, 9245.0f, 9246.0f, 9247.0f, 9248.0f, 9249.0f, 9250.0f, 9251.0f, 9252.0f, 9253.0f, 9254.0f, 9255.0f, 9256.0f, 9257.0f, 9258.0f, 9259.0f, 9260.0f, 9261.0f, 9262.0f, 9263.0f, 9264.0f, 9265.0f, 9266.0f, 9267.0f, 9268.0f, 9269.0f, 9270.0f, 9271.0f, 9272.0f, 9273.0f, 9274.0f, 9275.0f, 9276.0f, 9277.0f, 9278.0f, 9279.0f, 9280.0f, 9281.0f, 9282.0f, 9283.0f, 9284.0f, 9285.0f, 9286.0f, 9287.0f, 9288.0f, 9289.0f, 9290.0f, 9291.0f, 9292.0f, 9293.0f, 9294.0f, 9295.0f, 9296.0f, 9297.0f, 9298.0f, 9299.0f, 9300.0f, 9301.0f, 9302.0f, 9303.0f, 9304.0f, 9305.0f, 9306.0f, 9307.0f, 9308.0f, 9309.0f, 9310.0f, 9311.0f, 9312.0f, 9313.0f, 9314.0f, 9315.0f, 9316.0f, 9317.0f, 9318.0f, 9319.0f, 9320.0f, 9321.0f, 9322.0f, 9323.0f, 9324.0f, 9325.0f, 9326.0f, 9327.0f, 9328.0f, 9329.0f, 9330.0f, 9331.0f, 9332.0f, 9333.0f, 9334.0f, 9335.0f, 9336.0f, 9337.0f, 9338.0f, 9339.0f, 9340.0f, 9341.0f, 9342.0f, 9343.0f, 9344.0f, 9345.0f, 9346.0f, 9347.0f, 9348.0f, 9349.0f, 9350.0f, 9351.0f, 9352.0f, 9353.0f, 9354.0f, 9355.0f, 9356.0f, 9357.0f, 9358.0f, 9359.0f, 9360.0f, 9361.0f, 9362.0f, 9363.0f, 9364.0f, 9365.0f, 9366.0f, 9367.0f, 9368.0f, 9369.0f, 9370.0f, 9371.0f, 9372.0f, 9373.0f, 9374.0f, 9375.0f, 9376.0f, 9377.0f, 9378.0f, 9379.0f, 9380.0f, 9381.0f, 9382.0f, 9383.0f, 9384.0f, 9385.0f, 9386.0f, 9387.0f, 9388.0f, 9389.0f, 9390.0f, 9391.0f, 9392.0f, 9393.0f, 9394.0f, 9395.0f, 9396.0f, 9397.0f, 9398.0f, 9399.0f, 9400.0f, 9401.0f, 9402.0f, 9403.0f, 9404.0f, 9405.0f, 9406.0f, 9407.0f, 9408.0f, 9409.0f, 9410.0f, 9411.0f, 9412.0f, 9413.0f, 9414.0f, 9415.0f, 9416.0f, 9417.0f, 9418.0f, 9419.0f, 9420.0f, 9421.0f, 9422.0f, 9423.0f, 9424.0f, 9425.0f, 9426.0f, 9427.0f, 9428.0f, 9429.0f, 9430.0f, 9431.0f, 9432.0f, 9433.0f, 9434.0f, 9435.0f, 9436.0f, 9437.0f, 9438.0f, 9439.0f, 9440.0f, 9441.0f, 9442.0f, 9443.0f, 9444.0f, 9445.0f, 
9446.0f, 9447.0f, 9448.0f, 9449.0f, 9450.0f, 9451.0f, 9452.0f, 9453.0f, 9454.0f, 9455.0f, 9456.0f, 9457.0f, 9458.0f, 9459.0f, 9460.0f, 9461.0f, 9462.0f, 9463.0f, 9464.0f, 9465.0f, 9466.0f, 9467.0f, 9468.0f, 9469.0f, 9470.0f, 9471.0f, 9472.0f, 9473.0f, 9474.0f, 9475.0f, 9476.0f, 9477.0f, 9478.0f, 9479.0f, 9480.0f, 9481.0f, 9482.0f, 9483.0f, 9484.0f, 9485.0f, 9486.0f, 9487.0f, 9488.0f, 9489.0f, 9490.0f, 9491.0f, 9492.0f, 9493.0f, 9494.0f, 9495.0f, 9496.0f, 9497.0f, 9498.0f, 9499.0f, 9500.0f, 9501.0f, 9502.0f, 9503.0f, 9504.0f, 9505.0f, 9506.0f, 9507.0f, 9508.0f, 9509.0f, 9510.0f, 9511.0f, 9512.0f, 9513.0f, 9514.0f, 9515.0f, 9516.0f, 9517.0f, 9518.0f, 9519.0f, 9520.0f, 9521.0f, 9522.0f, 9523.0f, 9524.0f, 9525.0f, 9526.0f, 9527.0f, 9528.0f, 9529.0f, 9530.0f, 9531.0f, 9532.0f, 9533.0f, 9534.0f, 9535.0f, 9536.0f, 9537.0f, 9538.0f, 9539.0f, 9540.0f, 9541.0f, 9542.0f, 9543.0f, 9544.0f, 9545.0f, 9546.0f, 9547.0f, 9548.0f, 9549.0f, 9550.0f, 9551.0f, 9552.0f, 9553.0f, 9554.0f, 9555.0f, 9556.0f, 9557.0f, 9558.0f, 9559.0f, 9560.0f, 9561.0f, 9562.0f, 9563.0f, 9564.0f, 9565.0f, 9566.0f, 9567.0f, 9568.0f, 9569.0f, 9570.0f, 9571.0f, 9572.0f, 9573.0f, 9574.0f, 9575.0f, 9576.0f, 9577.0f, 9578.0f, 9579.0f, 9580.0f, 9581.0f, 9582.0f, 9583.0f, 9584.0f, 9585.0f, 9586.0f, 9587.0f, 9588.0f, 9589.0f, 9590.0f, 9591.0f, 9592.0f, 9593.0f, 9594.0f, 9595.0f, 9596.0f, 9597.0f, 9598.0f, 9599.0f, 9600.0f, 9601.0f, 9602.0f, 9603.0f, 9604.0f, 9605.0f, 9606.0f, 9607.0f, 9608.0f, 9609.0f, 9610.0f, 9611.0f, 9612.0f, 9613.0f, 9614.0f, 9615.0f, 9616.0f, 9617.0f, 9618.0f, 9619.0f, 9620.0f, 9621.0f, 9622.0f, 9623.0f, 9624.0f, 9625.0f, 9626.0f, 9627.0f, 9628.0f, 9629.0f, 9630.0f, 9631.0f, 9632.0f, 9633.0f, 9634.0f, 9635.0f, 9636.0f, 9637.0f, 9638.0f, 9639.0f, 9640.0f, 9641.0f, 9642.0f, 9643.0f, 9644.0f, 9645.0f, 9646.0f, 9647.0f, 9648.0f, 9649.0f, 9650.0f, 9651.0f, 9652.0f, 9653.0f, 9654.0f, 9655.0f, 9656.0f, 9657.0f, 9658.0f, 9659.0f, 9660.0f, 9661.0f, 9662.0f, 9663.0f, 9664.0f, 9665.0f, 9666.0f, 9667.0f, 
9668.0f, 9669.0f, 9670.0f, 9671.0f, 9672.0f, 9673.0f, 9674.0f, 9675.0f, 9676.0f, 9677.0f, 9678.0f, 9679.0f, 9680.0f, 9681.0f, 9682.0f, 9683.0f, 9684.0f, 9685.0f, 9686.0f, 9687.0f, 9688.0f, 9689.0f, 9690.0f, 9691.0f, 9692.0f, 9693.0f, 9694.0f, 9695.0f, 9696.0f, 9697.0f, 9698.0f, 9699.0f, 9700.0f, 9701.0f, 9702.0f, 9703.0f, 9704.0f, 9705.0f, 9706.0f, 9707.0f, 9708.0f, 9709.0f, 9710.0f, 9711.0f, 9712.0f, 9713.0f, 9714.0f, 9715.0f, 9716.0f, 9717.0f, 9718.0f, 9719.0f, 9720.0f, 9721.0f, 9722.0f, 9723.0f, 9724.0f, 9725.0f, 9726.0f, 9727.0f, 9728.0f, 9729.0f, 9730.0f, 9731.0f, 9732.0f, 9733.0f, 9734.0f, 9735.0f, 9736.0f, 9737.0f, 9738.0f, 9739.0f, 9740.0f, 9741.0f, 9742.0f, 9743.0f, 9744.0f, 9745.0f, 9746.0f, 9747.0f, 9748.0f, 9749.0f, 9750.0f, 9751.0f, 9752.0f, 9753.0f, 9754.0f, 9755.0f, 9756.0f, 9757.0f, 9758.0f, 9759.0f, 9760.0f, 9761.0f, 9762.0f, 9763.0f, 9764.0f, 9765.0f, 9766.0f, 9767.0f, 9768.0f, 9769.0f, 9770.0f, 9771.0f, 9772.0f, 9773.0f, 9774.0f, 9775.0f, 9776.0f, 9777.0f, 9778.0f, 9779.0f, 9780.0f, 9781.0f, 9782.0f, 9783.0f, 9784.0f, 9785.0f, 9786.0f, 9787.0f, 9788.0f, 9789.0f, 9790.0f, 9791.0f, 9792.0f, 9793.0f, 9794.0f, 9795.0f, 9796.0f, 9797.0f, 9798.0f, 9799.0f, 9800.0f, 9801.0f, 9802.0f, 9803.0f, 9804.0f, 9805.0f, 9806.0f, 9807.0f, 9808.0f, 9809.0f, 9810.0f, 9811.0f, 9812.0f, 9813.0f, 9814.0f, 9815.0f, 9816.0f, 9817.0f, 9818.0f, 9819.0f, 9820.0f, 9821.0f, 9822.0f, 9823.0f, 9824.0f, 9825.0f, 9826.0f, 9827.0f, 9828.0f, 9829.0f, 9830.0f, 9831.0f, 9832.0f, 9833.0f, 9834.0f, 9835.0f, 9836.0f, 9837.0f, 9838.0f, 9839.0f, 9840.0f, 9841.0f, 9842.0f, 9843.0f, 9844.0f, 9845.0f, 9846.0f, 9847.0f, 9848.0f, 9849.0f, 9850.0f, 9851.0f, 9852.0f, 9853.0f, 9854.0f, 9855.0f, 9856.0f, 9857.0f, 9858.0f, 9859.0f, 9860.0f, 9861.0f, 9862.0f, 9863.0f, 9864.0f, 9865.0f, 9866.0f, 9867.0f, 9868.0f, 9869.0f, 9870.0f, 9871.0f, 9872.0f, 9873.0f, 9874.0f, 9875.0f, 9876.0f, 9877.0f, 9878.0f, 9879.0f, 9880.0f, 9881.0f, 9882.0f, 9883.0f, 9884.0f, 9885.0f, 9886.0f, 9887.0f, 9888.0f, 9889.0f, 
9890.0f, 9891.0f, 9892.0f, 9893.0f, 9894.0f, 9895.0f, 9896.0f, 9897.0f, 9898.0f, 9899.0f, 9900.0f, 9901.0f, 9902.0f, 9903.0f, 9904.0f, 9905.0f, 9906.0f, 9907.0f, 9908.0f, 9909.0f, 9910.0f, 9911.0f, 9912.0f, 9913.0f, 9914.0f, 9915.0f, 9916.0f, 9917.0f, 9918.0f, 9919.0f, 9920.0f, 9921.0f, 9922.0f, 9923.0f, 9924.0f, 9925.0f, 9926.0f, 9927.0f, 9928.0f, 9929.0f, 9930.0f, 9931.0f, 9932.0f, 9933.0f, 9934.0f, 9935.0f, 9936.0f, 9937.0f, 9938.0f, 9939.0f, 9940.0f, 9941.0f, 9942.0f, 9943.0f, 9944.0f, 9945.0f, 9946.0f, 9947.0f, 9948.0f, 9949.0f, 9950.0f, 9951.0f, 9952.0f, 9953.0f, 9954.0f, 9955.0f, 9956.0f, 9957.0f, 9958.0f, 9959.0f, 9960.0f, 9961.0f, 9962.0f, 9963.0f, 9964.0f, 9965.0f, 9966.0f, 9967.0f, 9968.0f, 9969.0f, 9970.0f, 9971.0f, 9972.0f, 9973.0f, 9974.0f, 9975.0f, 9976.0f, 9977.0f, 9978.0f, 9979.0f, 9980.0f, 9981.0f, 9982.0f, 9983.0f, 9984.0f, 9985.0f, 9986.0f, 9987.0f, 9988.0f, 9989.0f, 9990.0f, 9991.0f, 9992.0f, 9993.0f, 9994.0f, 9995.0f, 9996.0f, 9997.0f, 9998.0f, 9999.0f, 10000.0f, 10001.0f, 10002.0f, 10003.0f, 10004.0f, 10005.0f, 10006.0f, 10007.0f, 10008.0f, 10009.0f, 10010.0f, 10011.0f, 10012.0f, 10013.0f, 10014.0f, 10015.0f, 10016.0f, 10017.0f, 10018.0f, 10019.0f, 10020.0f, 10021.0f, 10022.0f, 10023.0f, 10024.0f, 10025.0f, 10026.0f, 10027.0f, 10028.0f, 10029.0f, 10030.0f, 10031.0f, 10032.0f, 10033.0f, 10034.0f, 10035.0f, 10036.0f, 10037.0f, 10038.0f, 10039.0f, 10040.0f, 10041.0f, 10042.0f, 10043.0f, 10044.0f, 10045.0f, 10046.0f, 10047.0f, 10048.0f, 10049.0f, 10050.0f, 10051.0f, 10052.0f, 10053.0f, 10054.0f, 10055.0f, 10056.0f, 10057.0f, 10058.0f, 10059.0f, 10060.0f, 10061.0f, 10062.0f, 10063.0f, 10064.0f, 10065.0f, 10066.0f, 10067.0f, 10068.0f, 10069.0f, 10070.0f, 10071.0f, 10072.0f, 10073.0f, 10074.0f, 10075.0f, 10076.0f, 10077.0f, 10078.0f, 10079.0f, 10080.0f, 10081.0f, 10082.0f, 10083.0f, 10084.0f, 10085.0f, 10086.0f, 10087.0f, 10088.0f, 10089.0f, 10090.0f, 10091.0f, 10092.0f, 10093.0f, 10094.0f, 10095.0f, 10096.0f, 10097.0f, 10098.0f, 10099.0f, 10100.0f, 
10101.0f, 10102.0f, 10103.0f, 10104.0f, 10105.0f, 10106.0f, 10107.0f, 10108.0f, 10109.0f, 10110.0f, 10111.0f, 10112.0f, 10113.0f, 10114.0f, 10115.0f, 10116.0f, 10117.0f, 10118.0f, 10119.0f, 10120.0f, 10121.0f, 10122.0f, 10123.0f, 10124.0f, 10125.0f, 10126.0f, 10127.0f, 10128.0f, 10129.0f, 10130.0f, 10131.0f, 10132.0f, 10133.0f, 10134.0f, 10135.0f, 10136.0f, 10137.0f, 10138.0f, 10139.0f, 10140.0f, 10141.0f, 10142.0f, 10143.0f, 10144.0f, 10145.0f, 10146.0f, 10147.0f, 10148.0f, 10149.0f, 10150.0f, 10151.0f, 10152.0f, 10153.0f, 10154.0f, 10155.0f, 10156.0f, 10157.0f, 10158.0f, 10159.0f, 10160.0f, 10161.0f, 10162.0f, 10163.0f, 10164.0f, 10165.0f, 10166.0f, 10167.0f, 10168.0f, 10169.0f, 10170.0f, 10171.0f, 10172.0f, 10173.0f, 10174.0f, 10175.0f, 10176.0f, 10177.0f, 10178.0f, 10179.0f, 10180.0f, 10181.0f, 10182.0f, 10183.0f, 10184.0f, 10185.0f, 10186.0f, 10187.0f, 10188.0f, 10189.0f, 10190.0f, 10191.0f, 10192.0f, 10193.0f, 10194.0f, 10195.0f, 10196.0f, 10197.0f, 10198.0f, 10199.0f, 10200.0f, 10201.0f, 10202.0f, 10203.0f, 10204.0f, 10205.0f, 10206.0f, 10207.0f, 10208.0f, 10209.0f, 10210.0f, 10211.0f, 10212.0f, 10213.0f, 10214.0f, 10215.0f, 10216.0f, 10217.0f, 10218.0f, 10219.0f, 10220.0f, 10221.0f, 10222.0f, 10223.0f, 10224.0f, 10225.0f, 10226.0f, 10227.0f, 10228.0f, 10229.0f, 10230.0f, 10231.0f, 10232.0f, 10233.0f, 10234.0f, 10235.0f, 10236.0f, 10237.0f, 10238.0f, 10239.0f, 10240.0f, 10241.0f, 10242.0f, 10243.0f, 10244.0f, 10245.0f, 10246.0f, 10247.0f, 10248.0f, 10249.0f, 10250.0f, 10251.0f, 10252.0f, 10253.0f, 10254.0f, 10255.0f, 10256.0f, 10257.0f, 10258.0f, 10259.0f, 10260.0f, 10261.0f, 10262.0f, 10263.0f, 10264.0f, 10265.0f, 10266.0f, 10267.0f, 10268.0f, 10269.0f, 10270.0f, 10271.0f, 10272.0f, 10273.0f, 10274.0f, 10275.0f, 10276.0f, 10277.0f, 10278.0f, 10279.0f, 10280.0f, 10281.0f, 10282.0f, 10283.0f, 10284.0f, 10285.0f, 10286.0f, 10287.0f, 10288.0f, 10289.0f, 10290.0f, 10291.0f, 10292.0f, 10293.0f, 10294.0f, 10295.0f, 10296.0f, 10297.0f, 10298.0f, 10299.0f, 10300.0f, 
10301.0f, 10302.0f, 10303.0f, 10304.0f, 10305.0f, 10306.0f, 10307.0f, 10308.0f, 10309.0f, 10310.0f, 10311.0f, 10312.0f, 10313.0f, 10314.0f, 10315.0f, 10316.0f, 10317.0f, 10318.0f, 10319.0f, 10320.0f, 10321.0f, 10322.0f, 10323.0f, 10324.0f, 10325.0f, 10326.0f, 10327.0f, 10328.0f, 10329.0f, 10330.0f, 10331.0f, 10332.0f, 10333.0f, 10334.0f, 10335.0f, 10336.0f, 10337.0f, 10338.0f, 10339.0f, 10340.0f, 10341.0f, 10342.0f, 10343.0f, 10344.0f, 10345.0f, 10346.0f, 10347.0f, 10348.0f, 10349.0f, 10350.0f, 10351.0f, 10352.0f, 10353.0f, 10354.0f, 10355.0f, 10356.0f, 10357.0f, 10358.0f, 10359.0f, 10360.0f, 10361.0f, 10362.0f, 10363.0f, 10364.0f, 10365.0f, 10366.0f, 10367.0f, 10368.0f, 10369.0f, 10370.0f, 10371.0f, 10372.0f, 10373.0f, 10374.0f, 10375.0f, 10376.0f, 10377.0f, 10378.0f, 10379.0f, 10380.0f, 10381.0f, 10382.0f, 10383.0f, 10384.0f, 10385.0f, 10386.0f, 10387.0f, 10388.0f, 10389.0f, 10390.0f, 10391.0f, 10392.0f, 10393.0f, 10394.0f, 10395.0f, 10396.0f, 10397.0f, 10398.0f, 10399.0f, 10400.0f, 10401.0f, 10402.0f, 10403.0f, 10404.0f, 10405.0f, 10406.0f, 10407.0f, 10408.0f, 10409.0f, 10410.0f, 10411.0f, 10412.0f, 10413.0f, 10414.0f, 10415.0f, 10416.0f, 10417.0f, 10418.0f, 10419.0f, 10420.0f, 10421.0f, 10422.0f, 10423.0f, 10424.0f, 10425.0f, 10426.0f, 10427.0f, 10428.0f, 10429.0f, 10430.0f, 10431.0f, 10432.0f, 10433.0f, 10434.0f, 10435.0f, 10436.0f, 10437.0f, 10438.0f, 10439.0f, 10440.0f, 10441.0f, 10442.0f, 10443.0f, 10444.0f, 10445.0f, 10446.0f, 10447.0f, 10448.0f, 10449.0f, 10450.0f, 10451.0f, 10452.0f, 10453.0f, 10454.0f, 10455.0f, 10456.0f, 10457.0f, 10458.0f, 10459.0f, 10460.0f, 10461.0f, 10462.0f, 10463.0f, 10464.0f, 10465.0f, 10466.0f, 10467.0f, 10468.0f, 10469.0f, 10470.0f, 10471.0f, 10472.0f, 10473.0f, 10474.0f, 10475.0f, 10476.0f, 10477.0f, 10478.0f, 10479.0f, 10480.0f, 10481.0f, 10482.0f, 10483.0f, 10484.0f, 10485.0f, 10486.0f, 10487.0f, 10488.0f, 10489.0f, 10490.0f, 10491.0f, 10492.0f, 10493.0f, 10494.0f, 10495.0f, 10496.0f, 10497.0f, 10498.0f, 10499.0f, 10500.0f, 
10501.0f, 10502.0f, 10503.0f, 10504.0f, 10505.0f, 10506.0f, 10507.0f, 10508.0f, 10509.0f, 10510.0f, 10511.0f, 10512.0f, 10513.0f, 10514.0f, 10515.0f, 10516.0f, 10517.0f, 10518.0f, 10519.0f, 10520.0f, 10521.0f, 10522.0f, 10523.0f, 10524.0f, 10525.0f, 10526.0f, 10527.0f, 10528.0f, 10529.0f, 10530.0f, 10531.0f, 10532.0f, 10533.0f, 10534.0f, 10535.0f, 10536.0f, 10537.0f, 10538.0f, 10539.0f, 10540.0f, 10541.0f, 10542.0f, 10543.0f, 10544.0f, 10545.0f, 10546.0f, 10547.0f, 10548.0f, 10549.0f, 10550.0f, 10551.0f, 10552.0f, 10553.0f, 10554.0f, 10555.0f, 10556.0f, 10557.0f, 10558.0f, 10559.0f, 10560.0f, 10561.0f, 10562.0f, 10563.0f, 10564.0f, 10565.0f, 10566.0f, 10567.0f, 10568.0f, 10569.0f, 10570.0f, 10571.0f, 10572.0f, 10573.0f, 10574.0f, 10575.0f, 10576.0f, 10577.0f, 10578.0f, 10579.0f, 10580.0f, 10581.0f, 10582.0f, 10583.0f, 10584.0f, 10585.0f, 10586.0f, 10587.0f, 10588.0f, 10589.0f, 10590.0f, 10591.0f, 10592.0f, 10593.0f, 10594.0f, 10595.0f, 10596.0f, 10597.0f, 10598.0f, 10599.0f, 10600.0f, 10601.0f, 10602.0f, 10603.0f, 10604.0f, 10605.0f, 10606.0f, 10607.0f, 10608.0f, 10609.0f, 10610.0f, 10611.0f, 10612.0f, 10613.0f, 10614.0f, 10615.0f, 10616.0f, 10617.0f, 10618.0f, 10619.0f, 10620.0f, 10621.0f, 10622.0f, 10623.0f, 10624.0f, 10625.0f, 10626.0f, 10627.0f, 10628.0f, 10629.0f, 10630.0f, 10631.0f, 10632.0f, 10633.0f, 10634.0f, 10635.0f, 10636.0f, 10637.0f, 10638.0f, 10639.0f, 10640.0f, 10641.0f, 10642.0f, 10643.0f, 10644.0f, 10645.0f, 10646.0f, 10647.0f, 10648.0f, 10649.0f, 10650.0f, 10651.0f, 10652.0f, 10653.0f, 10654.0f, 10655.0f, 10656.0f, 10657.0f, 10658.0f, 10659.0f, 10660.0f, 10661.0f, 10662.0f, 10663.0f, 10664.0f, 10665.0f, 10666.0f, 10667.0f, 10668.0f, 10669.0f, 10670.0f, 10671.0f, 10672.0f, 10673.0f, 10674.0f, 10675.0f, 10676.0f, 10677.0f, 10678.0f, 10679.0f, 10680.0f, 10681.0f, 10682.0f, 10683.0f, 10684.0f, 10685.0f, 10686.0f, 10687.0f, 10688.0f, 10689.0f, 10690.0f, 10691.0f, 10692.0f, 10693.0f, 10694.0f, 10695.0f, 10696.0f, 10697.0f, 10698.0f, 10699.0f, 10700.0f, 
10701.0f, 10702.0f, 10703.0f, 10704.0f, 10705.0f, 10706.0f, 10707.0f, 10708.0f, 10709.0f, 10710.0f, 10711.0f, 10712.0f, 10713.0f, 10714.0f, 10715.0f, 10716.0f, 10717.0f, 10718.0f, 10719.0f, 10720.0f, 10721.0f, 10722.0f, 10723.0f, 10724.0f, 10725.0f, 10726.0f, 10727.0f, 10728.0f, 10729.0f, 10730.0f, 10731.0f, 10732.0f, 10733.0f, 10734.0f, 10735.0f, 10736.0f, 10737.0f, 10738.0f, 10739.0f, 10740.0f, 10741.0f, 10742.0f, 10743.0f, 10744.0f, 10745.0f, 10746.0f, 10747.0f, 10748.0f, 10749.0f, 10750.0f, 10751.0f, 10752.0f, 10753.0f, 10754.0f, 10755.0f, 10756.0f, 10757.0f, 10758.0f, 10759.0f, 10760.0f, 10761.0f, 10762.0f, 10763.0f, 10764.0f, 10765.0f, 10766.0f, 10767.0f, 10768.0f, 10769.0f, 10770.0f, 10771.0f, 10772.0f, 10773.0f, 10774.0f, 10775.0f, 10776.0f, 10777.0f, 10778.0f, 10779.0f, 10780.0f, 10781.0f, 10782.0f, 10783.0f, 10784.0f, 10785.0f, 10786.0f, 10787.0f, 10788.0f, 10789.0f, 10790.0f, 10791.0f, 10792.0f, 10793.0f, 10794.0f, 10795.0f, 10796.0f, 10797.0f, 10798.0f, 10799.0f, 10800.0f, 10801.0f, 10802.0f, 10803.0f, 10804.0f, 10805.0f, 10806.0f, 10807.0f, 10808.0f, 10809.0f, 10810.0f, 10811.0f, 10812.0f, 10813.0f, 10814.0f, 10815.0f, 10816.0f, 10817.0f, 10818.0f, 10819.0f, 10820.0f, 10821.0f, 10822.0f, 10823.0f, 10824.0f, 10825.0f, 10826.0f, 10827.0f, 10828.0f, 10829.0f, 10830.0f, 10831.0f, 10832.0f, 10833.0f, 10834.0f, 10835.0f, 10836.0f, 10837.0f, 10838.0f, 10839.0f, 10840.0f, 10841.0f, 10842.0f, 10843.0f, 10844.0f, 10845.0f, 10846.0f, 10847.0f, 10848.0f, 10849.0f, 10850.0f, 10851.0f, 10852.0f, 10853.0f, 10854.0f, 10855.0f, 10856.0f, 10857.0f, 10858.0f, 10859.0f, 10860.0f, 10861.0f, 10862.0f, 10863.0f, 10864.0f, 10865.0f, 10866.0f, 10867.0f, 10868.0f, 10869.0f, 10870.0f, 10871.0f, 10872.0f, 10873.0f, 10874.0f, 10875.0f, 10876.0f, 10877.0f, 10878.0f, 10879.0f, 10880.0f, 10881.0f, 10882.0f, 10883.0f, 10884.0f, 10885.0f, 10886.0f, 10887.0f, 10888.0f, 10889.0f, 10890.0f, 10891.0f, 10892.0f, 10893.0f, 10894.0f, 10895.0f, 10896.0f, 10897.0f, 10898.0f, 10899.0f, 10900.0f, 
10901.0f, 10902.0f, 10903.0f, 10904.0f, 10905.0f, 10906.0f, 10907.0f, 10908.0f, 10909.0f, 10910.0f, 10911.0f, 10912.0f, 10913.0f, 10914.0f, 10915.0f, 10916.0f, 10917.0f, 10918.0f, 10919.0f, 10920.0f, 10921.0f, 10922.0f, 10923.0f, 10924.0f, 10925.0f, 10926.0f, 10927.0f, 10928.0f, 10929.0f, 10930.0f, 10931.0f, 10932.0f, 10933.0f, 10934.0f, 10935.0f, 10936.0f, 10937.0f, 10938.0f, 10939.0f, 10940.0f, 10941.0f, 10942.0f, 10943.0f, 10944.0f, 10945.0f, 10946.0f, 10947.0f, 10948.0f, 10949.0f, 10950.0f, 10951.0f, 10952.0f, 10953.0f, 10954.0f, 10955.0f, 10956.0f, 10957.0f, 10958.0f, 10959.0f, 10960.0f, 10961.0f, 10962.0f, 10963.0f, 10964.0f, 10965.0f, 10966.0f, 10967.0f, 10968.0f, 10969.0f, 10970.0f, 10971.0f, 10972.0f, 10973.0f, 10974.0f, 10975.0f, 10976.0f, 10977.0f, 10978.0f, 10979.0f, 10980.0f, 10981.0f, 10982.0f, 10983.0f, 10984.0f, 10985.0f, 10986.0f, 10987.0f, 10988.0f, 10989.0f, 10990.0f, 10991.0f, 10992.0f, 10993.0f, 10994.0f, 10995.0f, 10996.0f, 10997.0f, 10998.0f, 10999.0f, 11000.0f, 11001.0f, 11002.0f, 11003.0f, 11004.0f, 11005.0f, 11006.0f, 11007.0f, 11008.0f, 11009.0f, 11010.0f, 11011.0f, 11012.0f, 11013.0f, 11014.0f, 11015.0f, 11016.0f, 11017.0f, 11018.0f, 11019.0f, 11020.0f, 11021.0f, 11022.0f, 11023.0f, 11024.0f, 11025.0f, 11026.0f, 11027.0f, 11028.0f, 11029.0f, 11030.0f, 11031.0f, 11032.0f, 11033.0f, 11034.0f, 11035.0f, 11036.0f, 11037.0f, 11038.0f, 11039.0f, 11040.0f, 11041.0f, 11042.0f, 11043.0f, 11044.0f, 11045.0f, 11046.0f, 11047.0f, 11048.0f, 11049.0f, 11050.0f, 11051.0f, 11052.0f, 11053.0f, 11054.0f, 11055.0f, 11056.0f, 11057.0f, 11058.0f, 11059.0f, 11060.0f, 11061.0f, 11062.0f, 11063.0f, 11064.0f, 11065.0f, 11066.0f, 11067.0f, 11068.0f, 11069.0f, 11070.0f, 11071.0f, 11072.0f, 11073.0f, 11074.0f, 11075.0f, 11076.0f, 11077.0f, 11078.0f, 11079.0f, 11080.0f, 11081.0f, 11082.0f, 11083.0f, 11084.0f, 11085.0f, 11086.0f, 11087.0f, 11088.0f, 11089.0f, 11090.0f, 11091.0f, 11092.0f, 11093.0f, 11094.0f, 11095.0f, 11096.0f, 11097.0f, 11098.0f, 11099.0f, 11100.0f, 
11101.0f, 11102.0f, 11103.0f, 11104.0f, 11105.0f, 11106.0f, 11107.0f, 11108.0f, 11109.0f, 11110.0f, 11111.0f, 11112.0f, 11113.0f, 11114.0f, 11115.0f, 11116.0f, 11117.0f, 11118.0f, 11119.0f, 11120.0f, 11121.0f, 11122.0f, 11123.0f, 11124.0f, 11125.0f, 11126.0f, 11127.0f, 11128.0f, 11129.0f, 11130.0f, 11131.0f, 11132.0f, 11133.0f, 11134.0f, 11135.0f, 11136.0f, 11137.0f, 11138.0f, 11139.0f, 11140.0f, 11141.0f, 11142.0f, 11143.0f, 11144.0f, 11145.0f, 11146.0f, 11147.0f, 11148.0f, 11149.0f, 11150.0f, 11151.0f, 11152.0f, 11153.0f, 11154.0f, 11155.0f, 11156.0f, 11157.0f, 11158.0f, 11159.0f, 11160.0f, 11161.0f, 11162.0f, 11163.0f, 11164.0f, 11165.0f, 11166.0f, 11167.0f, 11168.0f, 11169.0f, 11170.0f, 11171.0f, 11172.0f, 11173.0f, 11174.0f, 11175.0f, 11176.0f, 11177.0f, 11178.0f, 11179.0f, 11180.0f, 11181.0f, 11182.0f, 11183.0f, 11184.0f, 11185.0f, 11186.0f, 11187.0f, 11188.0f, 11189.0f, 11190.0f, 11191.0f, 11192.0f, 11193.0f, 11194.0f, 11195.0f, 11196.0f, 11197.0f, 11198.0f, 11199.0f, 11200.0f, 11201.0f, 11202.0f, 11203.0f, 11204.0f, 11205.0f, 11206.0f, 11207.0f, 11208.0f, 11209.0f, 11210.0f, 11211.0f, 11212.0f, 11213.0f, 11214.0f, 11215.0f, 11216.0f, 11217.0f, 11218.0f, 11219.0f, 11220.0f, 11221.0f, 11222.0f, 11223.0f, 11224.0f, 11225.0f, 11226.0f, 11227.0f, 11228.0f, 11229.0f, 11230.0f, 11231.0f, 11232.0f, 11233.0f, 11234.0f, 11235.0f, 11236.0f, 11237.0f, 11238.0f, 11239.0f, 11240.0f, 11241.0f, 11242.0f, 11243.0f, 11244.0f, 11245.0f, 11246.0f, 11247.0f, 11248.0f, 11249.0f, 11250.0f, 11251.0f, 11252.0f, 11253.0f, 11254.0f, 11255.0f, 11256.0f, 11257.0f, 11258.0f, 11259.0f, 11260.0f, 11261.0f, 11262.0f, 11263.0f, 11264.0f, 11265.0f, 11266.0f, 11267.0f, 11268.0f, 11269.0f, 11270.0f, 11271.0f, 11272.0f, 11273.0f, 11274.0f, 11275.0f, 11276.0f, 11277.0f, 11278.0f, 11279.0f, 11280.0f, 11281.0f, 11282.0f, 11283.0f, 11284.0f, 11285.0f, 11286.0f, 11287.0f, 11288.0f, 11289.0f, 11290.0f, 11291.0f, 11292.0f, 11293.0f, 11294.0f, 11295.0f, 11296.0f, 11297.0f, 11298.0f, 11299.0f, 11300.0f, 
11301.0f, 11302.0f, 11303.0f, 11304.0f, 11305.0f, 11306.0f, 11307.0f, 11308.0f, 11309.0f, 11310.0f, 11311.0f, 11312.0f, 11313.0f, 11314.0f, 11315.0f, 11316.0f, 11317.0f, 11318.0f, 11319.0f, 11320.0f, 11321.0f, 11322.0f, 11323.0f, 11324.0f, 11325.0f, 11326.0f, 11327.0f, 11328.0f, 11329.0f, 11330.0f, 11331.0f, 11332.0f, 11333.0f, 11334.0f, 11335.0f, 11336.0f, 11337.0f, 11338.0f, 11339.0f, 11340.0f, 11341.0f, 11342.0f, 11343.0f, 11344.0f, 11345.0f, 11346.0f, 11347.0f, 11348.0f, 11349.0f, 11350.0f, 11351.0f, 11352.0f, 11353.0f, 11354.0f, 11355.0f, 11356.0f, 11357.0f, 11358.0f, 11359.0f, 11360.0f, 11361.0f, 11362.0f, 11363.0f, 11364.0f, 11365.0f, 11366.0f, 11367.0f, 11368.0f, 11369.0f, 11370.0f, 11371.0f, 11372.0f, 11373.0f, 11374.0f, 11375.0f, 11376.0f, 11377.0f, 11378.0f, 11379.0f, 11380.0f, 11381.0f, 11382.0f, 11383.0f, 11384.0f, 11385.0f, 11386.0f, 11387.0f, 11388.0f, 11389.0f, 11390.0f, 11391.0f, 11392.0f, 11393.0f, 11394.0f, 11395.0f, 11396.0f, 11397.0f, 11398.0f, 11399.0f, 11400.0f, 11401.0f, 11402.0f, 11403.0f, 11404.0f, 11405.0f, 11406.0f, 11407.0f, 11408.0f, 11409.0f, 11410.0f, 11411.0f, 11412.0f, 11413.0f, 11414.0f, 11415.0f, 11416.0f, 11417.0f, 11418.0f, 11419.0f, 11420.0f, 11421.0f, 11422.0f, 11423.0f, 11424.0f, 11425.0f, 11426.0f, 11427.0f, 11428.0f, 11429.0f, 11430.0f, 11431.0f, 11432.0f, 11433.0f, 11434.0f, 11435.0f, 11436.0f, 11437.0f, 11438.0f, 11439.0f, 11440.0f, 11441.0f, 11442.0f, 11443.0f, 11444.0f, 11445.0f, 11446.0f, 11447.0f, 11448.0f, 11449.0f, 11450.0f, 11451.0f, 11452.0f, 11453.0f, 11454.0f, 11455.0f, 11456.0f, 11457.0f, 11458.0f, 11459.0f, 11460.0f, 11461.0f, 11462.0f, 11463.0f, 11464.0f, 11465.0f, 11466.0f, 11467.0f, 11468.0f, 11469.0f, 11470.0f, 11471.0f, 11472.0f, 11473.0f, 11474.0f, 11475.0f, 11476.0f, 11477.0f, 11478.0f, 11479.0f, 11480.0f, 11481.0f, 11482.0f, 11483.0f, 11484.0f, 11485.0f, 11486.0f, 11487.0f, 11488.0f, 11489.0f, 11490.0f, 11491.0f, 11492.0f, 11493.0f, 11494.0f, 11495.0f, 11496.0f, 11497.0f, 11498.0f, 11499.0f, 11500.0f, 
11501.0f, 11502.0f, 11503.0f, 11504.0f, 11505.0f, 11506.0f, 11507.0f, 11508.0f, 11509.0f, 11510.0f, 11511.0f, 11512.0f, 11513.0f, 11514.0f, 11515.0f, 11516.0f, 11517.0f, 11518.0f, 11519.0f, 11520.0f, 11521.0f, 11522.0f, 11523.0f, 11524.0f, 11525.0f, 11526.0f, 11527.0f, 11528.0f, 11529.0f, 11530.0f, 11531.0f, 11532.0f, 11533.0f, 11534.0f, 11535.0f, 11536.0f, 11537.0f, 11538.0f, 11539.0f, 11540.0f, 11541.0f, 11542.0f, 11543.0f, 11544.0f, 11545.0f, 11546.0f, 11547.0f, 11548.0f, 11549.0f, 11550.0f, 11551.0f, 11552.0f, 11553.0f, 11554.0f, 11555.0f, 11556.0f, 11557.0f, 11558.0f, 11559.0f, 11560.0f, 11561.0f, 11562.0f, 11563.0f, 11564.0f, 11565.0f, 11566.0f, 11567.0f, 11568.0f, 11569.0f, 11570.0f, 11571.0f, 11572.0f, 11573.0f, 11574.0f, 11575.0f, 11576.0f, 11577.0f, 11578.0f, 11579.0f, 11580.0f, 11581.0f, 11582.0f, 11583.0f, 11584.0f, 11585.0f, 11586.0f, 11587.0f, 11588.0f, 11589.0f, 11590.0f, 11591.0f, 11592.0f, 11593.0f, 11594.0f, 11595.0f, 11596.0f, 11597.0f, 11598.0f, 11599.0f, 11600.0f, 11601.0f, 11602.0f, 11603.0f, 11604.0f, 11605.0f, 11606.0f, 11607.0f, 11608.0f, 11609.0f, 11610.0f, 11611.0f, 11612.0f, 11613.0f, 11614.0f, 11615.0f, 11616.0f, 11617.0f, 11618.0f, 11619.0f, 11620.0f, 11621.0f, 11622.0f, 11623.0f, 11624.0f, 11625.0f, 11626.0f, 11627.0f, 11628.0f, 11629.0f, 11630.0f, 11631.0f, 11632.0f, 11633.0f, 11634.0f, 11635.0f, 11636.0f, 11637.0f, 11638.0f, 11639.0f, 11640.0f, 11641.0f, 11642.0f, 11643.0f, 11644.0f, 11645.0f, 11646.0f, 11647.0f, 11648.0f, 11649.0f, 11650.0f, 11651.0f, 11652.0f, 11653.0f, 11654.0f, 11655.0f, 11656.0f, 11657.0f, 11658.0f, 11659.0f, 11660.0f, 11661.0f, 11662.0f, 11663.0f, 11664.0f, 11665.0f, 11666.0f, 11667.0f, 11668.0f, 11669.0f, 11670.0f, 11671.0f, 11672.0f, 11673.0f, 11674.0f, 11675.0f, 11676.0f, 11677.0f, 11678.0f, 11679.0f, 11680.0f, 11681.0f, 11682.0f, 11683.0f, 11684.0f, 11685.0f, 11686.0f, 11687.0f, 11688.0f, 11689.0f, 11690.0f, 11691.0f, 11692.0f, 11693.0f, 11694.0f, 11695.0f, 11696.0f, 11697.0f, 11698.0f, 11699.0f, 11700.0f, 
11701.0f, 11702.0f, 11703.0f, 11704.0f, 11705.0f, 11706.0f, 11707.0f, 11708.0f, 11709.0f, 11710.0f, 11711.0f, 11712.0f, 11713.0f, 11714.0f, 11715.0f, 11716.0f, 11717.0f, 11718.0f, 11719.0f, 11720.0f, 11721.0f, 11722.0f, 11723.0f, 11724.0f, 11725.0f, 11726.0f, 11727.0f, 11728.0f, 11729.0f, 11730.0f, 11731.0f, 11732.0f, 11733.0f, 11734.0f, 11735.0f, 11736.0f, 11737.0f, 11738.0f, 11739.0f, 11740.0f, 11741.0f, 11742.0f, 11743.0f, 11744.0f, 11745.0f, 11746.0f, 11747.0f, 11748.0f, 11749.0f, 11750.0f, 11751.0f, 11752.0f, 11753.0f, 11754.0f, 11755.0f, 11756.0f, 11757.0f, 11758.0f, 11759.0f, 11760.0f, 11761.0f, 11762.0f, 11763.0f, 11764.0f, 11765.0f, 11766.0f, 11767.0f, 11768.0f, 11769.0f, 11770.0f, 11771.0f, 11772.0f, 11773.0f, 11774.0f, 11775.0f, 11776.0f, 11777.0f, 11778.0f, 11779.0f, 11780.0f, 11781.0f, 11782.0f, 11783.0f, 11784.0f, 11785.0f, 11786.0f, 11787.0f, 11788.0f, 11789.0f, 11790.0f, 11791.0f, 11792.0f, 11793.0f, 11794.0f, 11795.0f, 11796.0f, 11797.0f, 11798.0f, 11799.0f, 11800.0f, 11801.0f, 11802.0f, 11803.0f, 11804.0f, 11805.0f, 11806.0f, 11807.0f, 11808.0f, 11809.0f, 11810.0f, 11811.0f, 11812.0f, 11813.0f, 11814.0f, 11815.0f, 11816.0f, 11817.0f, 11818.0f, 11819.0f, 11820.0f, 11821.0f, 11822.0f, 11823.0f, 11824.0f, 11825.0f, 11826.0f, 11827.0f, 11828.0f, 11829.0f, 11830.0f, 11831.0f, 11832.0f, 11833.0f, 11834.0f, 11835.0f, 11836.0f, 11837.0f, 11838.0f, 11839.0f, 11840.0f, 11841.0f, 11842.0f, 11843.0f, 11844.0f, 11845.0f, 11846.0f, 11847.0f, 11848.0f, 11849.0f, 11850.0f, 11851.0f, 11852.0f, 11853.0f, 11854.0f, 11855.0f, 11856.0f, 11857.0f, 11858.0f, 11859.0f, 11860.0f, 11861.0f, 11862.0f, 11863.0f, 11864.0f, 11865.0f, 11866.0f, 11867.0f, 11868.0f, 11869.0f, 11870.0f, 11871.0f, 11872.0f, 11873.0f, 11874.0f, 11875.0f, 11876.0f, 11877.0f, 11878.0f, 11879.0f, 11880.0f, 11881.0f, 11882.0f, 11883.0f, 11884.0f, 11885.0f, 11886.0f, 11887.0f, 11888.0f, 11889.0f, 11890.0f, 11891.0f, 11892.0f, 11893.0f, 11894.0f, 11895.0f, 11896.0f, 11897.0f, 11898.0f, 11899.0f, 11900.0f, 
11901.0f, 11902.0f, 11903.0f, 11904.0f, 11905.0f, 11906.0f, 11907.0f, 11908.0f, 11909.0f, 11910.0f, 11911.0f, 11912.0f, 11913.0f, 11914.0f, 11915.0f, 11916.0f, 11917.0f, 11918.0f, 11919.0f, 11920.0f, 11921.0f, 11922.0f, 11923.0f, 11924.0f, 11925.0f, 11926.0f, 11927.0f, 11928.0f, 11929.0f, 11930.0f, 11931.0f, 11932.0f, 11933.0f, 11934.0f, 11935.0f, 11936.0f, 11937.0f, 11938.0f, 11939.0f, 11940.0f, 11941.0f, 11942.0f, 11943.0f, 11944.0f, 11945.0f, 11946.0f, 11947.0f, 11948.0f, 11949.0f, 11950.0f, 11951.0f, 11952.0f, 11953.0f, 11954.0f, 11955.0f, 11956.0f, 11957.0f, 11958.0f, 11959.0f, 11960.0f, 11961.0f, 11962.0f, 11963.0f, 11964.0f, 11965.0f, 11966.0f, 11967.0f, 11968.0f, 11969.0f, 11970.0f, 11971.0f, 11972.0f, 11973.0f, 11974.0f, 11975.0f, 11976.0f, 11977.0f, 11978.0f, 11979.0f, 11980.0f, 11981.0f, 11982.0f, 11983.0f, 11984.0f, 11985.0f, 11986.0f, 11987.0f, 11988.0f, 11989.0f, 11990.0f, 11991.0f, 11992.0f, 11993.0f, 11994.0f, 11995.0f, 11996.0f, 11997.0f, 11998.0f, 11999.0f, 12000.0f, 12001.0f, 12002.0f, 12003.0f, 12004.0f, 12005.0f, 12006.0f, 12007.0f, 12008.0f, 12009.0f, 12010.0f, 12011.0f, 12012.0f, 12013.0f, 12014.0f, 12015.0f, 12016.0f, 12017.0f, 12018.0f, 12019.0f, 12020.0f, 12021.0f, 12022.0f, 12023.0f, 12024.0f, 12025.0f, 12026.0f, 12027.0f, 12028.0f, 12029.0f, 12030.0f, 12031.0f, 12032.0f, 12033.0f, 12034.0f, 12035.0f, 12036.0f, 12037.0f, 12038.0f, 12039.0f, 12040.0f, 12041.0f, 12042.0f, 12043.0f, 12044.0f, 12045.0f, 12046.0f, 12047.0f, 12048.0f, 12049.0f, 12050.0f, 12051.0f, 12052.0f, 12053.0f, 12054.0f, 12055.0f, 12056.0f, 12057.0f, 12058.0f, 12059.0f, 12060.0f, 12061.0f, 12062.0f, 12063.0f, 12064.0f, 12065.0f, 12066.0f, 12067.0f, 12068.0f, 12069.0f, 12070.0f, 12071.0f, 12072.0f, 12073.0f, 12074.0f, 12075.0f, 12076.0f, 12077.0f, 12078.0f, 12079.0f, 12080.0f, 12081.0f, 12082.0f, 12083.0f, 12084.0f, 12085.0f, 12086.0f, 12087.0f, 12088.0f, 12089.0f, 12090.0f, 12091.0f, 12092.0f, 12093.0f, 12094.0f, 12095.0f, 12096.0f, 12097.0f, 12098.0f, 12099.0f, 12100.0f, 
12101.0f, 12102.0f, 12103.0f, 12104.0f, 12105.0f, 12106.0f, 12107.0f, 12108.0f, 12109.0f, 12110.0f, 12111.0f, 12112.0f, 12113.0f, 12114.0f, 12115.0f, 12116.0f, 12117.0f, 12118.0f, 12119.0f, 12120.0f, 12121.0f, 12122.0f, 12123.0f, 12124.0f, 12125.0f, 12126.0f, 12127.0f, 12128.0f, 12129.0f, 12130.0f, 12131.0f, 12132.0f, 12133.0f, 12134.0f, 12135.0f, 12136.0f, 12137.0f, 12138.0f, 12139.0f, 12140.0f, 12141.0f, 12142.0f, 12143.0f, 12144.0f, 12145.0f, 12146.0f, 12147.0f, 12148.0f, 12149.0f, 12150.0f, 12151.0f, 12152.0f, 12153.0f, 12154.0f, 12155.0f, 12156.0f, 12157.0f, 12158.0f, 12159.0f, 12160.0f, 12161.0f, 12162.0f, 12163.0f, 12164.0f, 12165.0f, 12166.0f, 12167.0f, 12168.0f, 12169.0f, 12170.0f, 12171.0f, 12172.0f, 12173.0f, 12174.0f, 12175.0f, 12176.0f, 12177.0f, 12178.0f, 12179.0f, 12180.0f, 12181.0f, 12182.0f, 12183.0f, 12184.0f, 12185.0f, 12186.0f, 12187.0f, 12188.0f, 12189.0f, 12190.0f, 12191.0f, 12192.0f, 12193.0f, 12194.0f, 12195.0f, 12196.0f, 12197.0f, 12198.0f, 12199.0f, 12200.0f, 12201.0f, 12202.0f, 12203.0f, 12204.0f, 12205.0f, 12206.0f, 12207.0f, 12208.0f, 12209.0f, 12210.0f, 12211.0f, 12212.0f, 12213.0f, 12214.0f, 12215.0f, 12216.0f, 12217.0f, 12218.0f, 12219.0f, 12220.0f, 12221.0f, 12222.0f, 12223.0f, 12224.0f, 12225.0f, 12226.0f, 12227.0f, 12228.0f, 12229.0f, 12230.0f, 12231.0f, 12232.0f, 12233.0f, 12234.0f, 12235.0f, 12236.0f, 12237.0f, 12238.0f, 12239.0f, 12240.0f, 12241.0f, 12242.0f, 12243.0f, 12244.0f, 12245.0f, 12246.0f, 12247.0f, 12248.0f, 12249.0f, 12250.0f, 12251.0f, 12252.0f, 12253.0f, 12254.0f, 12255.0f, 12256.0f, 12257.0f, 12258.0f, 12259.0f, 12260.0f, 12261.0f, 12262.0f, 12263.0f, 12264.0f, 12265.0f, 12266.0f, 12267.0f, 12268.0f, 12269.0f, 12270.0f, 12271.0f, 12272.0f, 12273.0f, 12274.0f, 12275.0f, 12276.0f, 12277.0f, 12278.0f, 12279.0f, 12280.0f, 12281.0f, 12282.0f, 12283.0f, 12284.0f, 12285.0f, 12286.0f, 12287.0f, 12288.0f, 12289.0f, 12290.0f, 12291.0f, 12292.0f, 12293.0f, 12294.0f, 12295.0f, 12296.0f, 12297.0f, 12298.0f, 12299.0f, 12300.0f, 
12301.0f, 12302.0f, 12303.0f, 12304.0f, 12305.0f, 12306.0f, 12307.0f, 12308.0f, 12309.0f, 12310.0f, 12311.0f, 12312.0f, 12313.0f, 12314.0f, 12315.0f, 12316.0f, 12317.0f, 12318.0f, 12319.0f, 12320.0f, 12321.0f, 12322.0f, 12323.0f, 12324.0f, 12325.0f, 12326.0f, 12327.0f, 12328.0f, 12329.0f, 12330.0f, 12331.0f, 12332.0f, 12333.0f, 12334.0f, 12335.0f, 12336.0f, 12337.0f, 12338.0f, 12339.0f, 12340.0f, 12341.0f, 12342.0f, 12343.0f, 12344.0f, 12345.0f, 12346.0f, 12347.0f, 12348.0f, 12349.0f, 12350.0f, 12351.0f, 12352.0f, 12353.0f, 12354.0f, 12355.0f, 12356.0f, 12357.0f, 12358.0f, 12359.0f, 12360.0f, 12361.0f, 12362.0f, 12363.0f, 12364.0f, 12365.0f, 12366.0f, 12367.0f, 12368.0f, 12369.0f, 12370.0f, 12371.0f, 12372.0f, 12373.0f, 12374.0f, 12375.0f, 12376.0f, 12377.0f, 12378.0f, 12379.0f, 12380.0f, 12381.0f, 12382.0f, 12383.0f, 12384.0f, 12385.0f, 12386.0f, 12387.0f, 12388.0f, 12389.0f, 12390.0f, 12391.0f, 12392.0f, 12393.0f, 12394.0f, 12395.0f, 12396.0f, 12397.0f, 12398.0f, 12399.0f, 12400.0f, 12401.0f, 12402.0f, 12403.0f, 12404.0f, 12405.0f, 12406.0f, 12407.0f, 12408.0f, 12409.0f, 12410.0f, 12411.0f, 12412.0f, 12413.0f, 12414.0f, 12415.0f, 12416.0f, 12417.0f, 12418.0f, 12419.0f, 12420.0f, 12421.0f, 12422.0f, 12423.0f, 12424.0f, 12425.0f, 12426.0f, 12427.0f, 12428.0f, 12429.0f, 12430.0f, 12431.0f, 12432.0f, 12433.0f, 12434.0f, 12435.0f, 12436.0f, 12437.0f, 12438.0f, 12439.0f, 12440.0f, 12441.0f, 12442.0f, 12443.0f, 12444.0f, 12445.0f, 12446.0f, 12447.0f, 12448.0f, 12449.0f, 12450.0f, 12451.0f, 12452.0f, 12453.0f, 12454.0f, 12455.0f, 12456.0f, 12457.0f, 12458.0f, 12459.0f, 12460.0f, 12461.0f, 12462.0f, 12463.0f, 12464.0f, 12465.0f, 12466.0f, 12467.0f, 12468.0f, 12469.0f, 12470.0f, 12471.0f, 12472.0f, 12473.0f, 12474.0f, 12475.0f, 12476.0f, 12477.0f, 12478.0f, 12479.0f, 12480.0f, 12481.0f, 12482.0f, 12483.0f, 12484.0f, 12485.0f, 12486.0f, 12487.0f, 12488.0f, 12489.0f, 12490.0f, 12491.0f, 12492.0f, 12493.0f, 12494.0f, 12495.0f, 12496.0f, 12497.0f, 12498.0f, 12499.0f, 12500.0f, 
12501.0f, 12502.0f, 12503.0f, 12504.0f, 12505.0f, 12506.0f, 12507.0f, 12508.0f, 12509.0f, 12510.0f, 12511.0f, 12512.0f, 12513.0f, 12514.0f, 12515.0f, 12516.0f, 12517.0f, 12518.0f, 12519.0f, 12520.0f, 12521.0f, 12522.0f, 12523.0f, 12524.0f, 12525.0f, 12526.0f, 12527.0f, 12528.0f, 12529.0f, 12530.0f, 12531.0f, 12532.0f, 12533.0f, 12534.0f, 12535.0f, 12536.0f, 12537.0f, 12538.0f, 12539.0f, 12540.0f, 12541.0f, 12542.0f, 12543.0f, 12544.0f, 12545.0f, 12546.0f, 12547.0f, 12548.0f, 12549.0f, 12550.0f, 12551.0f, 12552.0f, 12553.0f, 12554.0f, 12555.0f, 12556.0f, 12557.0f, 12558.0f, 12559.0f, 12560.0f, 12561.0f, 12562.0f, 12563.0f, 12564.0f, 12565.0f, 12566.0f, 12567.0f, 12568.0f, 12569.0f, 12570.0f, 12571.0f, 12572.0f, 12573.0f, 12574.0f, 12575.0f, 12576.0f, 12577.0f, 12578.0f, 12579.0f, 12580.0f, 12581.0f, 12582.0f, 12583.0f, 12584.0f, 12585.0f, 12586.0f, 12587.0f, 12588.0f, 12589.0f, 12590.0f, 12591.0f, 12592.0f, 12593.0f, 12594.0f, 12595.0f, 12596.0f, 12597.0f, 12598.0f, 12599.0f, 12600.0f, 12601.0f, 12602.0f, 12603.0f, 12604.0f, 12605.0f, 12606.0f, 12607.0f, 12608.0f, 12609.0f, 12610.0f, 12611.0f, 12612.0f, 12613.0f, 12614.0f, 12615.0f, 12616.0f, 12617.0f, 12618.0f, 12619.0f, 12620.0f, 12621.0f, 12622.0f, 12623.0f, 12624.0f, 12625.0f, 12626.0f, 12627.0f, 12628.0f, 12629.0f, 12630.0f, 12631.0f, 12632.0f, 12633.0f, 12634.0f, 12635.0f, 12636.0f, 12637.0f, 12638.0f, 12639.0f, 12640.0f, 12641.0f, 12642.0f, 12643.0f, 12644.0f, 12645.0f, 12646.0f, 12647.0f, 12648.0f, 12649.0f, 12650.0f, 12651.0f, 12652.0f, 12653.0f, 12654.0f, 12655.0f, 12656.0f, 12657.0f, 12658.0f, 12659.0f, 12660.0f, 12661.0f, 12662.0f, 12663.0f, 12664.0f, 12665.0f, 12666.0f, 12667.0f, 12668.0f, 12669.0f, 12670.0f, 12671.0f, 12672.0f, 12673.0f, 12674.0f, 12675.0f, 12676.0f, 12677.0f, 12678.0f, 12679.0f, 12680.0f, 12681.0f, 12682.0f, 12683.0f, 12684.0f, 12685.0f, 12686.0f, 12687.0f, 12688.0f, 12689.0f, 12690.0f, 12691.0f, 12692.0f, 12693.0f, 12694.0f, 12695.0f, 12696.0f, 12697.0f, 12698.0f, 12699.0f, 12700.0f, 
12701.0f, 12702.0f, 12703.0f, 12704.0f, 12705.0f, 12706.0f, 12707.0f, 12708.0f, 12709.0f, 12710.0f, 12711.0f, 12712.0f, 12713.0f, 12714.0f, 12715.0f, 12716.0f, 12717.0f, 12718.0f, 12719.0f}}, {1, {0.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, -6.0f, -7.0f, -8.0f, -9.0f, -10.0f, -11.0f, -12.0f, -13.0f, -14.0f, -15.0f, -16.0f, -17.0f, -18.0f, -19.0f, -20.0f, -21.0f, -22.0f, -23.0f, -24.0f, -25.0f, -26.0f, -27.0f, -28.0f, -29.0f, -30.0f, -31.0f, -32.0f, -33.0f, -34.0f, -35.0f, -36.0f, -37.0f, -38.0f, -39.0f, -40.0f, -41.0f, -42.0f, -43.0f, -44.0f, -45.0f, -46.0f, -47.0f, -48.0f, -49.0f, -50.0f, -51.0f, -52.0f, -53.0f, -54.0f, -55.0f, -56.0f, -57.0f, -58.0f, -59.0f, -60.0f, -61.0f, -62.0f, -63.0f, -64.0f, -65.0f, -66.0f, -67.0f, -68.0f, -69.0f, -70.0f, -71.0f, -72.0f, -73.0f, -74.0f, -75.0f, -76.0f, -77.0f, -78.0f, -79.0f, -80.0f, -81.0f, -82.0f, -83.0f, -84.0f, -85.0f, -86.0f, -87.0f, -88.0f, -89.0f, -90.0f, -91.0f, -92.0f, -93.0f, -94.0f, -95.0f, -96.0f, -97.0f, -98.0f, -99.0f, -100.0f, -101.0f, -102.0f, -103.0f, -104.0f, -105.0f, -106.0f, -107.0f, -108.0f, -109.0f, -110.0f, -111.0f, -112.0f, -113.0f, -114.0f, -115.0f, -116.0f, -117.0f, -118.0f, -119.0f, -120.0f, -121.0f, -122.0f, -123.0f, -124.0f, -125.0f, -126.0f, -127.0f, -128.0f, -129.0f, -130.0f, -131.0f, -132.0f, -133.0f, -134.0f, -135.0f, -136.0f, -137.0f, -138.0f, -139.0f, -140.0f, -141.0f, -142.0f, -143.0f, -144.0f, -145.0f, -146.0f, -147.0f, -148.0f, -149.0f, -150.0f, -151.0f, -152.0f, -153.0f, -154.0f, -155.0f, -156.0f, -157.0f, -158.0f, -159.0f, -160.0f, -161.0f, -162.0f, -163.0f, -164.0f, -165.0f, -166.0f, -167.0f, -168.0f, -169.0f, -170.0f, -171.0f, -172.0f, -173.0f, -174.0f, -175.0f, -176.0f, -177.0f, -178.0f, -179.0f, -180.0f, -181.0f, -182.0f, -183.0f, -184.0f, -185.0f, -186.0f, -187.0f, -188.0f, -189.0f, -190.0f, -191.0f, -192.0f, -193.0f, -194.0f, -195.0f, -196.0f, -197.0f, -198.0f, -199.0f, -200.0f, -201.0f, -202.0f, -203.0f, -204.0f, -205.0f, -206.0f, -207.0f, -208.0f, -209.0f, -210.0f, -211.0f, 
-212.0f, -213.0f, -214.0f, -215.0f, -216.0f, -217.0f, -218.0f, -219.0f, -220.0f, -221.0f, -222.0f, -223.0f, -224.0f, -225.0f, -226.0f, -227.0f, -228.0f, -229.0f, -230.0f, -231.0f, -232.0f, -233.0f, -234.0f, -235.0f, -236.0f, -237.0f, -238.0f, -239.0f, -240.0f, -241.0f, -242.0f, -243.0f, -244.0f, -245.0f, -246.0f, -247.0f, -248.0f, -249.0f, -250.0f, -251.0f, -252.0f, -253.0f, -254.0f, -255.0f, -256.0f, -257.0f, -258.0f, -259.0f, -260.0f, -261.0f, -262.0f, -263.0f, -264.0f, -265.0f, -266.0f, -267.0f, -268.0f, -269.0f, -270.0f, -271.0f, -272.0f, -273.0f, -274.0f, -275.0f, -276.0f, -277.0f, -278.0f, -279.0f, -280.0f, -281.0f, -282.0f, -283.0f, -284.0f, -285.0f, -286.0f, -287.0f, -288.0f, -289.0f, -290.0f, -291.0f, -292.0f, -293.0f, -294.0f, -295.0f, -296.0f, -297.0f, -298.0f, -299.0f, -300.0f, -301.0f, -302.0f, -303.0f, -304.0f, -305.0f, -306.0f, -307.0f, -308.0f, -309.0f, -310.0f, -311.0f, -312.0f, -313.0f, -314.0f, -315.0f, -316.0f, -317.0f, -318.0f, -319.0f, -320.0f, -321.0f, -322.0f, -323.0f, -324.0f, -325.0f, -326.0f, -327.0f, -328.0f, -329.0f, -330.0f, -331.0f, -332.0f, -333.0f, -334.0f, -335.0f, -336.0f, -337.0f, -338.0f, -339.0f, -340.0f, -341.0f, -342.0f, -343.0f, -344.0f, -345.0f, -346.0f, -347.0f, -348.0f, -349.0f, -350.0f, -351.0f, -352.0f, -353.0f, -354.0f, -355.0f, -356.0f, -357.0f, -358.0f, -359.0f, -360.0f, -361.0f, -362.0f, -363.0f, -364.0f, -365.0f, -366.0f, -367.0f, -368.0f, -369.0f, -370.0f, -371.0f, -372.0f, -373.0f, -374.0f, -375.0f, -376.0f, -377.0f, -378.0f, -379.0f, -380.0f, -381.0f, -382.0f, -383.0f, -384.0f, -385.0f, -386.0f, -387.0f, -388.0f, -389.0f, -390.0f, -391.0f, -392.0f, -393.0f, -394.0f, -395.0f, -396.0f, -397.0f, -398.0f, -399.0f, -400.0f, -401.0f, -402.0f, -403.0f, -404.0f, -405.0f, -406.0f, -407.0f, -408.0f, -409.0f, -410.0f, -411.0f, -412.0f, -413.0f, -414.0f, -415.0f, -416.0f, -417.0f, -418.0f, -419.0f, -420.0f, -421.0f, -422.0f, -423.0f, -424.0f, -425.0f, -426.0f, -427.0f, -428.0f, -429.0f, -430.0f, -431.0f, -432.0f, -433.0f, 
-434.0f, -435.0f, -436.0f, -437.0f, -438.0f, -439.0f, -440.0f, -441.0f, -442.0f, -443.0f, -444.0f, -445.0f, -446.0f, -447.0f, -448.0f, -449.0f, -450.0f, -451.0f, -452.0f, -453.0f, -454.0f, -455.0f, -456.0f, -457.0f, -458.0f, -459.0f, -460.0f, -461.0f, -462.0f, -463.0f, -464.0f, -465.0f, -466.0f, -467.0f, -468.0f, -469.0f, -470.0f, -471.0f, -472.0f, -473.0f, -474.0f, -475.0f, -476.0f, -477.0f, -478.0f, -479.0f, -480.0f, -481.0f, -482.0f, -483.0f, -484.0f, -485.0f, -486.0f, -487.0f, -488.0f, -489.0f, -490.0f, -491.0f, -492.0f, -493.0f, -494.0f, -495.0f, -496.0f, -497.0f, -498.0f, -499.0f, -500.0f, -501.0f, -502.0f, -503.0f, -504.0f, -505.0f, -506.0f, -507.0f, -508.0f, -509.0f, -510.0f, -511.0f, -512.0f, -513.0f, -514.0f, -515.0f, -516.0f, -517.0f, -518.0f, -519.0f, -520.0f, -521.0f, -522.0f, -523.0f, -524.0f, -525.0f, -526.0f, -527.0f, -528.0f, -529.0f, -530.0f, -531.0f, -532.0f, -533.0f, -534.0f, -535.0f, -536.0f, -537.0f, -538.0f, -539.0f, -540.0f, -541.0f, -542.0f, -543.0f, -544.0f, -545.0f, -546.0f, -547.0f, -548.0f, -549.0f, -550.0f, -551.0f, -552.0f, -553.0f, -554.0f, -555.0f, -556.0f, -557.0f, -558.0f, -559.0f, -560.0f, -561.0f, -562.0f, -563.0f, -564.0f, -565.0f, -566.0f, -567.0f, -568.0f, -569.0f, -570.0f, -571.0f, -572.0f, -573.0f, -574.0f, -575.0f, -576.0f, -577.0f, -578.0f, -579.0f, -580.0f, -581.0f, -582.0f, -583.0f, -584.0f, -585.0f, -586.0f, -587.0f, -588.0f, -589.0f, -590.0f, -591.0f, -592.0f, -593.0f, -594.0f, -595.0f, -596.0f, -597.0f, -598.0f, -599.0f, -600.0f, -601.0f, -602.0f, -603.0f, -604.0f, -605.0f, -606.0f, -607.0f, -608.0f, -609.0f, -610.0f, -611.0f, -612.0f, -613.0f, -614.0f, -615.0f, -616.0f, -617.0f, -618.0f, -619.0f, -620.0f, -621.0f, -622.0f, -623.0f, -624.0f, -625.0f, -626.0f, -627.0f, -628.0f, -629.0f, -630.0f, -631.0f, -632.0f, -633.0f, -634.0f, -635.0f, -636.0f, -637.0f, -638.0f, -639.0f, -640.0f, -641.0f, -642.0f, -643.0f, -644.0f, -645.0f, -646.0f, -647.0f, -648.0f, -649.0f, -650.0f, -651.0f, -652.0f, -653.0f, -654.0f, -655.0f, 
-656.0f, -657.0f, -658.0f, -659.0f, -660.0f, -661.0f, -662.0f, -663.0f, -664.0f, -665.0f, -666.0f, -667.0f, -668.0f, -669.0f, -670.0f, -671.0f, -672.0f, -673.0f, -674.0f, -675.0f, -676.0f, -677.0f, -678.0f, -679.0f, -680.0f, -681.0f, -682.0f, -683.0f, -684.0f, -685.0f, -686.0f, -687.0f, -688.0f, -689.0f, -690.0f, -691.0f, -692.0f, -693.0f, -694.0f, -695.0f, -696.0f, -697.0f, -698.0f, -699.0f, -700.0f, -701.0f, -702.0f, -703.0f, -704.0f, -705.0f, -706.0f, -707.0f, -708.0f, -709.0f, -710.0f, -711.0f, -712.0f, -713.0f, -714.0f, -715.0f, -716.0f, -717.0f, -718.0f, -719.0f, -720.0f, -721.0f, -722.0f, -723.0f, -724.0f, -725.0f, -726.0f, -727.0f, -728.0f, -729.0f, -730.0f, -731.0f, -732.0f, -733.0f, -734.0f, -735.0f, -736.0f, -737.0f, -738.0f, -739.0f, -740.0f, -741.0f, -742.0f, -743.0f, -744.0f, -745.0f, -746.0f, -747.0f, -748.0f, -749.0f, -750.0f, -751.0f, -752.0f, -753.0f, -754.0f, -755.0f, -756.0f, -757.0f, -758.0f, -759.0f, -760.0f, -761.0f, -762.0f, -763.0f, -764.0f, -765.0f, -766.0f, -767.0f, -768.0f, -769.0f, -770.0f, -771.0f, -772.0f, -773.0f, -774.0f, -775.0f, -776.0f, -777.0f, -778.0f, -779.0f, -780.0f, -781.0f, -782.0f, -783.0f, -784.0f, -785.0f, -786.0f, -787.0f, -788.0f, -789.0f, -790.0f, -791.0f, -792.0f, -793.0f, -794.0f, -795.0f, -796.0f, -797.0f, -798.0f, -799.0f, -800.0f, -801.0f, -802.0f, -803.0f, -804.0f, -805.0f, -806.0f, -807.0f, -808.0f, -809.0f, -810.0f, -811.0f, -812.0f, -813.0f, -814.0f, -815.0f, -816.0f, -817.0f, -818.0f, -819.0f, -820.0f, -821.0f, -822.0f, -823.0f, -824.0f, -825.0f, -826.0f, -827.0f, -828.0f, -829.0f, -830.0f, -831.0f, -832.0f, -833.0f, -834.0f, -835.0f, -836.0f, -837.0f, -838.0f, -839.0f, -840.0f, -841.0f, -842.0f, -843.0f, -844.0f, -845.0f, -846.0f, -847.0f, -848.0f, -849.0f, -850.0f, -851.0f, -852.0f, -853.0f, -854.0f, -855.0f, -856.0f, -857.0f, -858.0f, -859.0f, -860.0f, -861.0f, -862.0f, -863.0f, -864.0f, -865.0f, -866.0f, -867.0f, -868.0f, -869.0f, -870.0f, -871.0f, -872.0f, -873.0f, -874.0f, -875.0f, -876.0f, -877.0f, 
-878.0f, -879.0f, -880.0f, -881.0f, -882.0f, -883.0f, -884.0f, -885.0f, -886.0f, -887.0f, -888.0f, -889.0f, -890.0f, -891.0f, -892.0f, -893.0f, -894.0f, -895.0f, -896.0f, -897.0f, -898.0f, -899.0f, -900.0f, -901.0f, -902.0f, -903.0f, -904.0f, -905.0f, -906.0f, -907.0f, -908.0f, -909.0f, -910.0f, -911.0f, -912.0f, -913.0f, -914.0f, -915.0f, -916.0f, -917.0f, -918.0f, -919.0f, -920.0f, -921.0f, -922.0f, -923.0f, -924.0f, -925.0f, -926.0f, -927.0f, -928.0f, -929.0f, -930.0f, -931.0f, -932.0f, -933.0f, -934.0f, -935.0f, -936.0f, -937.0f, -938.0f, -939.0f, -940.0f, -941.0f, -942.0f, -943.0f, -944.0f, -945.0f, -946.0f, -947.0f, -948.0f, -949.0f, -950.0f, -951.0f, -952.0f, -953.0f, -954.0f, -955.0f, -956.0f, -957.0f, -958.0f, -959.0f, -960.0f, -961.0f, -962.0f, -963.0f, -964.0f, -965.0f, -966.0f, -967.0f, -968.0f, -969.0f, -970.0f, -971.0f, -972.0f, -973.0f, -974.0f, -975.0f, -976.0f, -977.0f, -978.0f, -979.0f, -980.0f, -981.0f, -982.0f, -983.0f, -984.0f, -985.0f, -986.0f, -987.0f, -988.0f, -989.0f, -990.0f, -991.0f, -992.0f, -993.0f, -994.0f, -995.0f, -996.0f, -997.0f, -998.0f, -999.0f, -1000.0f, -1001.0f, -1002.0f, -1003.0f, -1004.0f, -1005.0f, -1006.0f, -1007.0f, -1008.0f, -1009.0f, -1010.0f, -1011.0f, -1012.0f, -1013.0f, -1014.0f, -1015.0f, -1016.0f, -1017.0f, -1018.0f, -1019.0f, -1020.0f, -1021.0f, -1022.0f, -1023.0f, -1024.0f, -1025.0f, -1026.0f, -1027.0f, -1028.0f, -1029.0f, -1030.0f, -1031.0f, -1032.0f, -1033.0f, -1034.0f, -1035.0f, -1036.0f, -1037.0f, -1038.0f, -1039.0f, -1040.0f, -1041.0f, -1042.0f, -1043.0f, -1044.0f, -1045.0f, -1046.0f, -1047.0f, -1048.0f, -1049.0f, -1050.0f, -1051.0f, -1052.0f, -1053.0f, -1054.0f, -1055.0f, -1056.0f, -1057.0f, -1058.0f, -1059.0f, -1060.0f, -1061.0f, -1062.0f, -1063.0f, -1064.0f, -1065.0f, -1066.0f, -1067.0f, -1068.0f, -1069.0f, -1070.0f, -1071.0f, -1072.0f, -1073.0f, -1074.0f, -1075.0f, -1076.0f, -1077.0f, -1078.0f, -1079.0f, -1080.0f, -1081.0f, -1082.0f, -1083.0f, -1084.0f, -1085.0f, -1086.0f, -1087.0f, -1088.0f, -1089.0f, 
-1090.0f, -1091.0f, -1092.0f, -1093.0f, -1094.0f, -1095.0f, -1096.0f, -1097.0f, -1098.0f, -1099.0f, -1100.0f, -1101.0f, -1102.0f, -1103.0f, -1104.0f, -1105.0f, -1106.0f, -1107.0f, -1108.0f, -1109.0f, -1110.0f, -1111.0f, -1112.0f, -1113.0f, -1114.0f, -1115.0f, -1116.0f, -1117.0f, -1118.0f, -1119.0f, -1120.0f, -1121.0f, -1122.0f, -1123.0f, -1124.0f, -1125.0f, -1126.0f, -1127.0f, -1128.0f, -1129.0f, -1130.0f, -1131.0f, -1132.0f, -1133.0f, -1134.0f, -1135.0f, -1136.0f, -1137.0f, -1138.0f, -1139.0f, -1140.0f, -1141.0f, -1142.0f, -1143.0f, -1144.0f, -1145.0f, -1146.0f, -1147.0f, -1148.0f, -1149.0f, -1150.0f, -1151.0f, -1152.0f, -1153.0f, -1154.0f, -1155.0f, -1156.0f, -1157.0f, -1158.0f, -1159.0f, -1160.0f, -1161.0f, -1162.0f, -1163.0f, -1164.0f, -1165.0f, -1166.0f, -1167.0f, -1168.0f, -1169.0f, -1170.0f, -1171.0f, -1172.0f, -1173.0f, -1174.0f, -1175.0f, -1176.0f, -1177.0f, -1178.0f, -1179.0f, -1180.0f, -1181.0f, -1182.0f, -1183.0f, -1184.0f, -1185.0f, -1186.0f, -1187.0f, -1188.0f, -1189.0f, -1190.0f, -1191.0f, -1192.0f, -1193.0f, -1194.0f, -1195.0f, -1196.0f, -1197.0f, -1198.0f, -1199.0f, -1200.0f, -1201.0f, -1202.0f, -1203.0f, -1204.0f, -1205.0f, -1206.0f, -1207.0f, -1208.0f, -1209.0f, -1210.0f, -1211.0f, -1212.0f, -1213.0f, -1214.0f, -1215.0f, -1216.0f, -1217.0f, -1218.0f, -1219.0f, -1220.0f, -1221.0f, -1222.0f, -1223.0f, -1224.0f, -1225.0f, -1226.0f, -1227.0f, -1228.0f, -1229.0f, -1230.0f, -1231.0f, -1232.0f, -1233.0f, -1234.0f, -1235.0f, -1236.0f, -1237.0f, -1238.0f, -1239.0f, -1240.0f, -1241.0f, -1242.0f, -1243.0f, -1244.0f, -1245.0f, -1246.0f, -1247.0f, -1248.0f, -1249.0f, -1250.0f, -1251.0f, -1252.0f, -1253.0f, -1254.0f, -1255.0f, -1256.0f, -1257.0f, -1258.0f, -1259.0f, -1260.0f, -1261.0f, -1262.0f, -1263.0f, -1264.0f, -1265.0f, -1266.0f, -1267.0f, -1268.0f, -1269.0f, -1270.0f, -1271.0f, -1272.0f, -1273.0f, -1274.0f, -1275.0f, -1276.0f, -1277.0f, -1278.0f, -1279.0f, -1280.0f, -1281.0f, -1282.0f, -1283.0f, -1284.0f, -1285.0f, -1286.0f, -1287.0f, -1288.0f, -1289.0f, 
-1290.0f, -1291.0f, -1292.0f, -1293.0f, -1294.0f, -1295.0f, -1296.0f, -1297.0f, -1298.0f, -1299.0f, -1300.0f, -1301.0f, -1302.0f, -1303.0f, -1304.0f, -1305.0f, -1306.0f, -1307.0f, -1308.0f, -1309.0f, -1310.0f, -1311.0f, -1312.0f, -1313.0f, -1314.0f, -1315.0f, -1316.0f, -1317.0f, -1318.0f, -1319.0f, -1320.0f, -1321.0f, -1322.0f, -1323.0f, -1324.0f, -1325.0f, -1326.0f, -1327.0f, -1328.0f, -1329.0f, -1330.0f, -1331.0f, -1332.0f, -1333.0f, -1334.0f, -1335.0f, -1336.0f, -1337.0f, -1338.0f, -1339.0f, -1340.0f, -1341.0f, -1342.0f, -1343.0f, -1344.0f, -1345.0f, -1346.0f, -1347.0f, -1348.0f, -1349.0f, -1350.0f, -1351.0f, -1352.0f, -1353.0f, -1354.0f, -1355.0f, -1356.0f, -1357.0f, -1358.0f, -1359.0f, -1360.0f, -1361.0f, -1362.0f, -1363.0f, -1364.0f, -1365.0f, -1366.0f, -1367.0f, -1368.0f, -1369.0f, -1370.0f, -1371.0f, -1372.0f, -1373.0f, -1374.0f, -1375.0f, -1376.0f, -1377.0f, -1378.0f, -1379.0f, -1380.0f, -1381.0f, -1382.0f, -1383.0f, -1384.0f, -1385.0f, -1386.0f, -1387.0f, -1388.0f, -1389.0f, -1390.0f, -1391.0f, -1392.0f, -1393.0f, -1394.0f, -1395.0f, -1396.0f, -1397.0f, -1398.0f, -1399.0f, -1400.0f, -1401.0f, -1402.0f, -1403.0f, -1404.0f, -1405.0f, -1406.0f, -1407.0f, -1408.0f, -1409.0f, -1410.0f, -1411.0f, -1412.0f, -1413.0f, -1414.0f, -1415.0f, -1416.0f, -1417.0f, -1418.0f, -1419.0f, -1420.0f, -1421.0f, -1422.0f, -1423.0f, -1424.0f, -1425.0f, -1426.0f, -1427.0f, -1428.0f, -1429.0f, -1430.0f, -1431.0f, -1432.0f, -1433.0f, -1434.0f, -1435.0f, -1436.0f, -1437.0f, -1438.0f, -1439.0f, -1440.0f, -1441.0f, -1442.0f, -1443.0f, -1444.0f, -1445.0f, -1446.0f, -1447.0f, -1448.0f, -1449.0f, -1450.0f, -1451.0f, -1452.0f, -1453.0f, -1454.0f, -1455.0f, -1456.0f, -1457.0f, -1458.0f, -1459.0f, -1460.0f, -1461.0f, -1462.0f, -1463.0f, -1464.0f, -1465.0f, -1466.0f, -1467.0f, -1468.0f, -1469.0f, -1470.0f, -1471.0f, -1472.0f, -1473.0f, -1474.0f, -1475.0f, -1476.0f, -1477.0f, -1478.0f, -1479.0f, -1480.0f, -1481.0f, -1482.0f, -1483.0f, -1484.0f, -1485.0f, -1486.0f, -1487.0f, -1488.0f, -1489.0f, 
-1490.0f, -1491.0f, -1492.0f, -1493.0f, -1494.0f, -1495.0f, -1496.0f, -1497.0f, -1498.0f, -1499.0f, -1500.0f, -1501.0f, -1502.0f, -1503.0f, -1504.0f, -1505.0f, -1506.0f, -1507.0f, -1508.0f, -1509.0f, -1510.0f, -1511.0f, -1512.0f, -1513.0f, -1514.0f, -1515.0f, -1516.0f, -1517.0f, -1518.0f, -1519.0f, -1520.0f, -1521.0f, -1522.0f, -1523.0f, -1524.0f, -1525.0f, -1526.0f, -1527.0f, -1528.0f, -1529.0f, -1530.0f, -1531.0f, -1532.0f, -1533.0f, -1534.0f, -1535.0f, -1536.0f, -1537.0f, -1538.0f, -1539.0f, -1540.0f, -1541.0f, -1542.0f, -1543.0f, -1544.0f, -1545.0f, -1546.0f, -1547.0f, -1548.0f, -1549.0f, -1550.0f, -1551.0f, -1552.0f, -1553.0f, -1554.0f, -1555.0f, -1556.0f, -1557.0f, -1558.0f, -1559.0f, -1560.0f, -1561.0f, -1562.0f, -1563.0f, -1564.0f, -1565.0f, -1566.0f, -1567.0f, -1568.0f, -1569.0f, -1570.0f, -1571.0f, -1572.0f, -1573.0f, -1574.0f, -1575.0f, -1576.0f, -1577.0f, -1578.0f, -1579.0f, -1580.0f, -1581.0f, -1582.0f, -1583.0f, -1584.0f, -1585.0f, -1586.0f, -1587.0f, -1588.0f, -1589.0f, -1590.0f, -1591.0f, -1592.0f, -1593.0f, -1594.0f, -1595.0f, -1596.0f, -1597.0f, -1598.0f, -1599.0f, -1600.0f, -1601.0f, -1602.0f, -1603.0f, -1604.0f, -1605.0f, -1606.0f, -1607.0f, -1608.0f, -1609.0f, -1610.0f, -1611.0f, -1612.0f, -1613.0f, -1614.0f, -1615.0f, -1616.0f, -1617.0f, -1618.0f, -1619.0f, -1620.0f, -1621.0f, -1622.0f, -1623.0f, -1624.0f, -1625.0f, -1626.0f, -1627.0f, -1628.0f, -1629.0f, -1630.0f, -1631.0f, -1632.0f, -1633.0f, -1634.0f, -1635.0f, -1636.0f, -1637.0f, -1638.0f, -1639.0f, -1640.0f, -1641.0f, -1642.0f, -1643.0f, -1644.0f, -1645.0f, -1646.0f, -1647.0f, -1648.0f, -1649.0f, -1650.0f, -1651.0f, -1652.0f, -1653.0f, -1654.0f, -1655.0f, -1656.0f, -1657.0f, -1658.0f, -1659.0f, -1660.0f, -1661.0f, -1662.0f, -1663.0f, -1664.0f, -1665.0f, -1666.0f, -1667.0f, -1668.0f, -1669.0f, -1670.0f, -1671.0f, -1672.0f, -1673.0f, -1674.0f, -1675.0f, -1676.0f, -1677.0f, -1678.0f, -1679.0f, -1680.0f, -1681.0f, -1682.0f, -1683.0f, -1684.0f, -1685.0f, -1686.0f, -1687.0f, -1688.0f, -1689.0f, 
-1690.0f, -1691.0f, -1692.0f, -1693.0f, -1694.0f, -1695.0f, -1696.0f, -1697.0f, -1698.0f, -1699.0f, -1700.0f, -1701.0f, -1702.0f, -1703.0f, -1704.0f, -1705.0f, -1706.0f, -1707.0f, -1708.0f, -1709.0f, -1710.0f, -1711.0f, -1712.0f, -1713.0f, -1714.0f, -1715.0f, -1716.0f, -1717.0f, -1718.0f, -1719.0f, -1720.0f, -1721.0f, -1722.0f, -1723.0f, -1724.0f, -1725.0f, -1726.0f, -1727.0f, -1728.0f, -1729.0f, -1730.0f, -1731.0f, -1732.0f, -1733.0f, -1734.0f, -1735.0f, -1736.0f, -1737.0f, -1738.0f, -1739.0f, -1740.0f, -1741.0f, -1742.0f, -1743.0f, -1744.0f, -1745.0f, -1746.0f, -1747.0f, -1748.0f, -1749.0f, -1750.0f, -1751.0f, -1752.0f, -1753.0f, -1754.0f, -1755.0f, -1756.0f, -1757.0f, -1758.0f, -1759.0f, -1760.0f, -1761.0f, -1762.0f, -1763.0f, -1764.0f, -1765.0f, -1766.0f, -1767.0f, -1768.0f, -1769.0f, -1770.0f, -1771.0f, -1772.0f, -1773.0f, -1774.0f, -1775.0f, -1776.0f, -1777.0f, -1778.0f, -1779.0f, -1780.0f, -1781.0f, -1782.0f, -1783.0f, -1784.0f, -1785.0f, -1786.0f, -1787.0f, -1788.0f, -1789.0f, -1790.0f, -1791.0f, -1792.0f, -1793.0f, -1794.0f, -1795.0f, -1796.0f, -1797.0f, -1798.0f, -1799.0f, -1800.0f, -1801.0f, -1802.0f, -1803.0f, -1804.0f, -1805.0f, -1806.0f, -1807.0f, -1808.0f, -1809.0f, -1810.0f, -1811.0f, -1812.0f, -1813.0f, -1814.0f, -1815.0f, -1816.0f, -1817.0f, -1818.0f, -1819.0f, -1820.0f, -1821.0f, -1822.0f, -1823.0f, -1824.0f, -1825.0f, -1826.0f, -1827.0f, -1828.0f, -1829.0f, -1830.0f, -1831.0f, -1832.0f, -1833.0f, -1834.0f, -1835.0f, -1836.0f, -1837.0f, -1838.0f, -1839.0f, -1840.0f, -1841.0f, -1842.0f, -1843.0f, -1844.0f, -1845.0f, -1846.0f, -1847.0f, -1848.0f, -1849.0f, -1850.0f, -1851.0f, -1852.0f, -1853.0f, -1854.0f, -1855.0f, -1856.0f, -1857.0f, -1858.0f, -1859.0f, -1860.0f, -1861.0f, -1862.0f, -1863.0f, -1864.0f, -1865.0f, -1866.0f, -1867.0f, -1868.0f, -1869.0f, -1870.0f, -1871.0f, -1872.0f, -1873.0f, -1874.0f, -1875.0f, -1876.0f, -1877.0f, -1878.0f, -1879.0f, -1880.0f, -1881.0f, -1882.0f, -1883.0f, -1884.0f, -1885.0f, -1886.0f, -1887.0f, -1888.0f, -1889.0f, 
-1890.0f, -1891.0f, -1892.0f, -1893.0f, -1894.0f, -1895.0f, -1896.0f, -1897.0f, -1898.0f, -1899.0f, -1900.0f, -1901.0f, -1902.0f, -1903.0f, -1904.0f, -1905.0f, -1906.0f, -1907.0f, -1908.0f, -1909.0f, -1910.0f, -1911.0f, -1912.0f, -1913.0f, -1914.0f, -1915.0f, -1916.0f, -1917.0f, -1918.0f, -1919.0f, -1920.0f, -1921.0f, -1922.0f, -1923.0f, -1924.0f, -1925.0f, -1926.0f, -1927.0f, -1928.0f, -1929.0f, -1930.0f, -1931.0f, -1932.0f, -1933.0f, -1934.0f, -1935.0f, -1936.0f, -1937.0f, -1938.0f, -1939.0f, -1940.0f, -1941.0f, -1942.0f, -1943.0f, -1944.0f, -1945.0f, -1946.0f, -1947.0f, -1948.0f, -1949.0f, -1950.0f, -1951.0f, -1952.0f, -1953.0f, -1954.0f, -1955.0f, -1956.0f, -1957.0f, -1958.0f, -1959.0f, -1960.0f, -1961.0f, -1962.0f, -1963.0f, -1964.0f, -1965.0f, -1966.0f, -1967.0f, -1968.0f, -1969.0f, -1970.0f, -1971.0f, -1972.0f, -1973.0f, -1974.0f, -1975.0f, -1976.0f, -1977.0f, -1978.0f, -1979.0f, -1980.0f, -1981.0f, -1982.0f, -1983.0f, -1984.0f, -1985.0f, -1986.0f, -1987.0f, -1988.0f, -1989.0f, -1990.0f, -1991.0f, -1992.0f, -1993.0f, -1994.0f, -1995.0f, -1996.0f, -1997.0f, -1998.0f, -1999.0f, -2000.0f, -2001.0f, -2002.0f, -2003.0f, -2004.0f, -2005.0f, -2006.0f, -2007.0f, -2008.0f, -2009.0f, -2010.0f, -2011.0f, -2012.0f, -2013.0f, -2014.0f, -2015.0f, -2016.0f, -2017.0f, -2018.0f, -2019.0f, -2020.0f, -2021.0f, -2022.0f, -2023.0f, -2024.0f, -2025.0f, -2026.0f, -2027.0f, -2028.0f, -2029.0f, -2030.0f, -2031.0f, -2032.0f, -2033.0f, -2034.0f, -2035.0f, -2036.0f, -2037.0f, -2038.0f, -2039.0f, -2040.0f, -2041.0f, -2042.0f, -2043.0f, -2044.0f, -2045.0f, -2046.0f, -2047.0f, -2048.0f, -2049.0f, -2050.0f, -2051.0f, -2052.0f, -2053.0f, -2054.0f, -2055.0f, -2056.0f, -2057.0f, -2058.0f, -2059.0f, -2060.0f, -2061.0f, -2062.0f, -2063.0f, -2064.0f, -2065.0f, -2066.0f, -2067.0f, -2068.0f, -2069.0f, -2070.0f, -2071.0f, -2072.0f, -2073.0f, -2074.0f, -2075.0f, -2076.0f, -2077.0f, -2078.0f, -2079.0f, -2080.0f, -2081.0f, -2082.0f, -2083.0f, -2084.0f, -2085.0f, -2086.0f, -2087.0f, -2088.0f, -2089.0f, 
-2090.0f, -2091.0f, -2092.0f, -2093.0f, -2094.0f, -2095.0f, -2096.0f, -2097.0f, -2098.0f, -2099.0f, -2100.0f, -2101.0f, -2102.0f, -2103.0f, -2104.0f, -2105.0f, -2106.0f, -2107.0f, -2108.0f, -2109.0f, -2110.0f, -2111.0f, -2112.0f, -2113.0f, -2114.0f, -2115.0f, -2116.0f, -2117.0f, -2118.0f, -2119.0f, -2120.0f, -2121.0f, -2122.0f, -2123.0f, -2124.0f, -2125.0f, -2126.0f, -2127.0f, -2128.0f, -2129.0f, -2130.0f, -2131.0f, -2132.0f, -2133.0f, -2134.0f, -2135.0f, -2136.0f, -2137.0f, -2138.0f, -2139.0f, -2140.0f, -2141.0f, -2142.0f, -2143.0f, -2144.0f, -2145.0f, -2146.0f, -2147.0f, -2148.0f, -2149.0f, -2150.0f, -2151.0f, -2152.0f, -2153.0f, -2154.0f, -2155.0f, -2156.0f, -2157.0f, -2158.0f, -2159.0f, -2160.0f, -2161.0f, -2162.0f, -2163.0f, -2164.0f, -2165.0f, -2166.0f, -2167.0f, -2168.0f, -2169.0f, -2170.0f, -2171.0f, -2172.0f, -2173.0f, -2174.0f, -2175.0f, -2176.0f, -2177.0f, -2178.0f, -2179.0f, -2180.0f, -2181.0f, -2182.0f, -2183.0f, -2184.0f, -2185.0f, -2186.0f, -2187.0f, -2188.0f, -2189.0f, -2190.0f, -2191.0f, -2192.0f, -2193.0f, -2194.0f, -2195.0f, -2196.0f, -2197.0f, -2198.0f, -2199.0f, -2200.0f, -2201.0f, -2202.0f, -2203.0f, -2204.0f, -2205.0f, -2206.0f, -2207.0f, -2208.0f, -2209.0f, -2210.0f, -2211.0f, -2212.0f, -2213.0f, -2214.0f, -2215.0f, -2216.0f, -2217.0f, -2218.0f, -2219.0f, -2220.0f, -2221.0f, -2222.0f, -2223.0f, -2224.0f, -2225.0f, -2226.0f, -2227.0f, -2228.0f, -2229.0f, -2230.0f, -2231.0f, -2232.0f, -2233.0f, -2234.0f, -2235.0f, -2236.0f, -2237.0f, -2238.0f, -2239.0f, -2240.0f, -2241.0f, -2242.0f, -2243.0f, -2244.0f, -2245.0f, -2246.0f, -2247.0f, -2248.0f, -2249.0f, -2250.0f, -2251.0f, -2252.0f, -2253.0f, -2254.0f, -2255.0f, -2256.0f, -2257.0f, -2258.0f, -2259.0f, -2260.0f, -2261.0f, -2262.0f, -2263.0f, -2264.0f, -2265.0f, -2266.0f, -2267.0f, -2268.0f, -2269.0f, -2270.0f, -2271.0f, -2272.0f, -2273.0f, -2274.0f, -2275.0f, -2276.0f, -2277.0f, -2278.0f, -2279.0f, -2280.0f, -2281.0f, -2282.0f, -2283.0f, -2284.0f, -2285.0f, -2286.0f, -2287.0f, -2288.0f, -2289.0f, 
-2290.0f, -2291.0f, -2292.0f, -2293.0f, -2294.0f, -2295.0f, -2296.0f, -2297.0f, -2298.0f, -2299.0f, -2300.0f, -2301.0f, -2302.0f, -2303.0f, -2304.0f, -2305.0f, -2306.0f, -2307.0f, -2308.0f, -2309.0f, -2310.0f, -2311.0f, -2312.0f, -2313.0f, -2314.0f, -2315.0f, -2316.0f, -2317.0f, -2318.0f, -2319.0f, -2320.0f, -2321.0f, -2322.0f, -2323.0f, -2324.0f, -2325.0f, -2326.0f, -2327.0f, -2328.0f, -2329.0f, -2330.0f, -2331.0f, -2332.0f, -2333.0f, -2334.0f, -2335.0f, -2336.0f, -2337.0f, -2338.0f, -2339.0f, -2340.0f, -2341.0f, -2342.0f, -2343.0f, -2344.0f, -2345.0f, -2346.0f, -2347.0f, -2348.0f, -2349.0f, -2350.0f, -2351.0f, -2352.0f, -2353.0f, -2354.0f, -2355.0f, -2356.0f, -2357.0f, -2358.0f, -2359.0f, -2360.0f, -2361.0f, -2362.0f, -2363.0f, -2364.0f, -2365.0f, -2366.0f, -2367.0f, -2368.0f, -2369.0f, -2370.0f, -2371.0f, -2372.0f, -2373.0f, -2374.0f, -2375.0f, -2376.0f, -2377.0f, -2378.0f, -2379.0f, -2380.0f, -2381.0f, -2382.0f, -2383.0f, -2384.0f, -2385.0f, -2386.0f, -2387.0f, -2388.0f, -2389.0f, -2390.0f, -2391.0f, -2392.0f, -2393.0f, -2394.0f, -2395.0f, -2396.0f, -2397.0f, -2398.0f, -2399.0f, -2400.0f, -2401.0f, -2402.0f, -2403.0f, -2404.0f, -2405.0f, -2406.0f, -2407.0f, -2408.0f, -2409.0f, -2410.0f, -2411.0f, -2412.0f, -2413.0f, -2414.0f, -2415.0f, -2416.0f, -2417.0f, -2418.0f, -2419.0f, -2420.0f, -2421.0f, -2422.0f, -2423.0f, -2424.0f, -2425.0f, -2426.0f, -2427.0f, -2428.0f, -2429.0f, -2430.0f, -2431.0f, -2432.0f, -2433.0f, -2434.0f, -2435.0f, -2436.0f, -2437.0f, -2438.0f, -2439.0f, -2440.0f, -2441.0f, -2442.0f, -2443.0f, -2444.0f, -2445.0f, -2446.0f, -2447.0f, -2448.0f, -2449.0f, -2450.0f, -2451.0f, -2452.0f, -2453.0f, -2454.0f, -2455.0f, -2456.0f, -2457.0f, -2458.0f, -2459.0f, -2460.0f, -2461.0f, -2462.0f, -2463.0f, -2464.0f, -2465.0f, -2466.0f, -2467.0f, -2468.0f, -2469.0f, -2470.0f, -2471.0f, -2472.0f, -2473.0f, -2474.0f, -2475.0f, -2476.0f, -2477.0f, -2478.0f, -2479.0f, -2480.0f, -2481.0f, -2482.0f, -2483.0f, -2484.0f, -2485.0f, -2486.0f, -2487.0f, -2488.0f, -2489.0f, 
-2490.0f, -2491.0f, -2492.0f, -2493.0f, -2494.0f, -2495.0f, -2496.0f, -2497.0f, -2498.0f, -2499.0f, -2500.0f, -2501.0f, -2502.0f, -2503.0f, -2504.0f, -2505.0f, -2506.0f, -2507.0f, -2508.0f, -2509.0f, -2510.0f, -2511.0f, -2512.0f, -2513.0f, -2514.0f, -2515.0f, -2516.0f, -2517.0f, -2518.0f, -2519.0f, -2520.0f, -2521.0f, -2522.0f, -2523.0f, -2524.0f, -2525.0f, -2526.0f, -2527.0f, -2528.0f, -2529.0f, -2530.0f, -2531.0f, -2532.0f, -2533.0f, -2534.0f, -2535.0f, -2536.0f, -2537.0f, -2538.0f, -2539.0f, -2540.0f, -2541.0f, -2542.0f, -2543.0f, -2544.0f, -2545.0f, -2546.0f, -2547.0f, -2548.0f, -2549.0f, -2550.0f, -2551.0f, -2552.0f, -2553.0f, -2554.0f, -2555.0f, -2556.0f, -2557.0f, -2558.0f, -2559.0f, -2560.0f, -2561.0f, -2562.0f, -2563.0f, -2564.0f, -2565.0f, -2566.0f, -2567.0f, -2568.0f, -2569.0f, -2570.0f, -2571.0f, -2572.0f, -2573.0f, -2574.0f, -2575.0f, -2576.0f, -2577.0f, -2578.0f, -2579.0f, -2580.0f, -2581.0f, -2582.0f, -2583.0f, -2584.0f, -2585.0f, -2586.0f, -2587.0f, -2588.0f, -2589.0f, -2590.0f, -2591.0f, -2592.0f, -2593.0f, -2594.0f, -2595.0f, -2596.0f, -2597.0f, -2598.0f, -2599.0f, -2600.0f, -2601.0f, -2602.0f, -2603.0f, -2604.0f, -2605.0f, -2606.0f, -2607.0f, -2608.0f, -2609.0f, -2610.0f, -2611.0f, -2612.0f, -2613.0f, -2614.0f, -2615.0f, -2616.0f, -2617.0f, -2618.0f, -2619.0f, -2620.0f, -2621.0f, -2622.0f, -2623.0f, -2624.0f, -2625.0f, -2626.0f, -2627.0f, -2628.0f, -2629.0f, -2630.0f, -2631.0f, -2632.0f, -2633.0f, -2634.0f, -2635.0f, -2636.0f, -2637.0f, -2638.0f, -2639.0f, -2640.0f, -2641.0f, -2642.0f, -2643.0f, -2644.0f, -2645.0f, -2646.0f, -2647.0f, -2648.0f, -2649.0f, -2650.0f, -2651.0f, -2652.0f, -2653.0f, -2654.0f, -2655.0f, -2656.0f, -2657.0f, -2658.0f, -2659.0f, -2660.0f, -2661.0f, -2662.0f, -2663.0f, -2664.0f, -2665.0f, -2666.0f, -2667.0f, -2668.0f, -2669.0f, -2670.0f, -2671.0f, -2672.0f, -2673.0f, -2674.0f, -2675.0f, -2676.0f, -2677.0f, -2678.0f, -2679.0f, -2680.0f, -2681.0f, -2682.0f, -2683.0f, -2684.0f, -2685.0f, -2686.0f, -2687.0f, -2688.0f, -2689.0f, 
-2690.0f, -2691.0f, -2692.0f, -2693.0f, -2694.0f, -2695.0f, -2696.0f, -2697.0f, -2698.0f, -2699.0f, -2700.0f, -2701.0f, -2702.0f, -2703.0f, -2704.0f, -2705.0f, -2706.0f, -2707.0f, -2708.0f, -2709.0f, -2710.0f, -2711.0f, -2712.0f, -2713.0f, -2714.0f, -2715.0f, -2716.0f, -2717.0f, -2718.0f, -2719.0f, -2720.0f, -2721.0f, -2722.0f, -2723.0f, -2724.0f, -2725.0f, -2726.0f, -2727.0f, -2728.0f, -2729.0f, -2730.0f, -2731.0f, -2732.0f, -2733.0f, -2734.0f, -2735.0f, -2736.0f, -2737.0f, -2738.0f, -2739.0f, -2740.0f, -2741.0f, -2742.0f, -2743.0f, -2744.0f, -2745.0f, -2746.0f, -2747.0f, -2748.0f, -2749.0f, -2750.0f, -2751.0f, -2752.0f, -2753.0f, -2754.0f, -2755.0f, -2756.0f, -2757.0f, -2758.0f, -2759.0f, -2760.0f, -2761.0f, -2762.0f, -2763.0f, -2764.0f, -2765.0f, -2766.0f, -2767.0f, -2768.0f, -2769.0f, -2770.0f, -2771.0f, -2772.0f, -2773.0f, -2774.0f, -2775.0f, -2776.0f, -2777.0f, -2778.0f, -2779.0f, -2780.0f, -2781.0f, -2782.0f, -2783.0f, -2784.0f, -2785.0f, -2786.0f, -2787.0f, -2788.0f, -2789.0f, -2790.0f, -2791.0f, -2792.0f, -2793.0f, -2794.0f, -2795.0f, -2796.0f, -2797.0f, -2798.0f, -2799.0f, -2800.0f, -2801.0f, -2802.0f, -2803.0f, -2804.0f, -2805.0f, -2806.0f, -2807.0f, -2808.0f, -2809.0f, -2810.0f, -2811.0f, -2812.0f, -2813.0f, -2814.0f, -2815.0f, -2816.0f, -2817.0f, -2818.0f, -2819.0f, -2820.0f, -2821.0f, -2822.0f, -2823.0f, -2824.0f, -2825.0f, -2826.0f, -2827.0f, -2828.0f, -2829.0f, -2830.0f, -2831.0f, -2832.0f, -2833.0f, -2834.0f, -2835.0f, -2836.0f, -2837.0f, -2838.0f, -2839.0f, -2840.0f, -2841.0f, -2842.0f, -2843.0f, -2844.0f, -2845.0f, -2846.0f, -2847.0f, -2848.0f, -2849.0f, -2850.0f, -2851.0f, -2852.0f, -2853.0f, -2854.0f, -2855.0f, -2856.0f, -2857.0f, -2858.0f, -2859.0f, -2860.0f, -2861.0f, -2862.0f, -2863.0f, -2864.0f, -2865.0f, -2866.0f, -2867.0f, -2868.0f, -2869.0f, -2870.0f, -2871.0f, -2872.0f, -2873.0f, -2874.0f, -2875.0f, -2876.0f, -2877.0f, -2878.0f, -2879.0f, -2880.0f, -2881.0f, -2882.0f, -2883.0f, -2884.0f, -2885.0f, -2886.0f, -2887.0f, -2888.0f, -2889.0f, 
-2890.0f, -2891.0f, -2892.0f, -2893.0f, -2894.0f, -2895.0f, -2896.0f, -2897.0f, -2898.0f, -2899.0f, -2900.0f, -2901.0f, -2902.0f, -2903.0f, -2904.0f, -2905.0f, -2906.0f, -2907.0f, -2908.0f, -2909.0f, -2910.0f, -2911.0f, -2912.0f, -2913.0f, -2914.0f, -2915.0f, -2916.0f, -2917.0f, -2918.0f, -2919.0f, -2920.0f, -2921.0f, -2922.0f, -2923.0f, -2924.0f, -2925.0f, -2926.0f, -2927.0f, -2928.0f, -2929.0f, -2930.0f, -2931.0f, -2932.0f, -2933.0f, -2934.0f, -2935.0f, -2936.0f, -2937.0f, -2938.0f, -2939.0f, -2940.0f, -2941.0f, -2942.0f, -2943.0f, -2944.0f, -2945.0f, -2946.0f, -2947.0f, -2948.0f, -2949.0f, -2950.0f, -2951.0f, -2952.0f, -2953.0f, -2954.0f, -2955.0f, -2956.0f, -2957.0f, -2958.0f, -2959.0f, -2960.0f, -2961.0f, -2962.0f, -2963.0f, -2964.0f, -2965.0f, -2966.0f, -2967.0f, -2968.0f, -2969.0f, -2970.0f, -2971.0f, -2972.0f, -2973.0f, -2974.0f, -2975.0f, -2976.0f, -2977.0f, -2978.0f, -2979.0f, -2980.0f, -2981.0f, -2982.0f, -2983.0f, -2984.0f, -2985.0f, -2986.0f, -2987.0f, -2988.0f, -2989.0f, -2990.0f, -2991.0f, -2992.0f, -2993.0f, -2994.0f, -2995.0f, -2996.0f, -2997.0f, -2998.0f, -2999.0f, -3000.0f, -3001.0f, -3002.0f, -3003.0f, -3004.0f, -3005.0f, -3006.0f, -3007.0f, -3008.0f, -3009.0f, -3010.0f, -3011.0f, -3012.0f, -3013.0f, -3014.0f, -3015.0f, -3016.0f, -3017.0f, -3018.0f, -3019.0f, -3020.0f, -3021.0f, -3022.0f, -3023.0f, -3024.0f, -3025.0f, -3026.0f, -3027.0f, -3028.0f, -3029.0f, -3030.0f, -3031.0f, -3032.0f, -3033.0f, -3034.0f, -3035.0f, -3036.0f, -3037.0f, -3038.0f, -3039.0f, -3040.0f, -3041.0f, -3042.0f, -3043.0f, -3044.0f, -3045.0f, -3046.0f, -3047.0f, -3048.0f, -3049.0f, -3050.0f, -3051.0f, -3052.0f, -3053.0f, -3054.0f, -3055.0f, -3056.0f, -3057.0f, -3058.0f, -3059.0f, -3060.0f, -3061.0f, -3062.0f, -3063.0f, -3064.0f, -3065.0f, -3066.0f, -3067.0f, -3068.0f, -3069.0f, -3070.0f, -3071.0f, -3072.0f, -3073.0f, -3074.0f, -3075.0f, -3076.0f, -3077.0f, -3078.0f, -3079.0f, -3080.0f, -3081.0f, -3082.0f, -3083.0f, -3084.0f, -3085.0f, -3086.0f, -3087.0f, -3088.0f, -3089.0f, 
-3090.0f, -3091.0f, -3092.0f, -3093.0f, -3094.0f, -3095.0f, -3096.0f, -3097.0f, -3098.0f, -3099.0f, -3100.0f, -3101.0f, -3102.0f, -3103.0f, -3104.0f, -3105.0f, -3106.0f, -3107.0f, -3108.0f, -3109.0f, -3110.0f, -3111.0f, -3112.0f, -3113.0f, -3114.0f, -3115.0f, -3116.0f, -3117.0f, -3118.0f, -3119.0f, -3120.0f, -3121.0f, -3122.0f, -3123.0f, -3124.0f, -3125.0f, -3126.0f, -3127.0f, -3128.0f, -3129.0f, -3130.0f, -3131.0f, -3132.0f, -3133.0f, -3134.0f, -3135.0f, -3136.0f, -3137.0f, -3138.0f, -3139.0f, -3140.0f, -3141.0f, -3142.0f, -3143.0f, -3144.0f, -3145.0f, -3146.0f, -3147.0f, -3148.0f, -3149.0f, -3150.0f, -3151.0f, -3152.0f, -3153.0f, -3154.0f, -3155.0f, -3156.0f, -3157.0f, -3158.0f, -3159.0f, -3160.0f, -3161.0f, -3162.0f, -3163.0f, -3164.0f, -3165.0f, -3166.0f, -3167.0f, -3168.0f, -3169.0f, -3170.0f, -3171.0f, -3172.0f, -3173.0f, -3174.0f, -3175.0f, -3176.0f, -3177.0f, -3178.0f, -3179.0f, -3180.0f, -3181.0f, -3182.0f, -3183.0f, -3184.0f, -3185.0f, -3186.0f, -3187.0f, -3188.0f, -3189.0f, -3190.0f, -3191.0f, -3192.0f, -3193.0f, -3194.0f, -3195.0f, -3196.0f, -3197.0f, -3198.0f, -3199.0f, -3200.0f, -3201.0f, -3202.0f, -3203.0f, -3204.0f, -3205.0f, -3206.0f, -3207.0f, -3208.0f, -3209.0f, -3210.0f, -3211.0f, -3212.0f, -3213.0f, -3214.0f, -3215.0f, -3216.0f, -3217.0f, -3218.0f, -3219.0f, -3220.0f, -3221.0f, -3222.0f, -3223.0f, -3224.0f, -3225.0f, -3226.0f, -3227.0f, -3228.0f, -3229.0f, -3230.0f, -3231.0f, -3232.0f, -3233.0f, -3234.0f, -3235.0f, -3236.0f, -3237.0f, -3238.0f, -3239.0f, -3240.0f, -3241.0f, -3242.0f, -3243.0f, -3244.0f, -3245.0f, -3246.0f, -3247.0f, -3248.0f, -3249.0f, -3250.0f, -3251.0f, -3252.0f, -3253.0f, -3254.0f, -3255.0f, -3256.0f, -3257.0f, -3258.0f, -3259.0f, -3260.0f, -3261.0f, -3262.0f, -3263.0f, -3264.0f, -3265.0f, -3266.0f, -3267.0f, -3268.0f, -3269.0f, -3270.0f, -3271.0f, -3272.0f, -3273.0f, -3274.0f, -3275.0f, -3276.0f, -3277.0f, -3278.0f, -3279.0f, -3280.0f, -3281.0f, -3282.0f, -3283.0f, -3284.0f, -3285.0f, -3286.0f, -3287.0f, -3288.0f, -3289.0f, 
-3290.0f, -3291.0f, -3292.0f, -3293.0f, -3294.0f, -3295.0f, -3296.0f, -3297.0f, -3298.0f, -3299.0f, -3300.0f, -3301.0f, -3302.0f, -3303.0f, -3304.0f, -3305.0f, -3306.0f, -3307.0f, -3308.0f, -3309.0f, -3310.0f, -3311.0f, -3312.0f, -3313.0f, -3314.0f, -3315.0f, -3316.0f, -3317.0f, -3318.0f, -3319.0f, -3320.0f, -3321.0f, -3322.0f, -3323.0f, -3324.0f, -3325.0f, -3326.0f, -3327.0f, -3328.0f, -3329.0f, -3330.0f, -3331.0f, -3332.0f, -3333.0f, -3334.0f, -3335.0f, -3336.0f, -3337.0f, -3338.0f, -3339.0f, -3340.0f, -3341.0f, -3342.0f, -3343.0f, -3344.0f, -3345.0f, -3346.0f, -3347.0f, -3348.0f, -3349.0f, -3350.0f, -3351.0f, -3352.0f, -3353.0f, -3354.0f, -3355.0f, -3356.0f, -3357.0f, -3358.0f, -3359.0f, -3360.0f, -3361.0f, -3362.0f, -3363.0f, -3364.0f, -3365.0f, -3366.0f, -3367.0f, -3368.0f, -3369.0f, -3370.0f, -3371.0f, -3372.0f, -3373.0f, -3374.0f, -3375.0f, -3376.0f, -3377.0f, -3378.0f, -3379.0f, -3380.0f, -3381.0f, -3382.0f, -3383.0f, -3384.0f, -3385.0f, -3386.0f, -3387.0f, -3388.0f, -3389.0f, -3390.0f, -3391.0f, -3392.0f, -3393.0f, -3394.0f, -3395.0f, -3396.0f, -3397.0f, -3398.0f, -3399.0f, -3400.0f, -3401.0f, -3402.0f, -3403.0f, -3404.0f, -3405.0f, -3406.0f, -3407.0f, -3408.0f, -3409.0f, -3410.0f, -3411.0f, -3412.0f, -3413.0f, -3414.0f, -3415.0f, -3416.0f, -3417.0f, -3418.0f, -3419.0f, -3420.0f, -3421.0f, -3422.0f, -3423.0f, -3424.0f, -3425.0f, -3426.0f, -3427.0f, -3428.0f, -3429.0f, -3430.0f, -3431.0f, -3432.0f, -3433.0f, -3434.0f, -3435.0f, -3436.0f, -3437.0f, -3438.0f, -3439.0f, -3440.0f, -3441.0f, -3442.0f, -3443.0f, -3444.0f, -3445.0f, -3446.0f, -3447.0f, -3448.0f, -3449.0f, -3450.0f, -3451.0f, -3452.0f, -3453.0f, -3454.0f, -3455.0f, -3456.0f, -3457.0f, -3458.0f, -3459.0f, -3460.0f, -3461.0f, -3462.0f, -3463.0f, -3464.0f, -3465.0f, -3466.0f, -3467.0f, -3468.0f, -3469.0f, -3470.0f, -3471.0f, -3472.0f, -3473.0f, -3474.0f, -3475.0f, -3476.0f, -3477.0f, -3478.0f, -3479.0f, -3480.0f, -3481.0f, -3482.0f, -3483.0f, -3484.0f, -3485.0f, -3486.0f, -3487.0f, -3488.0f, -3489.0f, 
-3490.0f, -3491.0f, -3492.0f, -3493.0f, -3494.0f, -3495.0f, -3496.0f, -3497.0f, -3498.0f, -3499.0f, -3500.0f, -3501.0f, -3502.0f, -3503.0f, -3504.0f, -3505.0f, -3506.0f, -3507.0f, -3508.0f, -3509.0f, -3510.0f, -3511.0f, -3512.0f, -3513.0f, -3514.0f, -3515.0f, -3516.0f, -3517.0f, -3518.0f, -3519.0f, -3520.0f, -3521.0f, -3522.0f, -3523.0f, -3524.0f, -3525.0f, -3526.0f, -3527.0f, -3528.0f, -3529.0f, -3530.0f, -3531.0f, -3532.0f, -3533.0f, -3534.0f, -3535.0f, -3536.0f, -3537.0f, -3538.0f, -3539.0f, -3540.0f, -3541.0f, -3542.0f, -3543.0f, -3544.0f, -3545.0f, -3546.0f, -3547.0f, -3548.0f, -3549.0f, -3550.0f, -3551.0f, -3552.0f, -3553.0f, -3554.0f, -3555.0f, -3556.0f, -3557.0f, -3558.0f, -3559.0f, -3560.0f, -3561.0f, -3562.0f, -3563.0f, -3564.0f, -3565.0f, -3566.0f, -3567.0f, -3568.0f, -3569.0f, -3570.0f, -3571.0f, -3572.0f, -3573.0f, -3574.0f, -3575.0f, -3576.0f, -3577.0f, -3578.0f, -3579.0f, -3580.0f, -3581.0f, -3582.0f, -3583.0f, -3584.0f, -3585.0f, -3586.0f, -3587.0f, -3588.0f, -3589.0f, -3590.0f, -3591.0f, -3592.0f, -3593.0f, -3594.0f, -3595.0f, -3596.0f, -3597.0f, -3598.0f, -3599.0f, -3600.0f, -3601.0f, -3602.0f, -3603.0f, -3604.0f, -3605.0f, -3606.0f, -3607.0f, -3608.0f, -3609.0f, -3610.0f, -3611.0f, -3612.0f, -3613.0f, -3614.0f, -3615.0f, -3616.0f, -3617.0f, -3618.0f, -3619.0f, -3620.0f, -3621.0f, -3622.0f, -3623.0f, -3624.0f, -3625.0f, -3626.0f, -3627.0f, -3628.0f, -3629.0f, -3630.0f, -3631.0f, -3632.0f, -3633.0f, -3634.0f, -3635.0f, -3636.0f, -3637.0f, -3638.0f, -3639.0f, -3640.0f, -3641.0f, -3642.0f, -3643.0f, -3644.0f, -3645.0f, -3646.0f, -3647.0f, -3648.0f, -3649.0f, -3650.0f, -3651.0f, -3652.0f, -3653.0f, -3654.0f, -3655.0f, -3656.0f, -3657.0f, -3658.0f, -3659.0f, -3660.0f, -3661.0f, -3662.0f, -3663.0f, -3664.0f, -3665.0f, -3666.0f, -3667.0f, -3668.0f, -3669.0f, -3670.0f, -3671.0f, -3672.0f, -3673.0f, -3674.0f, -3675.0f, -3676.0f, -3677.0f, -3678.0f, -3679.0f, -3680.0f, -3681.0f, -3682.0f, -3683.0f, -3684.0f, -3685.0f, -3686.0f, -3687.0f, -3688.0f, -3689.0f, 
-3690.0f, -3691.0f, -3692.0f, -3693.0f, -3694.0f, -3695.0f, -3696.0f, -3697.0f, -3698.0f, -3699.0f, -3700.0f, -3701.0f, -3702.0f, -3703.0f, -3704.0f, -3705.0f, -3706.0f, -3707.0f, -3708.0f, -3709.0f, -3710.0f, -3711.0f, -3712.0f, -3713.0f, -3714.0f, -3715.0f, -3716.0f, -3717.0f, -3718.0f, -3719.0f, -3720.0f, -3721.0f, -3722.0f, -3723.0f, -3724.0f, -3725.0f, -3726.0f, -3727.0f, -3728.0f, -3729.0f, -3730.0f, -3731.0f, -3732.0f, -3733.0f, -3734.0f, -3735.0f, -3736.0f, -3737.0f, -3738.0f, -3739.0f, -3740.0f, -3741.0f, -3742.0f, -3743.0f, -3744.0f, -3745.0f, -3746.0f, -3747.0f, -3748.0f, -3749.0f, -3750.0f, -3751.0f, -3752.0f, -3753.0f, -3754.0f, -3755.0f, -3756.0f, -3757.0f, -3758.0f, -3759.0f, -3760.0f, -3761.0f, -3762.0f, -3763.0f, -3764.0f, -3765.0f, -3766.0f, -3767.0f, -3768.0f, -3769.0f, -3770.0f, -3771.0f, -3772.0f, -3773.0f, -3774.0f, -3775.0f, -3776.0f, -3777.0f, -3778.0f, -3779.0f, -3780.0f, -3781.0f, -3782.0f, -3783.0f, -3784.0f, -3785.0f, -3786.0f, -3787.0f, -3788.0f, -3789.0f, -3790.0f, -3791.0f, -3792.0f, -3793.0f, -3794.0f, -3795.0f, -3796.0f, -3797.0f, -3798.0f, -3799.0f, -3800.0f, -3801.0f, -3802.0f, -3803.0f, -3804.0f, -3805.0f, -3806.0f, -3807.0f, -3808.0f, -3809.0f, -3810.0f, -3811.0f, -3812.0f, -3813.0f, -3814.0f, -3815.0f, -3816.0f, -3817.0f, -3818.0f, -3819.0f, -3820.0f, -3821.0f, -3822.0f, -3823.0f, -3824.0f, -3825.0f, -3826.0f, -3827.0f, -3828.0f, -3829.0f, -3830.0f, -3831.0f, -3832.0f, -3833.0f, -3834.0f, -3835.0f, -3836.0f, -3837.0f, -3838.0f, -3839.0f, -3840.0f, -3841.0f, -3842.0f, -3843.0f, -3844.0f, -3845.0f, -3846.0f, -3847.0f, -3848.0f, -3849.0f, -3850.0f, -3851.0f, -3852.0f, -3853.0f, -3854.0f, -3855.0f, -3856.0f, -3857.0f, -3858.0f, -3859.0f, -3860.0f, -3861.0f, -3862.0f, -3863.0f, -3864.0f, -3865.0f, -3866.0f, -3867.0f, -3868.0f, -3869.0f, -3870.0f, -3871.0f, -3872.0f, -3873.0f, -3874.0f, -3875.0f, -3876.0f, -3877.0f, -3878.0f, -3879.0f, -3880.0f, -3881.0f, -3882.0f, -3883.0f, -3884.0f, -3885.0f, -3886.0f, -3887.0f, -3888.0f, -3889.0f, 
-3890.0f, -3891.0f, -3892.0f, -3893.0f, -3894.0f, -3895.0f, -3896.0f, -3897.0f, -3898.0f, -3899.0f, -3900.0f, -3901.0f, -3902.0f, -3903.0f, -3904.0f, -3905.0f, -3906.0f, -3907.0f, -3908.0f, -3909.0f, -3910.0f, -3911.0f, -3912.0f, -3913.0f, -3914.0f, -3915.0f, -3916.0f, -3917.0f, -3918.0f, -3919.0f, -3920.0f, -3921.0f, -3922.0f, -3923.0f, -3924.0f, -3925.0f, -3926.0f, -3927.0f, -3928.0f, -3929.0f, -3930.0f, -3931.0f, -3932.0f, -3933.0f, -3934.0f, -3935.0f, -3936.0f, -3937.0f, -3938.0f, -3939.0f, -3940.0f, -3941.0f, -3942.0f, -3943.0f, -3944.0f, -3945.0f, -3946.0f, -3947.0f, -3948.0f, -3949.0f, -3950.0f, -3951.0f, -3952.0f, -3953.0f, -3954.0f, -3955.0f, -3956.0f, -3957.0f, -3958.0f, -3959.0f, -3960.0f, -3961.0f, -3962.0f, -3963.0f, -3964.0f, -3965.0f, -3966.0f, -3967.0f, -3968.0f, -3969.0f, -3970.0f, -3971.0f, -3972.0f, -3973.0f, -3974.0f, -3975.0f, -3976.0f, -3977.0f, -3978.0f, -3979.0f, -3980.0f, -3981.0f, -3982.0f, -3983.0f, -3984.0f, -3985.0f, -3986.0f, -3987.0f, -3988.0f, -3989.0f, -3990.0f, -3991.0f, -3992.0f, -3993.0f, -3994.0f, -3995.0f, -3996.0f, -3997.0f, -3998.0f, -3999.0f, -4000.0f, -4001.0f, -4002.0f, -4003.0f, -4004.0f, -4005.0f, -4006.0f, -4007.0f, -4008.0f, -4009.0f, -4010.0f, -4011.0f, -4012.0f, -4013.0f, -4014.0f, -4015.0f, -4016.0f, -4017.0f, -4018.0f, -4019.0f, -4020.0f, -4021.0f, -4022.0f, -4023.0f, -4024.0f, -4025.0f, -4026.0f, -4027.0f, -4028.0f, -4029.0f, -4030.0f, -4031.0f, -4032.0f, -4033.0f, -4034.0f, -4035.0f, -4036.0f, -4037.0f, -4038.0f, -4039.0f, -4040.0f, -4041.0f, -4042.0f, -4043.0f, -4044.0f, -4045.0f, -4046.0f, -4047.0f, -4048.0f, -4049.0f, -4050.0f, -4051.0f, -4052.0f, -4053.0f, -4054.0f, -4055.0f, -4056.0f, -4057.0f, -4058.0f, -4059.0f, -4060.0f, -4061.0f, -4062.0f, -4063.0f, -4064.0f, -4065.0f, -4066.0f, -4067.0f, -4068.0f, -4069.0f, -4070.0f, -4071.0f, -4072.0f, -4073.0f, -4074.0f, -4075.0f, -4076.0f, -4077.0f, -4078.0f, -4079.0f, -4080.0f, -4081.0f, -4082.0f, -4083.0f, -4084.0f, -4085.0f, -4086.0f, -4087.0f, -4088.0f, -4089.0f, 
-4090.0f, -4091.0f, -4092.0f, -4093.0f, -4094.0f, -4095.0f, -4096.0f, -4097.0f, -4098.0f, -4099.0f, -4100.0f, -4101.0f, -4102.0f, -4103.0f, -4104.0f, -4105.0f, -4106.0f, -4107.0f, -4108.0f, -4109.0f, -4110.0f, -4111.0f, -4112.0f, -4113.0f, -4114.0f, -4115.0f, -4116.0f, -4117.0f, -4118.0f, -4119.0f, -4120.0f, -4121.0f, -4122.0f, -4123.0f, -4124.0f, -4125.0f, -4126.0f, -4127.0f, -4128.0f, -4129.0f, -4130.0f, -4131.0f, -4132.0f, -4133.0f, -4134.0f, -4135.0f, -4136.0f, -4137.0f, -4138.0f, -4139.0f, -4140.0f, -4141.0f, -4142.0f, -4143.0f, -4144.0f, -4145.0f, -4146.0f, -4147.0f, -4148.0f, -4149.0f, -4150.0f, -4151.0f, -4152.0f, -4153.0f, -4154.0f, -4155.0f, -4156.0f, -4157.0f, -4158.0f, -4159.0f, -4160.0f, -4161.0f, -4162.0f, -4163.0f, -4164.0f, -4165.0f, -4166.0f, -4167.0f, -4168.0f, -4169.0f, -4170.0f, -4171.0f, -4172.0f, -4173.0f, -4174.0f, -4175.0f, -4176.0f, -4177.0f, -4178.0f, -4179.0f, -4180.0f, -4181.0f, -4182.0f, -4183.0f, -4184.0f, -4185.0f, -4186.0f, -4187.0f, -4188.0f, -4189.0f, -4190.0f, -4191.0f, -4192.0f, -4193.0f, -4194.0f, -4195.0f, -4196.0f, -4197.0f, -4198.0f, -4199.0f, -4200.0f, -4201.0f, -4202.0f, -4203.0f, -4204.0f, -4205.0f, -4206.0f, -4207.0f, -4208.0f, -4209.0f, -4210.0f, -4211.0f, -4212.0f, -4213.0f, -4214.0f, -4215.0f, -4216.0f, -4217.0f, -4218.0f, -4219.0f, -4220.0f, -4221.0f, -4222.0f, -4223.0f, -4224.0f, -4225.0f, -4226.0f, -4227.0f, -4228.0f, -4229.0f, -4230.0f, -4231.0f, -4232.0f, -4233.0f, -4234.0f, -4235.0f, -4236.0f, -4237.0f, -4238.0f, -4239.0f, -4240.0f, -4241.0f, -4242.0f, -4243.0f, -4244.0f, -4245.0f, -4246.0f, -4247.0f, -4248.0f, -4249.0f, -4250.0f, -4251.0f, -4252.0f, -4253.0f, -4254.0f, -4255.0f, -4256.0f, -4257.0f, -4258.0f, -4259.0f, -4260.0f, -4261.0f, -4262.0f, -4263.0f, -4264.0f, -4265.0f, -4266.0f, -4267.0f, -4268.0f, -4269.0f, -4270.0f, -4271.0f, -4272.0f, -4273.0f, -4274.0f, -4275.0f, -4276.0f, -4277.0f, -4278.0f, -4279.0f, -4280.0f, -4281.0f, -4282.0f, -4283.0f, -4284.0f, -4285.0f, -4286.0f, -4287.0f, -4288.0f, -4289.0f, 
-4290.0f, -4291.0f, -4292.0f, -4293.0f, -4294.0f, -4295.0f, -4296.0f, -4297.0f, -4298.0f, -4299.0f, -4300.0f, -4301.0f, -4302.0f, -4303.0f, -4304.0f, -4305.0f, -4306.0f, -4307.0f, -4308.0f, -4309.0f, -4310.0f, -4311.0f, -4312.0f, -4313.0f, -4314.0f, -4315.0f, -4316.0f, -4317.0f, -4318.0f, -4319.0f, -4320.0f, -4321.0f, -4322.0f, -4323.0f, -4324.0f, -4325.0f, -4326.0f, -4327.0f, -4328.0f, -4329.0f, -4330.0f, -4331.0f, -4332.0f, -4333.0f, -4334.0f, -4335.0f, -4336.0f, -4337.0f, -4338.0f, -4339.0f, -4340.0f, -4341.0f, -4342.0f, -4343.0f, -4344.0f, -4345.0f, -4346.0f, -4347.0f, -4348.0f, -4349.0f, -4350.0f, -4351.0f, -4352.0f, -4353.0f, -4354.0f, -4355.0f, -4356.0f, -4357.0f, -4358.0f, -4359.0f, -4360.0f, -4361.0f, -4362.0f, -4363.0f, -4364.0f, -4365.0f, -4366.0f, -4367.0f, -4368.0f, -4369.0f, -4370.0f, -4371.0f, -4372.0f, -4373.0f, -4374.0f, -4375.0f, -4376.0f, -4377.0f, -4378.0f, -4379.0f, -4380.0f, -4381.0f, -4382.0f, -4383.0f, -4384.0f, -4385.0f, -4386.0f, -4387.0f, -4388.0f, -4389.0f, -4390.0f, -4391.0f, -4392.0f, -4393.0f, -4394.0f, -4395.0f, -4396.0f, -4397.0f, -4398.0f, -4399.0f, -4400.0f, -4401.0f, -4402.0f, -4403.0f, -4404.0f, -4405.0f, -4406.0f, -4407.0f, -4408.0f, -4409.0f, -4410.0f, -4411.0f, -4412.0f, -4413.0f, -4414.0f, -4415.0f, -4416.0f, -4417.0f, -4418.0f, -4419.0f, -4420.0f, -4421.0f, -4422.0f, -4423.0f, -4424.0f, -4425.0f, -4426.0f, -4427.0f, -4428.0f, -4429.0f, -4430.0f, -4431.0f, -4432.0f, -4433.0f, -4434.0f, -4435.0f, -4436.0f, -4437.0f, -4438.0f, -4439.0f, -4440.0f, -4441.0f, -4442.0f, -4443.0f, -4444.0f, -4445.0f, -4446.0f, -4447.0f, -4448.0f, -4449.0f, -4450.0f, -4451.0f, -4452.0f, -4453.0f, -4454.0f, -4455.0f, -4456.0f, -4457.0f, -4458.0f, -4459.0f, -4460.0f, -4461.0f, -4462.0f, -4463.0f, -4464.0f, -4465.0f, -4466.0f, -4467.0f, -4468.0f, -4469.0f, -4470.0f, -4471.0f, -4472.0f, -4473.0f, -4474.0f, -4475.0f, -4476.0f, -4477.0f, -4478.0f, -4479.0f, -4480.0f, -4481.0f, -4482.0f, -4483.0f, -4484.0f, -4485.0f, -4486.0f, -4487.0f, -4488.0f, -4489.0f, 
-4490.0f, -4491.0f, -4492.0f, -4493.0f, -4494.0f, -4495.0f, -4496.0f, -4497.0f, -4498.0f, -4499.0f, -4500.0f, -4501.0f, -4502.0f, -4503.0f, -4504.0f, -4505.0f, -4506.0f, -4507.0f, -4508.0f, -4509.0f, -4510.0f, -4511.0f, -4512.0f, -4513.0f, -4514.0f, -4515.0f, -4516.0f, -4517.0f, -4518.0f, -4519.0f, -4520.0f, -4521.0f, -4522.0f, -4523.0f, -4524.0f, -4525.0f, -4526.0f, -4527.0f, -4528.0f, -4529.0f, -4530.0f, -4531.0f, -4532.0f, -4533.0f, -4534.0f, -4535.0f, -4536.0f, -4537.0f, -4538.0f, -4539.0f, -4540.0f, -4541.0f, -4542.0f, -4543.0f, -4544.0f, -4545.0f, -4546.0f, -4547.0f, -4548.0f, -4549.0f, -4550.0f, -4551.0f, -4552.0f, -4553.0f, -4554.0f, -4555.0f, -4556.0f, -4557.0f, -4558.0f, -4559.0f, -4560.0f, -4561.0f, -4562.0f, -4563.0f, -4564.0f, -4565.0f, -4566.0f, -4567.0f, -4568.0f, -4569.0f, -4570.0f, -4571.0f, -4572.0f, -4573.0f, -4574.0f, -4575.0f, -4576.0f, -4577.0f, -4578.0f, -4579.0f, -4580.0f, -4581.0f, -4582.0f, -4583.0f, -4584.0f, -4585.0f, -4586.0f, -4587.0f, -4588.0f, -4589.0f, -4590.0f, -4591.0f, -4592.0f, -4593.0f, -4594.0f, -4595.0f, -4596.0f, -4597.0f, -4598.0f, -4599.0f, -4600.0f, -4601.0f, -4602.0f, -4603.0f, -4604.0f, -4605.0f, -4606.0f, -4607.0f, -4608.0f, -4609.0f, -4610.0f, -4611.0f, -4612.0f, -4613.0f, -4614.0f, -4615.0f, -4616.0f, -4617.0f, -4618.0f, -4619.0f, -4620.0f, -4621.0f, -4622.0f, -4623.0f, -4624.0f, -4625.0f, -4626.0f, -4627.0f, -4628.0f, -4629.0f, -4630.0f, -4631.0f, -4632.0f, -4633.0f, -4634.0f, -4635.0f, -4636.0f, -4637.0f, -4638.0f, -4639.0f, -4640.0f, -4641.0f, -4642.0f, -4643.0f, -4644.0f, -4645.0f, -4646.0f, -4647.0f, -4648.0f, -4649.0f, -4650.0f, -4651.0f, -4652.0f, -4653.0f, -4654.0f, -4655.0f, -4656.0f, -4657.0f, -4658.0f, -4659.0f, -4660.0f, -4661.0f, -4662.0f, -4663.0f, -4664.0f, -4665.0f, -4666.0f, -4667.0f, -4668.0f, -4669.0f, -4670.0f, -4671.0f, -4672.0f, -4673.0f, -4674.0f, -4675.0f, -4676.0f, -4677.0f, -4678.0f, -4679.0f, -4680.0f, -4681.0f, -4682.0f, -4683.0f, -4684.0f, -4685.0f, -4686.0f, -4687.0f, -4688.0f, -4689.0f, 
-4690.0f, -4691.0f, -4692.0f, -4693.0f, -4694.0f, -4695.0f, -4696.0f, -4697.0f, -4698.0f, -4699.0f, -4700.0f, -4701.0f, -4702.0f, -4703.0f, -4704.0f, -4705.0f, -4706.0f, -4707.0f, -4708.0f, -4709.0f, -4710.0f, -4711.0f, -4712.0f, -4713.0f, -4714.0f, -4715.0f, -4716.0f, -4717.0f, -4718.0f, -4719.0f, -4720.0f, -4721.0f, -4722.0f, -4723.0f, -4724.0f, -4725.0f, -4726.0f, -4727.0f, -4728.0f, -4729.0f, -4730.0f, -4731.0f, -4732.0f, -4733.0f, -4734.0f, -4735.0f, -4736.0f, -4737.0f, -4738.0f, -4739.0f, -4740.0f, -4741.0f, -4742.0f, -4743.0f, -4744.0f, -4745.0f, -4746.0f, -4747.0f, -4748.0f, -4749.0f, -4750.0f, -4751.0f, -4752.0f, -4753.0f, -4754.0f, -4755.0f, -4756.0f, -4757.0f, -4758.0f, -4759.0f, -4760.0f, -4761.0f, -4762.0f, -4763.0f, -4764.0f, -4765.0f, -4766.0f, -4767.0f, -4768.0f, -4769.0f, -4770.0f, -4771.0f, -4772.0f, -4773.0f, -4774.0f, -4775.0f, -4776.0f, -4777.0f, -4778.0f, -4779.0f, -4780.0f, -4781.0f, -4782.0f, -4783.0f, -4784.0f, -4785.0f, -4786.0f, -4787.0f, -4788.0f, -4789.0f, -4790.0f, -4791.0f, -4792.0f, -4793.0f, -4794.0f, -4795.0f, -4796.0f, -4797.0f, -4798.0f, -4799.0f, -4800.0f, -4801.0f, -4802.0f, -4803.0f, -4804.0f, -4805.0f, -4806.0f, -4807.0f, -4808.0f, -4809.0f, -4810.0f, -4811.0f, -4812.0f, -4813.0f, -4814.0f, -4815.0f, -4816.0f, -4817.0f, -4818.0f, -4819.0f, -4820.0f, -4821.0f, -4822.0f, -4823.0f, -4824.0f, -4825.0f, -4826.0f, -4827.0f, -4828.0f, -4829.0f, -4830.0f, -4831.0f, -4832.0f, -4833.0f, -4834.0f, -4835.0f, -4836.0f, -4837.0f, -4838.0f, -4839.0f, -4840.0f, -4841.0f, -4842.0f, -4843.0f, -4844.0f, -4845.0f, -4846.0f, -4847.0f, -4848.0f, -4849.0f, -4850.0f, -4851.0f, -4852.0f, -4853.0f, -4854.0f, -4855.0f, -4856.0f, -4857.0f, -4858.0f, -4859.0f, -4860.0f, -4861.0f, -4862.0f, -4863.0f, -4864.0f, -4865.0f, -4866.0f, -4867.0f, -4868.0f, -4869.0f, -4870.0f, -4871.0f, -4872.0f, -4873.0f, -4874.0f, -4875.0f, -4876.0f, -4877.0f, -4878.0f, -4879.0f, -4880.0f, -4881.0f, -4882.0f, -4883.0f, -4884.0f, -4885.0f, -4886.0f, -4887.0f, -4888.0f, -4889.0f, 
-4890.0f, -4891.0f, -4892.0f, -4893.0f, -4894.0f, -4895.0f, -4896.0f, -4897.0f, -4898.0f, -4899.0f, -4900.0f, -4901.0f, -4902.0f, -4903.0f, -4904.0f, -4905.0f, -4906.0f, -4907.0f, -4908.0f, -4909.0f, -4910.0f, -4911.0f, -4912.0f, -4913.0f, -4914.0f, -4915.0f, -4916.0f, -4917.0f, -4918.0f, -4919.0f, -4920.0f, -4921.0f, -4922.0f, -4923.0f, -4924.0f, -4925.0f, -4926.0f, -4927.0f, -4928.0f, -4929.0f, -4930.0f, -4931.0f, -4932.0f, -4933.0f, -4934.0f, -4935.0f, -4936.0f, -4937.0f, -4938.0f, -4939.0f, -4940.0f, -4941.0f, -4942.0f, -4943.0f, -4944.0f, -4945.0f, -4946.0f, -4947.0f, -4948.0f, -4949.0f, -4950.0f, -4951.0f, -4952.0f, -4953.0f, -4954.0f, -4955.0f, -4956.0f, -4957.0f, -4958.0f, -4959.0f, -4960.0f, -4961.0f, -4962.0f, -4963.0f, -4964.0f, -4965.0f, -4966.0f, -4967.0f, -4968.0f, -4969.0f, -4970.0f, -4971.0f, -4972.0f, -4973.0f, -4974.0f, -4975.0f, -4976.0f, -4977.0f, -4978.0f, -4979.0f, -4980.0f, -4981.0f, -4982.0f, -4983.0f, -4984.0f, -4985.0f, -4986.0f, -4987.0f, -4988.0f, -4989.0f, -4990.0f, -4991.0f, -4992.0f, -4993.0f, -4994.0f, -4995.0f, -4996.0f, -4997.0f, -4998.0f, -4999.0f, -5000.0f, -5001.0f, -5002.0f, -5003.0f, -5004.0f, -5005.0f, -5006.0f, -5007.0f, -5008.0f, -5009.0f, -5010.0f, -5011.0f, -5012.0f, -5013.0f, -5014.0f, -5015.0f, -5016.0f, -5017.0f, -5018.0f, -5019.0f, -5020.0f, -5021.0f, -5022.0f, -5023.0f, -5024.0f, -5025.0f, -5026.0f, -5027.0f, -5028.0f, -5029.0f, -5030.0f, -5031.0f, -5032.0f, -5033.0f, -5034.0f, -5035.0f, -5036.0f, -5037.0f, -5038.0f, -5039.0f, -5040.0f, -5041.0f, -5042.0f, -5043.0f, -5044.0f, -5045.0f, -5046.0f, -5047.0f, -5048.0f, -5049.0f, -5050.0f, -5051.0f, -5052.0f, -5053.0f, -5054.0f, -5055.0f, -5056.0f, -5057.0f, -5058.0f, -5059.0f, -5060.0f, -5061.0f, -5062.0f, -5063.0f, -5064.0f, -5065.0f, -5066.0f, -5067.0f, -5068.0f, -5069.0f, -5070.0f, -5071.0f, -5072.0f, -5073.0f, -5074.0f, -5075.0f, -5076.0f, -5077.0f, -5078.0f, -5079.0f, -5080.0f, -5081.0f, -5082.0f, -5083.0f, -5084.0f, -5085.0f, -5086.0f, -5087.0f, -5088.0f, -5089.0f, 
-5090.0f, -5091.0f, -5092.0f, -5093.0f, -5094.0f, -5095.0f, -5096.0f, -5097.0f, -5098.0f, -5099.0f, -5100.0f, -5101.0f, -5102.0f, -5103.0f, -5104.0f, -5105.0f, -5106.0f, -5107.0f, -5108.0f, -5109.0f, -5110.0f, -5111.0f, -5112.0f, -5113.0f, -5114.0f, -5115.0f, -5116.0f, -5117.0f, -5118.0f, -5119.0f, -5120.0f, -5121.0f, -5122.0f, -5123.0f, -5124.0f, -5125.0f, -5126.0f, -5127.0f, -5128.0f, -5129.0f, -5130.0f, -5131.0f, -5132.0f, -5133.0f, -5134.0f, -5135.0f, -5136.0f, -5137.0f, -5138.0f, -5139.0f, -5140.0f, -5141.0f, -5142.0f, -5143.0f, -5144.0f, -5145.0f, -5146.0f, -5147.0f, -5148.0f, -5149.0f, -5150.0f, -5151.0f, -5152.0f, -5153.0f, -5154.0f, -5155.0f, -5156.0f, -5157.0f, -5158.0f, -5159.0f, -5160.0f, -5161.0f, -5162.0f, -5163.0f, -5164.0f, -5165.0f, -5166.0f, -5167.0f, -5168.0f, -5169.0f, -5170.0f, -5171.0f, -5172.0f, -5173.0f, -5174.0f, -5175.0f, -5176.0f, -5177.0f, -5178.0f, -5179.0f, -5180.0f, -5181.0f, -5182.0f, -5183.0f, -5184.0f, -5185.0f, -5186.0f, -5187.0f, -5188.0f, -5189.0f, -5190.0f, -5191.0f, -5192.0f, -5193.0f, -5194.0f, -5195.0f, -5196.0f, -5197.0f, -5198.0f, -5199.0f, -5200.0f, -5201.0f, -5202.0f, -5203.0f, -5204.0f, -5205.0f, -5206.0f, -5207.0f, -5208.0f, -5209.0f, -5210.0f, -5211.0f, -5212.0f, -5213.0f, -5214.0f, -5215.0f, -5216.0f, -5217.0f, -5218.0f, -5219.0f, -5220.0f, -5221.0f, -5222.0f, -5223.0f, -5224.0f, -5225.0f, -5226.0f, -5227.0f, -5228.0f, -5229.0f, -5230.0f, -5231.0f, -5232.0f, -5233.0f, -5234.0f, -5235.0f, -5236.0f, -5237.0f, -5238.0f, -5239.0f, -5240.0f, -5241.0f, -5242.0f, -5243.0f, -5244.0f, -5245.0f, -5246.0f, -5247.0f, -5248.0f, -5249.0f, -5250.0f, -5251.0f, -5252.0f, -5253.0f, -5254.0f, -5255.0f, -5256.0f, -5257.0f, -5258.0f, -5259.0f, -5260.0f, -5261.0f, -5262.0f, -5263.0f, -5264.0f, -5265.0f, -5266.0f, -5267.0f, -5268.0f, -5269.0f, -5270.0f, -5271.0f, -5272.0f, -5273.0f, -5274.0f, -5275.0f, -5276.0f, -5277.0f, -5278.0f, -5279.0f, -5280.0f, -5281.0f, -5282.0f, -5283.0f, -5284.0f, -5285.0f, -5286.0f, -5287.0f, -5288.0f, -5289.0f, 
-5290.0f, -5291.0f, -5292.0f, -5293.0f, -5294.0f, -5295.0f, -5296.0f, -5297.0f, -5298.0f, -5299.0f, -5300.0f, -5301.0f, -5302.0f, -5303.0f, -5304.0f, -5305.0f, -5306.0f, -5307.0f, -5308.0f, -5309.0f, -5310.0f, -5311.0f, -5312.0f, -5313.0f, -5314.0f, -5315.0f, -5316.0f, -5317.0f, -5318.0f, -5319.0f, -5320.0f, -5321.0f, -5322.0f, -5323.0f, -5324.0f, -5325.0f, -5326.0f, -5327.0f, -5328.0f, -5329.0f, -5330.0f, -5331.0f, -5332.0f, -5333.0f, -5334.0f, -5335.0f, -5336.0f, -5337.0f, -5338.0f, -5339.0f, -5340.0f, -5341.0f, -5342.0f, -5343.0f, -5344.0f, -5345.0f, -5346.0f, -5347.0f, -5348.0f, -5349.0f, -5350.0f, -5351.0f, -5352.0f, -5353.0f, -5354.0f, -5355.0f, -5356.0f, -5357.0f, -5358.0f, -5359.0f, -5360.0f, -5361.0f, -5362.0f, -5363.0f, -5364.0f, -5365.0f, -5366.0f, -5367.0f, -5368.0f, -5369.0f, -5370.0f, -5371.0f, -5372.0f, -5373.0f, -5374.0f, -5375.0f, -5376.0f, -5377.0f, -5378.0f, -5379.0f, -5380.0f, -5381.0f, -5382.0f, -5383.0f, -5384.0f, -5385.0f, -5386.0f, -5387.0f, -5388.0f, -5389.0f, -5390.0f, -5391.0f, -5392.0f, -5393.0f, -5394.0f, -5395.0f, -5396.0f, -5397.0f, -5398.0f, -5399.0f, -5400.0f, -5401.0f, -5402.0f, -5403.0f, -5404.0f, -5405.0f, -5406.0f, -5407.0f, -5408.0f, -5409.0f, -5410.0f, -5411.0f, -5412.0f, -5413.0f, -5414.0f, -5415.0f, -5416.0f, -5417.0f, -5418.0f, -5419.0f, -5420.0f, -5421.0f, -5422.0f, -5423.0f, -5424.0f, -5425.0f, -5426.0f, -5427.0f, -5428.0f, -5429.0f, -5430.0f, -5431.0f, -5432.0f, -5433.0f, -5434.0f, -5435.0f, -5436.0f, -5437.0f, -5438.0f, -5439.0f, -5440.0f, -5441.0f, -5442.0f, -5443.0f, -5444.0f, -5445.0f, -5446.0f, -5447.0f, -5448.0f, -5449.0f, -5450.0f, -5451.0f, -5452.0f, -5453.0f, -5454.0f, -5455.0f, -5456.0f, -5457.0f, -5458.0f, -5459.0f, -5460.0f, -5461.0f, -5462.0f, -5463.0f, -5464.0f, -5465.0f, -5466.0f, -5467.0f, -5468.0f, -5469.0f, -5470.0f, -5471.0f, -5472.0f, -5473.0f, -5474.0f, -5475.0f, -5476.0f, -5477.0f, -5478.0f, -5479.0f, -5480.0f, -5481.0f, -5482.0f, -5483.0f, -5484.0f, -5485.0f, -5486.0f, -5487.0f, -5488.0f, -5489.0f, 
-5490.0f, -5491.0f, -5492.0f, -5493.0f, -5494.0f, -5495.0f, -5496.0f, -5497.0f, -5498.0f, -5499.0f, -5500.0f, -5501.0f, -5502.0f, -5503.0f, -5504.0f, -5505.0f, -5506.0f, -5507.0f, -5508.0f, -5509.0f, -5510.0f, -5511.0f, -5512.0f, -5513.0f, -5514.0f, -5515.0f, -5516.0f, -5517.0f, -5518.0f, -5519.0f, -5520.0f, -5521.0f, -5522.0f, -5523.0f, -5524.0f, -5525.0f, -5526.0f, -5527.0f, -5528.0f, -5529.0f, -5530.0f, -5531.0f, -5532.0f, -5533.0f, -5534.0f, -5535.0f, -5536.0f, -5537.0f, -5538.0f, -5539.0f, -5540.0f, -5541.0f, -5542.0f, -5543.0f, -5544.0f, -5545.0f, -5546.0f, -5547.0f, -5548.0f, -5549.0f, -5550.0f, -5551.0f, -5552.0f, -5553.0f, -5554.0f, -5555.0f, -5556.0f, -5557.0f, -5558.0f, -5559.0f, -5560.0f, -5561.0f, -5562.0f, -5563.0f, -5564.0f, -5565.0f, -5566.0f, -5567.0f, -5568.0f, -5569.0f, -5570.0f, -5571.0f, -5572.0f, -5573.0f, -5574.0f, -5575.0f, -5576.0f, -5577.0f, -5578.0f, -5579.0f, -5580.0f, -5581.0f, -5582.0f, -5583.0f, -5584.0f, -5585.0f, -5586.0f, -5587.0f, -5588.0f, -5589.0f, -5590.0f, -5591.0f, -5592.0f, -5593.0f, -5594.0f, -5595.0f, -5596.0f, -5597.0f, -5598.0f, -5599.0f, -5600.0f, -5601.0f, -5602.0f, -5603.0f, -5604.0f, -5605.0f, -5606.0f, -5607.0f, -5608.0f, -5609.0f, -5610.0f, -5611.0f, -5612.0f, -5613.0f, -5614.0f, -5615.0f, -5616.0f, -5617.0f, -5618.0f, -5619.0f, -5620.0f, -5621.0f, -5622.0f, -5623.0f, -5624.0f, -5625.0f, -5626.0f, -5627.0f, -5628.0f, -5629.0f, -5630.0f, -5631.0f, -5632.0f, -5633.0f, -5634.0f, -5635.0f, -5636.0f, -5637.0f, -5638.0f, -5639.0f, -5640.0f, -5641.0f, -5642.0f, -5643.0f, -5644.0f, -5645.0f, -5646.0f, -5647.0f, -5648.0f, -5649.0f, -5650.0f, -5651.0f, -5652.0f, -5653.0f, -5654.0f, -5655.0f, -5656.0f, -5657.0f, -5658.0f, -5659.0f, -5660.0f, -5661.0f, -5662.0f, -5663.0f, -5664.0f, -5665.0f, -5666.0f, -5667.0f, -5668.0f, -5669.0f, -5670.0f, -5671.0f, -5672.0f, -5673.0f, -5674.0f, -5675.0f, -5676.0f, -5677.0f, -5678.0f, -5679.0f, -5680.0f, -5681.0f, -5682.0f, -5683.0f, -5684.0f, -5685.0f, -5686.0f, -5687.0f, -5688.0f, -5689.0f, 
-5690.0f, -5691.0f, -5692.0f, -5693.0f, -5694.0f, -5695.0f, -5696.0f, -5697.0f, -5698.0f, -5699.0f, -5700.0f, -5701.0f, -5702.0f, -5703.0f, -5704.0f, -5705.0f, -5706.0f, -5707.0f, -5708.0f, -5709.0f, -5710.0f, -5711.0f, -5712.0f, -5713.0f, -5714.0f, -5715.0f, -5716.0f, -5717.0f, -5718.0f, -5719.0f, -5720.0f, -5721.0f, -5722.0f, -5723.0f, -5724.0f, -5725.0f, -5726.0f, -5727.0f, -5728.0f, -5729.0f, -5730.0f, -5731.0f, -5732.0f, -5733.0f, -5734.0f, -5735.0f, -5736.0f, -5737.0f, -5738.0f, -5739.0f, -5740.0f, -5741.0f, -5742.0f, -5743.0f, -5744.0f, -5745.0f, -5746.0f, -5747.0f, -5748.0f, -5749.0f, -5750.0f, -5751.0f, -5752.0f, -5753.0f, -5754.0f, -5755.0f, -5756.0f, -5757.0f, -5758.0f, -5759.0f, -5760.0f, -5761.0f, -5762.0f, -5763.0f, -5764.0f, -5765.0f, -5766.0f, -5767.0f, -5768.0f, -5769.0f, -5770.0f, -5771.0f, -5772.0f, -5773.0f, -5774.0f, -5775.0f, -5776.0f, -5777.0f, -5778.0f, -5779.0f, -5780.0f, -5781.0f, -5782.0f, -5783.0f, -5784.0f, -5785.0f, -5786.0f, -5787.0f, -5788.0f, -5789.0f, -5790.0f, -5791.0f, -5792.0f, -5793.0f, -5794.0f, -5795.0f, -5796.0f, -5797.0f, -5798.0f, -5799.0f, -5800.0f, -5801.0f, -5802.0f, -5803.0f, -5804.0f, -5805.0f, -5806.0f, -5807.0f, -5808.0f, -5809.0f, -5810.0f, -5811.0f, -5812.0f, -5813.0f, -5814.0f, -5815.0f, -5816.0f, -5817.0f, -5818.0f, -5819.0f, -5820.0f, -5821.0f, -5822.0f, -5823.0f, -5824.0f, -5825.0f, -5826.0f, -5827.0f, -5828.0f, -5829.0f, -5830.0f, -5831.0f, -5832.0f, -5833.0f, -5834.0f, -5835.0f, -5836.0f, -5837.0f, -5838.0f, -5839.0f, -5840.0f, -5841.0f, -5842.0f, -5843.0f, -5844.0f, -5845.0f, -5846.0f, -5847.0f, -5848.0f, -5849.0f, -5850.0f, -5851.0f, -5852.0f, -5853.0f, -5854.0f, -5855.0f, -5856.0f, -5857.0f, -5858.0f, -5859.0f, -5860.0f, -5861.0f, -5862.0f, -5863.0f, -5864.0f, -5865.0f, -5866.0f, -5867.0f, -5868.0f, -5869.0f, -5870.0f, -5871.0f, -5872.0f, -5873.0f, -5874.0f, -5875.0f, -5876.0f, -5877.0f, -5878.0f, -5879.0f, -5880.0f, -5881.0f, -5882.0f, -5883.0f, -5884.0f, -5885.0f, -5886.0f, -5887.0f, -5888.0f, -5889.0f, 
-5890.0f, -5891.0f, -5892.0f, -5893.0f, -5894.0f, -5895.0f, -5896.0f, -5897.0f, -5898.0f, -5899.0f, -5900.0f, -5901.0f, -5902.0f, -5903.0f, -5904.0f, -5905.0f, -5906.0f, -5907.0f, -5908.0f, -5909.0f, -5910.0f, -5911.0f, -5912.0f, -5913.0f, -5914.0f, -5915.0f, -5916.0f, -5917.0f, -5918.0f, -5919.0f, -5920.0f, -5921.0f, -5922.0f, -5923.0f, -5924.0f, -5925.0f, -5926.0f, -5927.0f, -5928.0f, -5929.0f, -5930.0f, -5931.0f, -5932.0f, -5933.0f, -5934.0f, -5935.0f, -5936.0f, -5937.0f, -5938.0f, -5939.0f, -5940.0f, -5941.0f, -5942.0f, -5943.0f, -5944.0f, -5945.0f, -5946.0f, -5947.0f, -5948.0f, -5949.0f, -5950.0f, -5951.0f, -5952.0f, -5953.0f, -5954.0f, -5955.0f, -5956.0f, -5957.0f, -5958.0f, -5959.0f, -5960.0f, -5961.0f, -5962.0f, -5963.0f, -5964.0f, -5965.0f, -5966.0f, -5967.0f, -5968.0f, -5969.0f, -5970.0f, -5971.0f, -5972.0f, -5973.0f, -5974.0f, -5975.0f, -5976.0f, -5977.0f, -5978.0f, -5979.0f, -5980.0f, -5981.0f, -5982.0f, -5983.0f, -5984.0f, -5985.0f, -5986.0f, -5987.0f, -5988.0f, -5989.0f, -5990.0f, -5991.0f, -5992.0f, -5993.0f, -5994.0f, -5995.0f, -5996.0f, -5997.0f, -5998.0f, -5999.0f, -6000.0f, -6001.0f, -6002.0f, -6003.0f, -6004.0f, -6005.0f, -6006.0f, -6007.0f, -6008.0f, -6009.0f, -6010.0f, -6011.0f, -6012.0f, -6013.0f, -6014.0f, -6015.0f, -6016.0f, -6017.0f, -6018.0f, -6019.0f, -6020.0f, -6021.0f, -6022.0f, -6023.0f, -6024.0f, -6025.0f, -6026.0f, -6027.0f, -6028.0f, -6029.0f, -6030.0f, -6031.0f, -6032.0f, -6033.0f, -6034.0f, -6035.0f, -6036.0f, -6037.0f, -6038.0f, -6039.0f, -6040.0f, -6041.0f, -6042.0f, -6043.0f, -6044.0f, -6045.0f, -6046.0f, -6047.0f, -6048.0f, -6049.0f, -6050.0f, -6051.0f, -6052.0f, -6053.0f, -6054.0f, -6055.0f, -6056.0f, -6057.0f, -6058.0f, -6059.0f, -6060.0f, -6061.0f, -6062.0f, -6063.0f, -6064.0f, -6065.0f, -6066.0f, -6067.0f, -6068.0f, -6069.0f, -6070.0f, -6071.0f, -6072.0f, -6073.0f, -6074.0f, -6075.0f, -6076.0f, -6077.0f, -6078.0f, -6079.0f, -6080.0f, -6081.0f, -6082.0f, -6083.0f, -6084.0f, -6085.0f, -6086.0f, -6087.0f, -6088.0f, -6089.0f, 
-6090.0f, -6091.0f, -6092.0f, -6093.0f, -6094.0f, -6095.0f, -6096.0f, -6097.0f, -6098.0f, -6099.0f, -6100.0f, -6101.0f, -6102.0f, -6103.0f, -6104.0f, -6105.0f, -6106.0f, -6107.0f, -6108.0f, -6109.0f, -6110.0f, -6111.0f, -6112.0f, -6113.0f, -6114.0f, -6115.0f, -6116.0f, -6117.0f, -6118.0f, -6119.0f, -6120.0f, -6121.0f, -6122.0f, -6123.0f, -6124.0f, -6125.0f, -6126.0f, -6127.0f, -6128.0f, -6129.0f, -6130.0f, -6131.0f, -6132.0f, -6133.0f, -6134.0f, -6135.0f, -6136.0f, -6137.0f, -6138.0f, -6139.0f, -6140.0f, -6141.0f, -6142.0f, -6143.0f, -6144.0f, -6145.0f, -6146.0f, -6147.0f, -6148.0f, -6149.0f, -6150.0f, -6151.0f, -6152.0f, -6153.0f, -6154.0f, -6155.0f, -6156.0f, -6157.0f, -6158.0f, -6159.0f, -6160.0f, -6161.0f, -6162.0f, -6163.0f, -6164.0f, -6165.0f, -6166.0f, -6167.0f, -6168.0f, -6169.0f, -6170.0f, -6171.0f, -6172.0f, -6173.0f, -6174.0f, -6175.0f, -6176.0f, -6177.0f, -6178.0f, -6179.0f, -6180.0f, -6181.0f, -6182.0f, -6183.0f, -6184.0f, -6185.0f, -6186.0f, -6187.0f, -6188.0f, -6189.0f, -6190.0f, -6191.0f, -6192.0f, -6193.0f, -6194.0f, -6195.0f, -6196.0f, -6197.0f, -6198.0f, -6199.0f, -6200.0f, -6201.0f, -6202.0f, -6203.0f, -6204.0f, -6205.0f, -6206.0f, -6207.0f, -6208.0f, -6209.0f, -6210.0f, -6211.0f, -6212.0f, -6213.0f, -6214.0f, -6215.0f, -6216.0f, -6217.0f, -6218.0f, -6219.0f, -6220.0f, -6221.0f, -6222.0f, -6223.0f, -6224.0f, -6225.0f, -6226.0f, -6227.0f, -6228.0f, -6229.0f, -6230.0f, -6231.0f, -6232.0f, -6233.0f, -6234.0f, -6235.0f, -6236.0f, -6237.0f, -6238.0f, -6239.0f, -6240.0f, -6241.0f, -6242.0f, -6243.0f, -6244.0f, -6245.0f, -6246.0f, -6247.0f, -6248.0f, -6249.0f, -6250.0f, -6251.0f, -6252.0f, -6253.0f, -6254.0f, -6255.0f, -6256.0f, -6257.0f, -6258.0f, -6259.0f, -6260.0f, -6261.0f, -6262.0f, -6263.0f, -6264.0f, -6265.0f, -6266.0f, -6267.0f, -6268.0f, -6269.0f, -6270.0f, -6271.0f, -6272.0f, -6273.0f, -6274.0f, -6275.0f, -6276.0f, -6277.0f, -6278.0f, -6279.0f, -6280.0f, -6281.0f, -6282.0f, -6283.0f, -6284.0f, -6285.0f, -6286.0f, -6287.0f, -6288.0f, -6289.0f, 
-6290.0f, -6291.0f, -6292.0f, -6293.0f, -6294.0f, -6295.0f, -6296.0f, -6297.0f, -6298.0f, -6299.0f, -6300.0f, -6301.0f, -6302.0f, -6303.0f, -6304.0f, -6305.0f, -6306.0f, -6307.0f, -6308.0f, -6309.0f, -6310.0f, -6311.0f, -6312.0f, -6313.0f, -6314.0f, -6315.0f, -6316.0f, -6317.0f, -6318.0f, -6319.0f, -6320.0f, -6321.0f, -6322.0f, -6323.0f, -6324.0f, -6325.0f, -6326.0f, -6327.0f, -6328.0f, -6329.0f, -6330.0f, -6331.0f, -6332.0f, -6333.0f, -6334.0f, -6335.0f, -6336.0f, -6337.0f, -6338.0f, -6339.0f, -6340.0f, -6341.0f, -6342.0f, -6343.0f, -6344.0f, -6345.0f, -6346.0f, -6347.0f, -6348.0f, -6349.0f, -6350.0f, -6351.0f, -6352.0f, -6353.0f, -6354.0f, -6355.0f, -6356.0f, -6357.0f, -6358.0f, -6359.0f}}},
@@ -25,7 +26,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, 31.0f, 32.0f, 33.0f, 34.0f, 35.0f, 36.0f, 37.0f, 38.0f, 39.0f, 40.0f, 41.0f, 42.0f, 43.0f, 44.0f, 45.0f, 46.0f, 47.0f, 48.0f, 49.0f, 50.0f, 51.0f, 52.0f, 53.0f, 54.0f, 55.0f, 56.0f, 57.0f, 58.0f, 59.0f, 0.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, -6.0f, -7.0f, -8.0f, -9.0f, -10.0f, -11.0f, -12.0f, -13.0f, -14.0f, -15.0f, -16.0f, -17.0f, -18.0f, -19.0f, -20.0f, -21.0f, -22.0f, -23.0f, -24.0f, -25.0f, -26.0f, -27.0f, -28.0f, -29.0f, 60.0f, 61.0f, 62.0f, 63.0f, 64.0f, 65.0f, 66.0f, 67.0f, 68.0f, 69.0f, 70.0f, 71.0f, 72.0f, 73.0f, 74.0f, 75.0f, 76.0f, 77.0f, 78.0f, 79.0f, 80.0f, 81.0f, 82.0f, 83.0f, 84.0f, 85.0f, 86.0f, 87.0f, 88.0f, 89.0f, 90.0f, 91.0f, 92.0f, 93.0f, 94.0f, 95.0f, 96.0f, 97.0f, 98.0f, 99.0f, 100.0f, 101.0f, 102.0f, 103.0f, 104.0f, 105.0f, 106.0f, 107.0f, 108.0f, 109.0f, 110.0f, 111.0f, 112.0f, 113.0f, 114.0f, 115.0f, 116.0f, 117.0f, 118.0f, 119.0f, -30.0f, -31.0f, -32.0f, -33.0f, -34.0f, -35.0f, -36.0f, -37.0f, -38.0f, -39.0f, -40.0f, -41.0f, -42.0f, -43.0f, -44.0f, -45.0f, -46.0f, -47.0f, -48.0f, -49.0f, -50.0f, -51.0f, -52.0f, -53.0f, -54.0f, -55.0f, -56.0f, -57.0f, -58.0f, -59.0f, 120.0f, 121.0f, 122.0f, 123.0f, 124.0f, 125.0f, 126.0f, 127.0f, 128.0f, 129.0f, 130.0f, 131.0f, 132.0f, 133.0f, 134.0f, 135.0f, 136.0f, 137.0f, 138.0f, 139.0f, 140.0f, 141.0f, 142.0f, 143.0f, 144.0f, 145.0f, 146.0f, 147.0f, 148.0f, 149.0f, 150.0f, 151.0f, 152.0f, 153.0f, 154.0f, 155.0f, 156.0f, 157.0f, 158.0f, 159.0f, 160.0f, 161.0f, 162.0f, 163.0f, 164.0f, 165.0f, 166.0f, 167.0f, 168.0f, 169.0f, 170.0f, 171.0f, 172.0f, 173.0f, 174.0f, 175.0f, 176.0f, 177.0f, 178.0f, 179.0f, -60.0f, -61.0f, -62.0f, -63.0f, -64.0f, -65.0f, -66.0f, -67.0f, -68.0f, -69.0f, -70.0f, -71.0f, -72.0f, -73.0f, -74.0f, -75.0f, -76.0f, -77.0f, -78.0f, -79.0f, -80.0f, -81.0f, -82.0f, -83.0f, 
-84.0f, -85.0f, -86.0f, -87.0f, -88.0f, -89.0f, 180.0f, 181.0f, 182.0f, 183.0f, 184.0f, 185.0f, 186.0f, 187.0f, 188.0f, 189.0f, 190.0f, 191.0f, 192.0f, 193.0f, 194.0f, 195.0f, 196.0f, 197.0f, 198.0f, 199.0f, 200.0f, 201.0f, 202.0f, 203.0f, 204.0f, 205.0f, 206.0f, 207.0f, 208.0f, 209.0f, 210.0f, 211.0f, 212.0f, 213.0f, 214.0f, 215.0f, 216.0f, 217.0f, 218.0f, 219.0f, 220.0f, 221.0f, 222.0f, 223.0f, 224.0f, 225.0f, 226.0f, 227.0f, 228.0f, 229.0f, 230.0f, 231.0f, 232.0f, 233.0f, 234.0f, 235.0f, 236.0f, 237.0f, 238.0f, 239.0f, -90.0f, -91.0f, -92.0f, -93.0f, -94.0f, -95.0f, -96.0f, -97.0f, -98.0f, -99.0f, -100.0f, -101.0f, -102.0f, -103.0f, -104.0f, -105.0f, -106.0f, -107.0f, -108.0f, -109.0f, -110.0f, -111.0f, -112.0f, -113.0f, -114.0f, -115.0f, -116.0f, -117.0f, -118.0f, -119.0f, 240.0f, 241.0f, 242.0f, 243.0f, 244.0f, 245.0f, 246.0f, 247.0f, 248.0f, 249.0f, 250.0f, 251.0f, 252.0f, 253.0f, 254.0f, 255.0f, 256.0f, 257.0f, 258.0f, 259.0f, 260.0f, 261.0f, 262.0f, 263.0f, 264.0f, 265.0f, 266.0f, 267.0f, 268.0f, 269.0f, 270.0f, 271.0f, 272.0f, 273.0f, 274.0f, 275.0f, 276.0f, 277.0f, 278.0f, 279.0f, 280.0f, 281.0f, 282.0f, 283.0f, 284.0f, 285.0f, 286.0f, 287.0f, 288.0f, 289.0f, 290.0f, 291.0f, 292.0f, 293.0f, 294.0f, 295.0f, 296.0f, 297.0f, 298.0f, 299.0f, -120.0f, -121.0f, -122.0f, -123.0f, -124.0f, -125.0f, -126.0f, -127.0f, -128.0f, -129.0f, -130.0f, -131.0f, -132.0f, -133.0f, -134.0f, -135.0f, -136.0f, -137.0f, -138.0f, -139.0f, -140.0f, -141.0f, -142.0f, -143.0f, -144.0f, -145.0f, -146.0f, -147.0f, -148.0f, -149.0f, 300.0f, 301.0f, 302.0f, 303.0f, 304.0f, 305.0f, 306.0f, 307.0f, 308.0f, 309.0f, 310.0f, 311.0f, 312.0f, 313.0f, 314.0f, 315.0f, 316.0f, 317.0f, 318.0f, 319.0f, 320.0f, 321.0f, 322.0f, 323.0f, 324.0f, 325.0f, 326.0f, 327.0f, 328.0f, 329.0f, 330.0f, 331.0f, 332.0f, 333.0f, 334.0f, 335.0f, 336.0f, 337.0f, 338.0f, 339.0f, 340.0f, 341.0f, 342.0f, 343.0f, 344.0f, 345.0f, 346.0f, 347.0f, 348.0f, 349.0f, 350.0f, 351.0f, 352.0f, 353.0f, 354.0f, 355.0f, 356.0f, 
357.0f, 358.0f, 359.0f, -150.0f, -151.0f, -152.0f, -153.0f, -154.0f, -155.0f, -156.0f, -157.0f, -158.0f, -159.0f, -160.0f, -161.0f, -162.0f, -163.0f, -164.0f, -165.0f, -166.0f, -167.0f, -168.0f, -169.0f, -170.0f, -171.0f, -172.0f, -173.0f, -174.0f, -175.0f, -176.0f, -177.0f, -178.0f, -179.0f, 360.0f, 361.0f, 362.0f, 363.0f, 364.0f, 365.0f, 366.0f, 367.0f, 368.0f, 369.0f, 370.0f, 371.0f, 372.0f, 373.0f, 374.0f, 375.0f, 376.0f, 377.0f, 378.0f, 379.0f, 380.0f, 381.0f, 382.0f, 383.0f, 384.0f, 385.0f, 386.0f, 387.0f, 388.0f, 389.0f, 390.0f, 391.0f, 392.0f, 393.0f, 394.0f, 395.0f, 396.0f, 397.0f, 398.0f, 399.0f, 400.0f, 401.0f, 402.0f, 403.0f, 404.0f, 405.0f, 406.0f, 407.0f, 408.0f, 409.0f, 410.0f, 411.0f, 412.0f, 413.0f, 414.0f, 415.0f, 416.0f, 417.0f, 418.0f, 419.0f, -180.0f, -181.0f, -182.0f, -183.0f, -184.0f, -185.0f, -186.0f, -187.0f, -188.0f, -189.0f, -190.0f, -191.0f, -192.0f, -193.0f, -194.0f, -195.0f, -196.0f, -197.0f, -198.0f, -199.0f, -200.0f, -201.0f, -202.0f, -203.0f, -204.0f, -205.0f, -206.0f, -207.0f, -208.0f, -209.0f, 420.0f, 421.0f, 422.0f, 423.0f, 424.0f, 425.0f, 426.0f, 427.0f, 428.0f, 429.0f, 430.0f, 431.0f, 432.0f, 433.0f, 434.0f, 435.0f, 436.0f, 437.0f, 438.0f, 439.0f, 440.0f, 441.0f, 442.0f, 443.0f, 444.0f, 445.0f, 446.0f, 447.0f, 448.0f, 449.0f, 450.0f, 451.0f, 452.0f, 453.0f, 454.0f, 455.0f, 456.0f, 457.0f, 458.0f, 459.0f, 460.0f, 461.0f, 462.0f, 463.0f, 464.0f, 465.0f, 466.0f, 467.0f, 468.0f, 469.0f, 470.0f, 471.0f, 472.0f, 473.0f, 474.0f, 475.0f, 476.0f, 477.0f, 478.0f, 479.0f, -210.0f, -211.0f, -212.0f, -213.0f, -214.0f, -215.0f, -216.0f, -217.0f, -218.0f, -219.0f, -220.0f, -221.0f, -222.0f, -223.0f, -224.0f, -225.0f, -226.0f, -227.0f, -228.0f, -229.0f, -230.0f, -231.0f, -232.0f, -233.0f, -234.0f, -235.0f, -236.0f, -237.0f, -238.0f, -239.0f, 480.0f, 481.0f, 482.0f, 483.0f, 484.0f, 485.0f, 486.0f, 487.0f, 488.0f, 489.0f, 490.0f, 491.0f, 492.0f, 493.0f, 494.0f, 495.0f, 496.0f, 497.0f, 498.0f, 499.0f, 500.0f, 501.0f, 502.0f, 503.0f, 504.0f, 
505.0f, 506.0f, 507.0f, 508.0f, 509.0f, 510.0f, 511.0f, 512.0f, 513.0f, 514.0f, 515.0f, 516.0f, 517.0f, 518.0f, 519.0f, 520.0f, 521.0f, 522.0f, 523.0f, 524.0f, 525.0f, 526.0f, 527.0f, 528.0f, 529.0f, 530.0f, 531.0f, 532.0f, 533.0f, 534.0f, 535.0f, 536.0f, 537.0f, 538.0f, 539.0f, -240.0f, -241.0f, -242.0f, -243.0f, -244.0f, -245.0f, -246.0f, -247.0f, -248.0f, -249.0f, -250.0f, -251.0f, -252.0f, -253.0f, -254.0f, -255.0f, -256.0f, -257.0f, -258.0f, -259.0f, -260.0f, -261.0f, -262.0f, -263.0f, -264.0f, -265.0f, -266.0f, -267.0f, -268.0f, -269.0f, 540.0f, 541.0f, 542.0f, 543.0f, 544.0f, 545.0f, 546.0f, 547.0f, 548.0f, 549.0f, 550.0f, 551.0f, 552.0f, 553.0f, 554.0f, 555.0f, 556.0f, 557.0f, 558.0f, 559.0f, 560.0f, 561.0f, 562.0f, 563.0f, 564.0f, 565.0f, 566.0f, 567.0f, 568.0f, 569.0f, 570.0f, 571.0f, 572.0f, 573.0f, 574.0f, 575.0f, 576.0f, 577.0f, 578.0f, 579.0f, 580.0f, 581.0f, 582.0f, 583.0f, 584.0f, 585.0f, 586.0f, 587.0f, 588.0f, 589.0f, 590.0f, 591.0f, 592.0f, 593.0f, 594.0f, 595.0f, 596.0f, 597.0f, 598.0f, 599.0f, -270.0f, -271.0f, -272.0f, -273.0f, -274.0f, -275.0f, -276.0f, -277.0f, -278.0f, -279.0f, -280.0f, -281.0f, -282.0f, -283.0f, -284.0f, -285.0f, -286.0f, -287.0f, -288.0f, -289.0f, -290.0f, -291.0f, -292.0f, -293.0f, -294.0f, -295.0f, -296.0f, -297.0f, -298.0f, -299.0f, 600.0f, 601.0f, 602.0f, 603.0f, 604.0f, 605.0f, 606.0f, 607.0f, 608.0f, 609.0f, 610.0f, 611.0f, 612.0f, 613.0f, 614.0f, 615.0f, 616.0f, 617.0f, 618.0f, 619.0f, 620.0f, 621.0f, 622.0f, 623.0f, 624.0f, 625.0f, 626.0f, 627.0f, 628.0f, 629.0f, 630.0f, 631.0f, 632.0f, 633.0f, 634.0f, 635.0f, 636.0f, 637.0f, 638.0f, 639.0f, 640.0f, 641.0f, 642.0f, 643.0f, 644.0f, 645.0f, 646.0f, 647.0f, 648.0f, 649.0f, 650.0f, 651.0f, 652.0f, 653.0f, 654.0f, 655.0f, 656.0f, 657.0f, 658.0f, 659.0f, -300.0f, -301.0f, -302.0f, -303.0f, -304.0f, -305.0f, -306.0f, -307.0f, -308.0f, -309.0f, -310.0f, -311.0f, -312.0f, -313.0f, -314.0f, -315.0f, -316.0f, -317.0f, -318.0f, -319.0f, -320.0f, -321.0f, -322.0f, -323.0f, 
-324.0f, -325.0f, -326.0f, -327.0f, -328.0f, -329.0f, 660.0f, 661.0f, 662.0f, 663.0f, 664.0f, 665.0f, 666.0f, 667.0f, 668.0f, 669.0f, 670.0f, 671.0f, 672.0f, 673.0f, 674.0f, 675.0f, 676.0f, 677.0f, 678.0f, 679.0f, 680.0f, 681.0f, 682.0f, 683.0f, 684.0f, 685.0f, 686.0f, 687.0f, 688.0f, 689.0f, 690.0f, 691.0f, 692.0f, 693.0f, 694.0f, 695.0f, 696.0f, 697.0f, 698.0f, 699.0f, 700.0f, 701.0f, 702.0f, 703.0f, 704.0f, 705.0f, 706.0f, 707.0f, 708.0f, 709.0f, 710.0f, 711.0f, 712.0f, 713.0f, 714.0f, 715.0f, 716.0f, 717.0f, 718.0f, 719.0f, -330.0f, -331.0f, -332.0f, -333.0f, -334.0f, -335.0f, -336.0f, -337.0f, -338.0f, -339.0f, -340.0f, -341.0f, -342.0f, -343.0f, -344.0f, -345.0f, -346.0f, -347.0f, -348.0f, -349.0f, -350.0f, -351.0f, -352.0f, -353.0f, -354.0f, -355.0f, -356.0f, -357.0f, -358.0f, -359.0f, 720.0f, 721.0f, 722.0f, 723.0f, 724.0f, 725.0f, 726.0f, 727.0f, 728.0f, 729.0f, 730.0f, 731.0f, 732.0f, 733.0f, 734.0f, 735.0f, 736.0f, 737.0f, 738.0f, 739.0f, 740.0f, 741.0f, 742.0f, 743.0f, 744.0f, 745.0f, 746.0f, 747.0f, 748.0f, 749.0f, 750.0f, 751.0f, 752.0f, 753.0f, 754.0f, 755.0f, 756.0f, 757.0f, 758.0f, 759.0f, 760.0f, 761.0f, 762.0f, 763.0f, 764.0f, 765.0f, 766.0f, 767.0f, 768.0f, 769.0f, 770.0f, 771.0f, 772.0f, 773.0f, 774.0f, 775.0f, 776.0f, 777.0f, 778.0f, 779.0f, -360.0f, -361.0f, -362.0f, -363.0f, -364.0f, -365.0f, -366.0f, -367.0f, -368.0f, -369.0f, -370.0f, -371.0f, -372.0f, -373.0f, -374.0f, -375.0f, -376.0f, -377.0f, -378.0f, -379.0f, -380.0f, -381.0f, -382.0f, -383.0f, -384.0f, -385.0f, -386.0f, -387.0f, -388.0f, -389.0f, 780.0f, 781.0f, 782.0f, 783.0f, 784.0f, 785.0f, 786.0f, 787.0f, 788.0f, 789.0f, 790.0f, 791.0f, 792.0f, 793.0f, 794.0f, 795.0f, 796.0f, 797.0f, 798.0f, 799.0f, 800.0f, 801.0f, 802.0f, 803.0f, 804.0f, 805.0f, 806.0f, 807.0f, 808.0f, 809.0f, 810.0f, 811.0f, 812.0f, 813.0f, 814.0f, 815.0f, 816.0f, 817.0f, 818.0f, 819.0f, 820.0f, 821.0f, 822.0f, 823.0f, 824.0f, 825.0f, 826.0f, 827.0f, 828.0f, 829.0f, 830.0f, 831.0f, 832.0f, 833.0f, 834.0f, 
835.0f, 836.0f, 837.0f, 838.0f, 839.0f, -390.0f, -391.0f, -392.0f, -393.0f, -394.0f, -395.0f, -396.0f, -397.0f, -398.0f, -399.0f, -400.0f, -401.0f, -402.0f, -403.0f, -404.0f, -405.0f, -406.0f, -407.0f, -408.0f, -409.0f, -410.0f, -411.0f, -412.0f, -413.0f, -414.0f, -415.0f, -416.0f, -417.0f, -418.0f, -419.0f, 840.0f, 841.0f, 842.0f, 843.0f, 844.0f, 845.0f, 846.0f, 847.0f, 848.0f, 849.0f, 850.0f, 851.0f, 852.0f, 853.0f, 854.0f, 855.0f, 856.0f, 857.0f, 858.0f, 859.0f, 860.0f, 861.0f, 862.0f, 863.0f, 864.0f, 865.0f, 866.0f, 867.0f, 868.0f, 869.0f, 870.0f, 871.0f, 872.0f, 873.0f, 874.0f, 875.0f, 876.0f, 877.0f, 878.0f, 879.0f, 880.0f, 881.0f, 882.0f, 883.0f, 884.0f, 885.0f, 886.0f, 887.0f, 888.0f, 889.0f, 890.0f, 891.0f, 892.0f, 893.0f, 894.0f, 895.0f, 896.0f, 897.0f, 898.0f, 899.0f, -420.0f, -421.0f, -422.0f, -423.0f, -424.0f, -425.0f, -426.0f, -427.0f, -428.0f, -429.0f, -430.0f, -431.0f, -432.0f, -433.0f, -434.0f, -435.0f, -436.0f, -437.0f, -438.0f, -439.0f, -440.0f, -441.0f, -442.0f, -443.0f, -444.0f, -445.0f, -446.0f, -447.0f, -448.0f, -449.0f, 900.0f, 901.0f, 902.0f, 903.0f, 904.0f, 905.0f, 906.0f, 907.0f, 908.0f, 909.0f, 910.0f, 911.0f, 912.0f, 913.0f, 914.0f, 915.0f, 916.0f, 917.0f, 918.0f, 919.0f, 920.0f, 921.0f, 922.0f, 923.0f, 924.0f, 925.0f, 926.0f, 927.0f, 928.0f, 929.0f, 930.0f, 931.0f, 932.0f, 933.0f, 934.0f, 935.0f, 936.0f, 937.0f, 938.0f, 939.0f, 940.0f, 941.0f, 942.0f, 943.0f, 944.0f, 945.0f, 946.0f, 947.0f, 948.0f, 949.0f, 950.0f, 951.0f, 952.0f, 953.0f, 954.0f, 955.0f, 956.0f, 957.0f, 958.0f, 959.0f, -450.0f, -451.0f, -452.0f, -453.0f, -454.0f, -455.0f, -456.0f, -457.0f, -458.0f, -459.0f, -460.0f, -461.0f, -462.0f, -463.0f, -464.0f, -465.0f, -466.0f, -467.0f, -468.0f, -469.0f, -470.0f, -471.0f, -472.0f, -473.0f, -474.0f, -475.0f, -476.0f, -477.0f, -478.0f, -479.0f, 960.0f, 961.0f, 962.0f, 963.0f, 964.0f, 965.0f, 966.0f, 967.0f, 968.0f, 969.0f, 970.0f, 971.0f, 972.0f, 973.0f, 974.0f, 975.0f, 976.0f, 977.0f, 978.0f, 979.0f, 980.0f, 981.0f, 982.0f, 
983.0f, 984.0f, 985.0f, 986.0f, 987.0f, 988.0f, 989.0f, 990.0f, 991.0f, 992.0f, 993.0f, 994.0f, 995.0f, 996.0f, 997.0f, 998.0f, 999.0f, 1000.0f, 1001.0f, 1002.0f, 1003.0f, 1004.0f, 1005.0f, 1006.0f, 1007.0f, 1008.0f, 1009.0f, 1010.0f, 1011.0f, 1012.0f, 1013.0f, 1014.0f, 1015.0f, 1016.0f, 1017.0f, 1018.0f, 1019.0f, -480.0f, -481.0f, -482.0f, -483.0f, -484.0f, -485.0f, -486.0f, -487.0f, -488.0f, -489.0f, -490.0f, -491.0f, -492.0f, -493.0f, -494.0f, -495.0f, -496.0f, -497.0f, -498.0f, -499.0f, -500.0f, -501.0f, -502.0f, -503.0f, -504.0f, -505.0f, -506.0f, -507.0f, -508.0f, -509.0f, 1020.0f, 1021.0f, 1022.0f, 1023.0f, 1024.0f, 1025.0f, 1026.0f, 1027.0f, 1028.0f, 1029.0f, 1030.0f, 1031.0f, 1032.0f, 1033.0f, 1034.0f, 1035.0f, 1036.0f, 1037.0f, 1038.0f, 1039.0f, 1040.0f, 1041.0f, 1042.0f, 1043.0f, 1044.0f, 1045.0f, 1046.0f, 1047.0f, 1048.0f, 1049.0f, 1050.0f, 1051.0f, 1052.0f, 1053.0f, 1054.0f, 1055.0f, 1056.0f, 1057.0f, 1058.0f, 1059.0f, 1060.0f, 1061.0f, 1062.0f, 1063.0f, 1064.0f, 1065.0f, 1066.0f, 1067.0f, 1068.0f, 1069.0f, 1070.0f, 1071.0f, 1072.0f, 1073.0f, 1074.0f, 1075.0f, 1076.0f, 1077.0f, 1078.0f, 1079.0f, -510.0f, -511.0f, -512.0f, -513.0f, -514.0f, -515.0f, -516.0f, -517.0f, -518.0f, -519.0f, -520.0f, -521.0f, -522.0f, -523.0f, -524.0f, -525.0f, -526.0f, -527.0f, -528.0f, -529.0f, -530.0f, -531.0f, -532.0f, -533.0f, -534.0f, -535.0f, -536.0f, -537.0f, -538.0f, -539.0f, 1080.0f, 1081.0f, 1082.0f, 1083.0f, 1084.0f, 1085.0f, 1086.0f, 1087.0f, 1088.0f, 1089.0f, 1090.0f, 1091.0f, 1092.0f, 1093.0f, 1094.0f, 1095.0f, 1096.0f, 1097.0f, 1098.0f, 1099.0f, 1100.0f, 1101.0f, 1102.0f, 1103.0f, 1104.0f, 1105.0f, 1106.0f, 1107.0f, 1108.0f, 1109.0f, 1110.0f, 1111.0f, 1112.0f, 1113.0f, 1114.0f, 1115.0f, 1116.0f, 1117.0f, 1118.0f, 1119.0f, 1120.0f, 1121.0f, 1122.0f, 1123.0f, 1124.0f, 1125.0f, 1126.0f, 1127.0f, 1128.0f, 1129.0f, 1130.0f, 1131.0f, 1132.0f, 1133.0f, 1134.0f, 1135.0f, 1136.0f, 1137.0f, 1138.0f, 1139.0f, -540.0f, -541.0f, -542.0f, -543.0f, -544.0f, -545.0f, -546.0f, 
-547.0f, -548.0f, -549.0f, -550.0f, -551.0f, -552.0f, -553.0f, -554.0f, -555.0f, -556.0f, -557.0f, -558.0f, -559.0f, -560.0f, -561.0f, -562.0f, -563.0f, -564.0f, -565.0f, -566.0f, -567.0f, -568.0f, -569.0f, 1140.0f, 1141.0f, 1142.0f, 1143.0f, 1144.0f, 1145.0f, 1146.0f, 1147.0f, 1148.0f, 1149.0f, 1150.0f, 1151.0f, 1152.0f, 1153.0f, 1154.0f, 1155.0f, 1156.0f, 1157.0f, 1158.0f, 1159.0f, 1160.0f, 1161.0f, 1162.0f, 1163.0f, 1164.0f, 1165.0f, 1166.0f, 1167.0f, 1168.0f, 1169.0f, 1170.0f, 1171.0f, 1172.0f, 1173.0f, 1174.0f, 1175.0f, 1176.0f, 1177.0f, 1178.0f, 1179.0f, 1180.0f, 1181.0f, 1182.0f, 1183.0f, 1184.0f, 1185.0f, 1186.0f, 1187.0f, 1188.0f, 1189.0f, 1190.0f, 1191.0f, 1192.0f, 1193.0f, 1194.0f, 1195.0f, 1196.0f, 1197.0f, 1198.0f, 1199.0f, -570.0f, -571.0f, -572.0f, -573.0f, -574.0f, -575.0f, -576.0f, -577.0f, -578.0f, -579.0f, -580.0f, -581.0f, -582.0f, -583.0f, -584.0f, -585.0f, -586.0f, -587.0f, -588.0f, -589.0f, -590.0f, -591.0f, -592.0f, -593.0f, -594.0f, -595.0f, -596.0f, -597.0f, -598.0f, -599.0f, 1200.0f, 1201.0f, 1202.0f, 1203.0f, 1204.0f, 1205.0f, 1206.0f, 1207.0f, 1208.0f, 1209.0f, 1210.0f, 1211.0f, 1212.0f, 1213.0f, 1214.0f, 1215.0f, 1216.0f, 1217.0f, 1218.0f, 1219.0f, 1220.0f, 1221.0f, 1222.0f, 1223.0f, 1224.0f, 1225.0f, 1226.0f, 1227.0f, 1228.0f, 1229.0f, 1230.0f, 1231.0f, 1232.0f, 1233.0f, 1234.0f, 1235.0f, 1236.0f, 1237.0f, 1238.0f, 1239.0f, 1240.0f, 1241.0f, 1242.0f, 1243.0f, 1244.0f, 1245.0f, 1246.0f, 1247.0f, 1248.0f, 1249.0f, 1250.0f, 1251.0f, 1252.0f, 1253.0f, 1254.0f, 1255.0f, 1256.0f, 1257.0f, 1258.0f, 1259.0f, -600.0f, -601.0f, -602.0f, -603.0f, -604.0f, -605.0f, -606.0f, -607.0f, -608.0f, -609.0f, -610.0f, -611.0f, -612.0f, -613.0f, -614.0f, -615.0f, -616.0f, -617.0f, -618.0f, -619.0f, -620.0f, -621.0f, -622.0f, -623.0f, -624.0f, -625.0f, -626.0f, -627.0f, -628.0f, -629.0f, 1260.0f, 1261.0f, 1262.0f, 1263.0f, 1264.0f, 1265.0f, 1266.0f, 1267.0f, 1268.0f, 1269.0f, 1270.0f, 1271.0f, 1272.0f, 1273.0f, 1274.0f, 1275.0f, 1276.0f, 1277.0f, 1278.0f, 
1279.0f, 1280.0f, 1281.0f, 1282.0f, 1283.0f, 1284.0f, 1285.0f, 1286.0f, 1287.0f, 1288.0f, 1289.0f, 1290.0f, 1291.0f, 1292.0f, 1293.0f, 1294.0f, 1295.0f, 1296.0f, 1297.0f, 1298.0f, 1299.0f, 1300.0f, 1301.0f, 1302.0f, 1303.0f, 1304.0f, 1305.0f, 1306.0f, 1307.0f, 1308.0f, 1309.0f, 1310.0f, 1311.0f, 1312.0f, 1313.0f, 1314.0f, 1315.0f, 1316.0f, 1317.0f, 1318.0f, 1319.0f, -630.0f, -631.0f, -632.0f, -633.0f, -634.0f, -635.0f, -636.0f, -637.0f, -638.0f, -639.0f, -640.0f, -641.0f, -642.0f, -643.0f, -644.0f, -645.0f, -646.0f, -647.0f, -648.0f, -649.0f, -650.0f, -651.0f, -652.0f, -653.0f, -654.0f, -655.0f, -656.0f, -657.0f, -658.0f, -659.0f, 1320.0f, 1321.0f, 1322.0f, 1323.0f, 1324.0f, 1325.0f, 1326.0f, 1327.0f, 1328.0f, 1329.0f, 1330.0f, 1331.0f, 1332.0f, 1333.0f, 1334.0f, 1335.0f, 1336.0f, 1337.0f, 1338.0f, 1339.0f, 1340.0f, 1341.0f, 1342.0f, 1343.0f, 1344.0f, 1345.0f, 1346.0f, 1347.0f, 1348.0f, 1349.0f, 1350.0f, 1351.0f, 1352.0f, 1353.0f, 1354.0f, 1355.0f, 1356.0f, 1357.0f, 1358.0f, 1359.0f, 1360.0f, 1361.0f, 1362.0f, 1363.0f, 1364.0f, 1365.0f, 1366.0f, 1367.0f, 1368.0f, 1369.0f, 1370.0f, 1371.0f, 1372.0f, 1373.0f, 1374.0f, 1375.0f, 1376.0f, 1377.0f, 1378.0f, 1379.0f, -660.0f, -661.0f, -662.0f, -663.0f, -664.0f, -665.0f, -666.0f, -667.0f, -668.0f, -669.0f, -670.0f, -671.0f, -672.0f, -673.0f, -674.0f, -675.0f, -676.0f, -677.0f, -678.0f, -679.0f, -680.0f, -681.0f, -682.0f, -683.0f, -684.0f, -685.0f, -686.0f, -687.0f, -688.0f, -689.0f, 1380.0f, 1381.0f, 1382.0f, 1383.0f, 1384.0f, 1385.0f, 1386.0f, 1387.0f, 1388.0f, 1389.0f, 1390.0f, 1391.0f, 1392.0f, 1393.0f, 1394.0f, 1395.0f, 1396.0f, 1397.0f, 1398.0f, 1399.0f, 1400.0f, 1401.0f, 1402.0f, 1403.0f, 1404.0f, 1405.0f, 1406.0f, 1407.0f, 1408.0f, 1409.0f, 1410.0f, 1411.0f, 1412.0f, 1413.0f, 1414.0f, 1415.0f, 1416.0f, 1417.0f, 1418.0f, 1419.0f, 1420.0f, 1421.0f, 1422.0f, 1423.0f, 1424.0f, 1425.0f, 1426.0f, 1427.0f, 1428.0f, 1429.0f, 1430.0f, 1431.0f, 1432.0f, 1433.0f, 1434.0f, 1435.0f, 1436.0f, 1437.0f, 1438.0f, 1439.0f, -690.0f, 
-691.0f, -692.0f, -693.0f, -694.0f, -695.0f, -696.0f, -697.0f, -698.0f, -699.0f, -700.0f, -701.0f, -702.0f, -703.0f, -704.0f, -705.0f, -706.0f, -707.0f, -708.0f, -709.0f, -710.0f, -711.0f, -712.0f, -713.0f, -714.0f, -715.0f, -716.0f, -717.0f, -718.0f, -719.0f, 1440.0f, 1441.0f, 1442.0f, 1443.0f, 1444.0f, 1445.0f, 1446.0f, 1447.0f, 1448.0f, 1449.0f, 1450.0f, 1451.0f, 1452.0f, 1453.0f, 1454.0f, 1455.0f, 1456.0f, 1457.0f, 1458.0f, 1459.0f, 1460.0f, 1461.0f, 1462.0f, 1463.0f, 1464.0f, 1465.0f, 1466.0f, 1467.0f, 1468.0f, 1469.0f, 1470.0f, 1471.0f, 1472.0f, 1473.0f, 1474.0f, 1475.0f, 1476.0f, 1477.0f, 1478.0f, 1479.0f, 1480.0f, 1481.0f, 1482.0f, 1483.0f, 1484.0f, 1485.0f, 1486.0f, 1487.0f, 1488.0f, 1489.0f, 1490.0f, 1491.0f, 1492.0f, 1493.0f, 1494.0f, 1495.0f, 1496.0f, 1497.0f, 1498.0f, 1499.0f, -720.0f, -721.0f, -722.0f, -723.0f, -724.0f, -725.0f, -726.0f, -727.0f, -728.0f, -729.0f, -730.0f, -731.0f, -732.0f, -733.0f, -734.0f, -735.0f, -736.0f, -737.0f, -738.0f, -739.0f, -740.0f, -741.0f, -742.0f, -743.0f, -744.0f, -745.0f, -746.0f, -747.0f, -748.0f, -749.0f, 1500.0f, 1501.0f, 1502.0f, 1503.0f, 1504.0f, 1505.0f, 1506.0f, 1507.0f, 1508.0f, 1509.0f, 1510.0f, 1511.0f, 1512.0f, 1513.0f, 1514.0f, 1515.0f, 1516.0f, 1517.0f, 1518.0f, 1519.0f, 1520.0f, 1521.0f, 1522.0f, 1523.0f, 1524.0f, 1525.0f, 1526.0f, 1527.0f, 1528.0f, 1529.0f, 1530.0f, 1531.0f, 1532.0f, 1533.0f, 1534.0f, 1535.0f, 1536.0f, 1537.0f, 1538.0f, 1539.0f, 1540.0f, 1541.0f, 1542.0f, 1543.0f, 1544.0f, 1545.0f, 1546.0f, 1547.0f, 1548.0f, 1549.0f, 1550.0f, 1551.0f, 1552.0f, 1553.0f, 1554.0f, 1555.0f, 1556.0f, 1557.0f, 1558.0f, 1559.0f, -750.0f, -751.0f, -752.0f, -753.0f, -754.0f, -755.0f, -756.0f, -757.0f, -758.0f, -759.0f, -760.0f, -761.0f, -762.0f, -763.0f, -764.0f, -765.0f, -766.0f, -767.0f, -768.0f, -769.0f, -770.0f, -771.0f, -772.0f, -773.0f, -774.0f, -775.0f, -776.0f, -777.0f, -778.0f, -779.0f, 1560.0f, 1561.0f, 1562.0f, 1563.0f, 1564.0f, 1565.0f, 1566.0f, 1567.0f, 1568.0f, 1569.0f, 1570.0f, 1571.0f, 1572.0f, 
1573.0f, 1574.0f, 1575.0f, 1576.0f, 1577.0f, 1578.0f, 1579.0f, 1580.0f, 1581.0f, 1582.0f, 1583.0f, 1584.0f, 1585.0f, 1586.0f, 1587.0f, 1588.0f, 1589.0f, 1590.0f, 1591.0f, 1592.0f, 1593.0f, 1594.0f, 1595.0f, 1596.0f, 1597.0f, 1598.0f, 1599.0f, 1600.0f, 1601.0f, 1602.0f, 1603.0f, 1604.0f, 1605.0f, 1606.0f, 1607.0f, 1608.0f, 1609.0f, 1610.0f, 1611.0f, 1612.0f, 1613.0f, 1614.0f, 1615.0f, 1616.0f, 1617.0f, 1618.0f, 1619.0f, -780.0f, -781.0f, -782.0f, -783.0f, -784.0f, -785.0f, -786.0f, -787.0f, -788.0f, -789.0f, -790.0f, -791.0f, -792.0f, -793.0f, -794.0f, -795.0f, -796.0f, -797.0f, -798.0f, -799.0f, -800.0f, -801.0f, -802.0f, -803.0f, -804.0f, -805.0f, -806.0f, -807.0f, -808.0f, -809.0f, 1620.0f, 1621.0f, 1622.0f, 1623.0f, 1624.0f, 1625.0f, 1626.0f, 1627.0f, 1628.0f, 1629.0f, 1630.0f, 1631.0f, 1632.0f, 1633.0f, 1634.0f, 1635.0f, 1636.0f, 1637.0f, 1638.0f, 1639.0f, 1640.0f, 1641.0f, 1642.0f, 1643.0f, 1644.0f, 1645.0f, 1646.0f, 1647.0f, 1648.0f, 1649.0f, 1650.0f, 1651.0f, 1652.0f, 1653.0f, 1654.0f, 1655.0f, 1656.0f, 1657.0f, 1658.0f, 1659.0f, 1660.0f, 1661.0f, 1662.0f, 1663.0f, 1664.0f, 1665.0f, 1666.0f, 1667.0f, 1668.0f, 1669.0f, 1670.0f, 1671.0f, 1672.0f, 1673.0f, 1674.0f, 1675.0f, 1676.0f, 1677.0f, 1678.0f, 1679.0f, -810.0f, -811.0f, -812.0f, -813.0f, -814.0f, -815.0f, -816.0f, -817.0f, -818.0f, -819.0f, -820.0f, -821.0f, -822.0f, -823.0f, -824.0f, -825.0f, -826.0f, -827.0f, -828.0f, -829.0f, -830.0f, -831.0f, -832.0f, -833.0f, -834.0f, -835.0f, -836.0f, -837.0f, -838.0f, -839.0f, 1680.0f, 1681.0f, 1682.0f, 1683.0f, 1684.0f, 1685.0f, 1686.0f, 1687.0f, 1688.0f, 1689.0f, 1690.0f, 1691.0f, 1692.0f, 1693.0f, 1694.0f, 1695.0f, 1696.0f, 1697.0f, 1698.0f, 1699.0f, 1700.0f, 1701.0f, 1702.0f, 1703.0f, 1704.0f, 1705.0f, 1706.0f, 1707.0f, 1708.0f, 1709.0f, 1710.0f, 1711.0f, 1712.0f, 1713.0f, 1714.0f, 1715.0f, 1716.0f, 1717.0f, 1718.0f, 1719.0f, 1720.0f, 1721.0f, 1722.0f, 1723.0f, 1724.0f, 1725.0f, 1726.0f, 1727.0f, 1728.0f, 1729.0f, 1730.0f, 1731.0f, 1732.0f, 1733.0f, 1734.0f, 
1735.0f, 1736.0f, 1737.0f, 1738.0f, 1739.0f, -840.0f, -841.0f, -842.0f, -843.0f, -844.0f, -845.0f, -846.0f, -847.0f, -848.0f, -849.0f, -850.0f, -851.0f, -852.0f, -853.0f, -854.0f, -855.0f, -856.0f, -857.0f, -858.0f, -859.0f, -860.0f, -861.0f, -862.0f, -863.0f, -864.0f, -865.0f, -866.0f, -867.0f, -868.0f, -869.0f, 1740.0f, 1741.0f, 1742.0f, 1743.0f, 1744.0f, 1745.0f, 1746.0f, 1747.0f, 1748.0f, 1749.0f, 1750.0f, 1751.0f, 1752.0f, 1753.0f, 1754.0f, 1755.0f, 1756.0f, 1757.0f, 1758.0f, 1759.0f, 1760.0f, 1761.0f, 1762.0f, 1763.0f, 1764.0f, 1765.0f, 1766.0f, 1767.0f, 1768.0f, 1769.0f, 1770.0f, 1771.0f, 1772.0f, 1773.0f, 1774.0f, 1775.0f, 1776.0f, 1777.0f, 1778.0f, 1779.0f, 1780.0f, 1781.0f, 1782.0f, 1783.0f, 1784.0f, 1785.0f, 1786.0f, 1787.0f, 1788.0f, 1789.0f, 1790.0f, 1791.0f, 1792.0f, 1793.0f, 1794.0f, 1795.0f, 1796.0f, 1797.0f, 1798.0f, 1799.0f, -870.0f, -871.0f, -872.0f, -873.0f, -874.0f, -875.0f, -876.0f, -877.0f, -878.0f, -879.0f, -880.0f, -881.0f, -882.0f, -883.0f, -884.0f, -885.0f, -886.0f, -887.0f, -888.0f, -889.0f, -890.0f, -891.0f, -892.0f, -893.0f, -894.0f, -895.0f, -896.0f, -897.0f, -898.0f, -899.0f, 1800.0f, 1801.0f, 1802.0f, 1803.0f, 1804.0f, 1805.0f, 1806.0f, 1807.0f, 1808.0f, 1809.0f, 1810.0f, 1811.0f, 1812.0f, 1813.0f, 1814.0f, 1815.0f, 1816.0f, 1817.0f, 1818.0f, 1819.0f, 1820.0f, 1821.0f, 1822.0f, 1823.0f, 1824.0f, 1825.0f, 1826.0f, 1827.0f, 1828.0f, 1829.0f, 1830.0f, 1831.0f, 1832.0f, 1833.0f, 1834.0f, 1835.0f, 1836.0f, 1837.0f, 1838.0f, 1839.0f, 1840.0f, 1841.0f, 1842.0f, 1843.0f, 1844.0f, 1845.0f, 1846.0f, 1847.0f, 1848.0f, 1849.0f, 1850.0f, 1851.0f, 1852.0f, 1853.0f, 1854.0f, 1855.0f, 1856.0f, 1857.0f, 1858.0f, 1859.0f, -900.0f, -901.0f, -902.0f, -903.0f, -904.0f, -905.0f, -906.0f, -907.0f, -908.0f, -909.0f, -910.0f, -911.0f, -912.0f, -913.0f, -914.0f, -915.0f, -916.0f, -917.0f, -918.0f, -919.0f, -920.0f, -921.0f, -922.0f, -923.0f, -924.0f, -925.0f, -926.0f, -927.0f, -928.0f, -929.0f, 1860.0f, 1861.0f, 1862.0f, 1863.0f, 1864.0f, 1865.0f, 1866.0f, 
1867.0f, 1868.0f, 1869.0f, 1870.0f, 1871.0f, 1872.0f, 1873.0f, 1874.0f, 1875.0f, 1876.0f, 1877.0f, 1878.0f, 1879.0f, 1880.0f, 1881.0f, 1882.0f, 1883.0f, 1884.0f, 1885.0f, 1886.0f, 1887.0f, 1888.0f, 1889.0f, 1890.0f, 1891.0f, 1892.0f, 1893.0f, 1894.0f, 1895.0f, 1896.0f, 1897.0f, 1898.0f, 1899.0f, 1900.0f, 1901.0f, 1902.0f, 1903.0f, 1904.0f, 1905.0f, 1906.0f, 1907.0f, 1908.0f, 1909.0f, 1910.0f, 1911.0f, 1912.0f, 1913.0f, 1914.0f, 1915.0f, 1916.0f, 1917.0f, 1918.0f, 1919.0f, -930.0f, -931.0f, -932.0f, -933.0f, -934.0f, -935.0f, -936.0f, -937.0f, -938.0f, -939.0f, -940.0f, -941.0f, -942.0f, -943.0f, -944.0f, -945.0f, -946.0f, -947.0f, -948.0f, -949.0f, -950.0f, -951.0f, -952.0f, -953.0f, -954.0f, -955.0f, -956.0f, -957.0f, -958.0f, -959.0f, 1920.0f, 1921.0f, 1922.0f, 1923.0f, 1924.0f, 1925.0f, 1926.0f, 1927.0f, 1928.0f, 1929.0f, 1930.0f, 1931.0f, 1932.0f, 1933.0f, 1934.0f, 1935.0f, 1936.0f, 1937.0f, 1938.0f, 1939.0f, 1940.0f, 1941.0f, 1942.0f, 1943.0f, 1944.0f, 1945.0f, 1946.0f, 1947.0f, 1948.0f, 1949.0f, 1950.0f, 1951.0f, 1952.0f, 1953.0f, 1954.0f, 1955.0f, 1956.0f, 1957.0f, 1958.0f, 1959.0f, 1960.0f, 1961.0f, 1962.0f, 1963.0f, 1964.0f, 1965.0f, 1966.0f, 1967.0f, 1968.0f, 1969.0f, 1970.0f, 1971.0f, 1972.0f, 1973.0f, 1974.0f, 1975.0f, 1976.0f, 1977.0f, 1978.0f, 1979.0f, -960.0f, -961.0f, -962.0f, -963.0f, -964.0f, -965.0f, -966.0f, -967.0f, -968.0f, -969.0f, -970.0f, -971.0f, -972.0f, -973.0f, -974.0f, -975.0f, -976.0f, -977.0f, -978.0f, -979.0f, -980.0f, -981.0f, -982.0f, -983.0f, -984.0f, -985.0f, -986.0f, -987.0f, -988.0f, -989.0f, 1980.0f, 1981.0f, 1982.0f, 1983.0f, 1984.0f, 1985.0f, 1986.0f, 1987.0f, 1988.0f, 1989.0f, 1990.0f, 1991.0f, 1992.0f, 1993.0f, 1994.0f, 1995.0f, 1996.0f, 1997.0f, 1998.0f, 1999.0f, 2000.0f, 2001.0f, 2002.0f, 2003.0f, 2004.0f, 2005.0f, 2006.0f, 2007.0f, 2008.0f, 2009.0f, 2010.0f, 2011.0f, 2012.0f, 2013.0f, 2014.0f, 2015.0f, 2016.0f, 2017.0f, 2018.0f, 2019.0f, 2020.0f, 2021.0f, 2022.0f, 2023.0f, 2024.0f, 2025.0f, 2026.0f, 2027.0f, 2028.0f, 
2029.0f, 2030.0f, 2031.0f, 2032.0f, 2033.0f, 2034.0f, 2035.0f, 2036.0f, 2037.0f, 2038.0f, 2039.0f, -990.0f, -991.0f, -992.0f, -993.0f, -994.0f, -995.0f, -996.0f, -997.0f, -998.0f, -999.0f, -1000.0f, -1001.0f, -1002.0f, -1003.0f, -1004.0f, -1005.0f, -1006.0f, -1007.0f, -1008.0f, -1009.0f, -1010.0f, -1011.0f, -1012.0f, -1013.0f, -1014.0f, -1015.0f, -1016.0f, -1017.0f, -1018.0f, -1019.0f, 2040.0f, 2041.0f, 2042.0f, 2043.0f, 2044.0f, 2045.0f, 2046.0f, 2047.0f, 2048.0f, 2049.0f, 2050.0f, 2051.0f, 2052.0f, 2053.0f, 2054.0f, 2055.0f, 2056.0f, 2057.0f, 2058.0f, 2059.0f, 2060.0f, 2061.0f, 2062.0f, 2063.0f, 2064.0f, 2065.0f, 2066.0f, 2067.0f, 2068.0f, 2069.0f, 2070.0f, 2071.0f, 2072.0f, 2073.0f, 2074.0f, 2075.0f, 2076.0f, 2077.0f, 2078.0f, 2079.0f, 2080.0f, 2081.0f, 2082.0f, 2083.0f, 2084.0f, 2085.0f, 2086.0f, 2087.0f, 2088.0f, 2089.0f, 2090.0f, 2091.0f, 2092.0f, 2093.0f, 2094.0f, 2095.0f, 2096.0f, 2097.0f, 2098.0f, 2099.0f, -1020.0f, -1021.0f, -1022.0f, -1023.0f, -1024.0f, -1025.0f, -1026.0f, -1027.0f, -1028.0f, -1029.0f, -1030.0f, -1031.0f, -1032.0f, -1033.0f, -1034.0f, -1035.0f, -1036.0f, -1037.0f, -1038.0f, -1039.0f, -1040.0f, -1041.0f, -1042.0f, -1043.0f, -1044.0f, -1045.0f, -1046.0f, -1047.0f, -1048.0f, -1049.0f, 2100.0f, 2101.0f, 2102.0f, 2103.0f, 2104.0f, 2105.0f, 2106.0f, 2107.0f, 2108.0f, 2109.0f, 2110.0f, 2111.0f, 2112.0f, 2113.0f, 2114.0f, 2115.0f, 2116.0f, 2117.0f, 2118.0f, 2119.0f, 2120.0f, 2121.0f, 2122.0f, 2123.0f, 2124.0f, 2125.0f, 2126.0f, 2127.0f, 2128.0f, 2129.0f, 2130.0f, 2131.0f, 2132.0f, 2133.0f, 2134.0f, 2135.0f, 2136.0f, 2137.0f, 2138.0f, 2139.0f, 2140.0f, 2141.0f, 2142.0f, 2143.0f, 2144.0f, 2145.0f, 2146.0f, 2147.0f, 2148.0f, 2149.0f, 2150.0f, 2151.0f, 2152.0f, 2153.0f, 2154.0f, 2155.0f, 2156.0f, 2157.0f, 2158.0f, 2159.0f, -1050.0f, -1051.0f, -1052.0f, -1053.0f, -1054.0f, -1055.0f, -1056.0f, -1057.0f, -1058.0f, -1059.0f, -1060.0f, -1061.0f, -1062.0f, -1063.0f, -1064.0f, -1065.0f, -1066.0f, -1067.0f, -1068.0f, -1069.0f, -1070.0f, -1071.0f, -1072.0f, 
-1073.0f, -1074.0f, -1075.0f, -1076.0f, -1077.0f, -1078.0f, -1079.0f, 2160.0f, 2161.0f, 2162.0f, 2163.0f, 2164.0f, 2165.0f, 2166.0f, 2167.0f, 2168.0f, 2169.0f, 2170.0f, 2171.0f, 2172.0f, 2173.0f, 2174.0f, 2175.0f, 2176.0f, 2177.0f, 2178.0f, 2179.0f, 2180.0f, 2181.0f, 2182.0f, 2183.0f, 2184.0f, 2185.0f, 2186.0f, 2187.0f, 2188.0f, 2189.0f, 2190.0f, 2191.0f, 2192.0f, 2193.0f, 2194.0f, 2195.0f, 2196.0f, 2197.0f, 2198.0f, 2199.0f, 2200.0f, 2201.0f, 2202.0f, 2203.0f, 2204.0f, 2205.0f, 2206.0f, 2207.0f, 2208.0f, 2209.0f, 2210.0f, 2211.0f, 2212.0f, 2213.0f, 2214.0f, 2215.0f, 2216.0f, 2217.0f, 2218.0f, 2219.0f, -1080.0f, -1081.0f, -1082.0f, -1083.0f, -1084.0f, -1085.0f, -1086.0f, -1087.0f, -1088.0f, -1089.0f, -1090.0f, -1091.0f, -1092.0f, -1093.0f, -1094.0f, -1095.0f, -1096.0f, -1097.0f, -1098.0f, -1099.0f, -1100.0f, -1101.0f, -1102.0f, -1103.0f, -1104.0f, -1105.0f, -1106.0f, -1107.0f, -1108.0f, -1109.0f, 2220.0f, 2221.0f, 2222.0f, 2223.0f, 2224.0f, 2225.0f, 2226.0f, 2227.0f, 2228.0f, 2229.0f, 2230.0f, 2231.0f, 2232.0f, 2233.0f, 2234.0f, 2235.0f, 2236.0f, 2237.0f, 2238.0f, 2239.0f, 2240.0f, 2241.0f, 2242.0f, 2243.0f, 2244.0f, 2245.0f, 2246.0f, 2247.0f, 2248.0f, 2249.0f, 2250.0f, 2251.0f, 2252.0f, 2253.0f, 2254.0f, 2255.0f, 2256.0f, 2257.0f, 2258.0f, 2259.0f, 2260.0f, 2261.0f, 2262.0f, 2263.0f, 2264.0f, 2265.0f, 2266.0f, 2267.0f, 2268.0f, 2269.0f, 2270.0f, 2271.0f, 2272.0f, 2273.0f, 2274.0f, 2275.0f, 2276.0f, 2277.0f, 2278.0f, 2279.0f, -1110.0f, -1111.0f, -1112.0f, -1113.0f, -1114.0f, -1115.0f, -1116.0f, -1117.0f, -1118.0f, -1119.0f, -1120.0f, -1121.0f, -1122.0f, -1123.0f, -1124.0f, -1125.0f, -1126.0f, -1127.0f, -1128.0f, -1129.0f, -1130.0f, -1131.0f, -1132.0f, -1133.0f, -1134.0f, -1135.0f, -1136.0f, -1137.0f, -1138.0f, -1139.0f, 2280.0f, 2281.0f, 2282.0f, 2283.0f, 2284.0f, 2285.0f, 2286.0f, 2287.0f, 2288.0f, 2289.0f, 2290.0f, 2291.0f, 2292.0f, 2293.0f, 2294.0f, 2295.0f, 2296.0f, 2297.0f, 2298.0f, 2299.0f, 2300.0f, 2301.0f, 2302.0f, 2303.0f, 2304.0f, 2305.0f, 2306.0f, 
2307.0f, 2308.0f, 2309.0f, 2310.0f, 2311.0f, 2312.0f, 2313.0f, 2314.0f, 2315.0f, 2316.0f, 2317.0f, 2318.0f, 2319.0f, 2320.0f, 2321.0f, 2322.0f, 2323.0f, 2324.0f, 2325.0f, 2326.0f, 2327.0f, 2328.0f, 2329.0f, 2330.0f, 2331.0f, 2332.0f, 2333.0f, 2334.0f, 2335.0f, 2336.0f, 2337.0f, 2338.0f, 2339.0f, -1140.0f, -1141.0f, -1142.0f, -1143.0f, -1144.0f, -1145.0f, -1146.0f, -1147.0f, -1148.0f, -1149.0f, -1150.0f, -1151.0f, -1152.0f, -1153.0f, -1154.0f, -1155.0f, -1156.0f, -1157.0f, -1158.0f, -1159.0f, -1160.0f, -1161.0f, -1162.0f, -1163.0f, -1164.0f, -1165.0f, -1166.0f, -1167.0f, -1168.0f, -1169.0f, 2340.0f, 2341.0f, 2342.0f, 2343.0f, 2344.0f, 2345.0f, 2346.0f, 2347.0f, 2348.0f, 2349.0f, 2350.0f, 2351.0f, 2352.0f, 2353.0f, 2354.0f, 2355.0f, 2356.0f, 2357.0f, 2358.0f, 2359.0f, 2360.0f, 2361.0f, 2362.0f, 2363.0f, 2364.0f, 2365.0f, 2366.0f, 2367.0f, 2368.0f, 2369.0f, 2370.0f, 2371.0f, 2372.0f, 2373.0f, 2374.0f, 2375.0f, 2376.0f, 2377.0f, 2378.0f, 2379.0f, 2380.0f, 2381.0f, 2382.0f, 2383.0f, 2384.0f, 2385.0f, 2386.0f, 2387.0f, 2388.0f, 2389.0f, 2390.0f, 2391.0f, 2392.0f, 2393.0f, 2394.0f, 2395.0f, 2396.0f, 2397.0f, 2398.0f, 2399.0f, -1170.0f, -1171.0f, -1172.0f, -1173.0f, -1174.0f, -1175.0f, -1176.0f, -1177.0f, -1178.0f, -1179.0f, -1180.0f, -1181.0f, -1182.0f, -1183.0f, -1184.0f, -1185.0f, -1186.0f, -1187.0f, -1188.0f, -1189.0f, -1190.0f, -1191.0f, -1192.0f, -1193.0f, -1194.0f, -1195.0f, -1196.0f, -1197.0f, -1198.0f, -1199.0f, 2400.0f, 2401.0f, 2402.0f, 2403.0f, 2404.0f, 2405.0f, 2406.0f, 2407.0f, 2408.0f, 2409.0f, 2410.0f, 2411.0f, 2412.0f, 2413.0f, 2414.0f, 2415.0f, 2416.0f, 2417.0f, 2418.0f, 2419.0f, 2420.0f, 2421.0f, 2422.0f, 2423.0f, 2424.0f, 2425.0f, 2426.0f, 2427.0f, 2428.0f, 2429.0f, 2430.0f, 2431.0f, 2432.0f, 2433.0f, 2434.0f, 2435.0f, 2436.0f, 2437.0f, 2438.0f, 2439.0f, 2440.0f, 2441.0f, 2442.0f, 2443.0f, 2444.0f, 2445.0f, 2446.0f, 2447.0f, 2448.0f, 2449.0f, 2450.0f, 2451.0f, 2452.0f, 2453.0f, 2454.0f, 2455.0f, 2456.0f, 2457.0f, 2458.0f, 2459.0f, -1200.0f, -1201.0f, 
-1202.0f, -1203.0f, -1204.0f, -1205.0f, -1206.0f, -1207.0f, -1208.0f, -1209.0f, -1210.0f, -1211.0f, -1212.0f, -1213.0f, -1214.0f, -1215.0f, -1216.0f, -1217.0f, -1218.0f, -1219.0f, -1220.0f, -1221.0f, -1222.0f, -1223.0f, -1224.0f, -1225.0f, -1226.0f, -1227.0f, -1228.0f, -1229.0f, 2460.0f, 2461.0f, 2462.0f, 2463.0f, 2464.0f, 2465.0f, 2466.0f, 2467.0f, 2468.0f, 2469.0f, 2470.0f, 2471.0f, 2472.0f, 2473.0f, 2474.0f, 2475.0f, 2476.0f, 2477.0f, 2478.0f, 2479.0f, 2480.0f, 2481.0f, 2482.0f, 2483.0f, 2484.0f, 2485.0f, 2486.0f, 2487.0f, 2488.0f, 2489.0f, 2490.0f, 2491.0f, 2492.0f, 2493.0f, 2494.0f, 2495.0f, 2496.0f, 2497.0f, 2498.0f, 2499.0f, 2500.0f, 2501.0f, 2502.0f, 2503.0f, 2504.0f, 2505.0f, 2506.0f, 2507.0f, 2508.0f, 2509.0f, 2510.0f, 2511.0f, 2512.0f, 2513.0f, 2514.0f, 2515.0f, 2516.0f, 2517.0f, 2518.0f, 2519.0f, -1230.0f, -1231.0f, -1232.0f, -1233.0f, -1234.0f, -1235.0f, -1236.0f, -1237.0f, -1238.0f, -1239.0f, -1240.0f, -1241.0f, -1242.0f, -1243.0f, -1244.0f, -1245.0f, -1246.0f, -1247.0f, -1248.0f, -1249.0f, -1250.0f, -1251.0f, -1252.0f, -1253.0f, -1254.0f, -1255.0f, -1256.0f, -1257.0f, -1258.0f, -1259.0f, 2520.0f, 2521.0f, 2522.0f, 2523.0f, 2524.0f, 2525.0f, 2526.0f, 2527.0f, 2528.0f, 2529.0f, 2530.0f, 2531.0f, 2532.0f, 2533.0f, 2534.0f, 2535.0f, 2536.0f, 2537.0f, 2538.0f, 2539.0f, 2540.0f, 2541.0f, 2542.0f, 2543.0f, 2544.0f, 2545.0f, 2546.0f, 2547.0f, 2548.0f, 2549.0f, 2550.0f, 2551.0f, 2552.0f, 2553.0f, 2554.0f, 2555.0f, 2556.0f, 2557.0f, 2558.0f, 2559.0f, 2560.0f, 2561.0f, 2562.0f, 2563.0f, 2564.0f, 2565.0f, 2566.0f, 2567.0f, 2568.0f, 2569.0f, 2570.0f, 2571.0f, 2572.0f, 2573.0f, 2574.0f, 2575.0f, 2576.0f, 2577.0f, 2578.0f, 2579.0f, -1260.0f, -1261.0f, -1262.0f, -1263.0f, -1264.0f, -1265.0f, -1266.0f, -1267.0f, -1268.0f, -1269.0f, -1270.0f, -1271.0f, -1272.0f, -1273.0f, -1274.0f, -1275.0f, -1276.0f, -1277.0f, -1278.0f, -1279.0f, -1280.0f, -1281.0f, -1282.0f, -1283.0f, -1284.0f, -1285.0f, -1286.0f, -1287.0f, -1288.0f, -1289.0f, 2580.0f, 2581.0f, 2582.0f, 2583.0f, 
2584.0f, 2585.0f, 2586.0f, 2587.0f, 2588.0f, 2589.0f, 2590.0f, 2591.0f, 2592.0f, 2593.0f, 2594.0f, 2595.0f, 2596.0f, 2597.0f, 2598.0f, 2599.0f, 2600.0f, 2601.0f, 2602.0f, 2603.0f, 2604.0f, 2605.0f, 2606.0f, 2607.0f, 2608.0f, 2609.0f, 2610.0f, 2611.0f, 2612.0f, 2613.0f, 2614.0f, 2615.0f, 2616.0f, 2617.0f, 2618.0f, 2619.0f, 2620.0f, 2621.0f, 2622.0f, 2623.0f, 2624.0f, 2625.0f, 2626.0f, 2627.0f, 2628.0f, 2629.0f, 2630.0f, 2631.0f, 2632.0f, 2633.0f, 2634.0f, 2635.0f, 2636.0f, 2637.0f, 2638.0f, 2639.0f, -1290.0f, -1291.0f, -1292.0f, -1293.0f, -1294.0f, -1295.0f, -1296.0f, -1297.0f, -1298.0f, -1299.0f, -1300.0f, -1301.0f, -1302.0f, -1303.0f, -1304.0f, -1305.0f, -1306.0f, -1307.0f, -1308.0f, -1309.0f, -1310.0f, -1311.0f, -1312.0f, -1313.0f, -1314.0f, -1315.0f, -1316.0f, -1317.0f, -1318.0f, -1319.0f, 2640.0f, 2641.0f, 2642.0f, 2643.0f, 2644.0f, 2645.0f, 2646.0f, 2647.0f, 2648.0f, 2649.0f, 2650.0f, 2651.0f, 2652.0f, 2653.0f, 2654.0f, 2655.0f, 2656.0f, 2657.0f, 2658.0f, 2659.0f, 2660.0f, 2661.0f, 2662.0f, 2663.0f, 2664.0f, 2665.0f, 2666.0f, 2667.0f, 2668.0f, 2669.0f, 2670.0f, 2671.0f, 2672.0f, 2673.0f, 2674.0f, 2675.0f, 2676.0f, 2677.0f, 2678.0f, 2679.0f, 2680.0f, 2681.0f, 2682.0f, 2683.0f, 2684.0f, 2685.0f, 2686.0f, 2687.0f, 2688.0f, 2689.0f, 2690.0f, 2691.0f, 2692.0f, 2693.0f, 2694.0f, 2695.0f, 2696.0f, 2697.0f, 2698.0f, 2699.0f, -1320.0f, -1321.0f, -1322.0f, -1323.0f, -1324.0f, -1325.0f, -1326.0f, -1327.0f, -1328.0f, -1329.0f, -1330.0f, -1331.0f, -1332.0f, -1333.0f, -1334.0f, -1335.0f, -1336.0f, -1337.0f, -1338.0f, -1339.0f, -1340.0f, -1341.0f, -1342.0f, -1343.0f, -1344.0f, -1345.0f, -1346.0f, -1347.0f, -1348.0f, -1349.0f, 2700.0f, 2701.0f, 2702.0f, 2703.0f, 2704.0f, 2705.0f, 2706.0f, 2707.0f, 2708.0f, 2709.0f, 2710.0f, 2711.0f, 2712.0f, 2713.0f, 2714.0f, 2715.0f, 2716.0f, 2717.0f, 2718.0f, 2719.0f, 2720.0f, 2721.0f, 2722.0f, 2723.0f, 2724.0f, 2725.0f, 2726.0f, 2727.0f, 2728.0f, 2729.0f, 2730.0f, 2731.0f, 2732.0f, 2733.0f, 2734.0f, 2735.0f, 2736.0f, 2737.0f, 2738.0f, 
2739.0f, 2740.0f, 2741.0f, 2742.0f, 2743.0f, 2744.0f, 2745.0f, 2746.0f, 2747.0f, 2748.0f, 2749.0f, 2750.0f, 2751.0f, 2752.0f, 2753.0f, 2754.0f, 2755.0f, 2756.0f, 2757.0f, 2758.0f, 2759.0f, -1350.0f, -1351.0f, -1352.0f, -1353.0f, -1354.0f, -1355.0f, -1356.0f, -1357.0f, -1358.0f, -1359.0f, -1360.0f, -1361.0f, -1362.0f, -1363.0f, -1364.0f, -1365.0f, -1366.0f, -1367.0f, -1368.0f, -1369.0f, -1370.0f, -1371.0f, -1372.0f, -1373.0f, -1374.0f, -1375.0f, -1376.0f, -1377.0f, -1378.0f, -1379.0f, 2760.0f, 2761.0f, 2762.0f, 2763.0f, 2764.0f, 2765.0f, 2766.0f, 2767.0f, 2768.0f, 2769.0f, 2770.0f, 2771.0f, 2772.0f, 2773.0f, 2774.0f, 2775.0f, 2776.0f, 2777.0f, 2778.0f, 2779.0f, 2780.0f, 2781.0f, 2782.0f, 2783.0f, 2784.0f, 2785.0f, 2786.0f, 2787.0f, 2788.0f, 2789.0f, 2790.0f, 2791.0f, 2792.0f, 2793.0f, 2794.0f, 2795.0f, 2796.0f, 2797.0f, 2798.0f, 2799.0f, 2800.0f, 2801.0f, 2802.0f, 2803.0f, 2804.0f, 2805.0f, 2806.0f, 2807.0f, 2808.0f, 2809.0f, 2810.0f, 2811.0f, 2812.0f, 2813.0f, 2814.0f, 2815.0f, 2816.0f, 2817.0f, 2818.0f, 2819.0f, -1380.0f, -1381.0f, -1382.0f, -1383.0f, -1384.0f, -1385.0f, -1386.0f, -1387.0f, -1388.0f, -1389.0f, -1390.0f, -1391.0f, -1392.0f, -1393.0f, -1394.0f, -1395.0f, -1396.0f, -1397.0f, -1398.0f, -1399.0f, -1400.0f, -1401.0f, -1402.0f, -1403.0f, -1404.0f, -1405.0f, -1406.0f, -1407.0f, -1408.0f, -1409.0f, 2820.0f, 2821.0f, 2822.0f, 2823.0f, 2824.0f, 2825.0f, 2826.0f, 2827.0f, 2828.0f, 2829.0f, 2830.0f, 2831.0f, 2832.0f, 2833.0f, 2834.0f, 2835.0f, 2836.0f, 2837.0f, 2838.0f, 2839.0f, 2840.0f, 2841.0f, 2842.0f, 2843.0f, 2844.0f, 2845.0f, 2846.0f, 2847.0f, 2848.0f, 2849.0f, 2850.0f, 2851.0f, 2852.0f, 2853.0f, 2854.0f, 2855.0f, 2856.0f, 2857.0f, 2858.0f, 2859.0f, 2860.0f, 2861.0f, 2862.0f, 2863.0f, 2864.0f, 2865.0f, 2866.0f, 2867.0f, 2868.0f, 2869.0f, 2870.0f, 2871.0f, 2872.0f, 2873.0f, 2874.0f, 2875.0f, 2876.0f, 2877.0f, 2878.0f, 2879.0f, -1410.0f, -1411.0f, -1412.0f, -1413.0f, -1414.0f, -1415.0f, -1416.0f, -1417.0f, -1418.0f, -1419.0f, -1420.0f, -1421.0f, -1422.0f, 
-1423.0f, -1424.0f, -1425.0f, -1426.0f, -1427.0f, -1428.0f, -1429.0f, -1430.0f, -1431.0f, -1432.0f, -1433.0f, -1434.0f, -1435.0f, -1436.0f, -1437.0f, -1438.0f, -1439.0f, 2880.0f, 2881.0f, 2882.0f, 2883.0f, 2884.0f, 2885.0f, 2886.0f, 2887.0f, 2888.0f, 2889.0f, 2890.0f, 2891.0f, 2892.0f, 2893.0f, 2894.0f, 2895.0f, 2896.0f, 2897.0f, 2898.0f, 2899.0f, 2900.0f, 2901.0f, 2902.0f, 2903.0f, 2904.0f, 2905.0f, 2906.0f, 2907.0f, 2908.0f, 2909.0f, 2910.0f, 2911.0f, 2912.0f, 2913.0f, 2914.0f, 2915.0f, 2916.0f, 2917.0f, 2918.0f, 2919.0f, 2920.0f, 2921.0f, 2922.0f, 2923.0f, 2924.0f, 2925.0f, 2926.0f, 2927.0f, 2928.0f, 2929.0f, 2930.0f, 2931.0f, 2932.0f, 2933.0f, 2934.0f, 2935.0f, 2936.0f, 2937.0f, 2938.0f, 2939.0f, -1440.0f, -1441.0f, -1442.0f, -1443.0f, -1444.0f, -1445.0f, -1446.0f, -1447.0f, -1448.0f, -1449.0f, -1450.0f, -1451.0f, -1452.0f, -1453.0f, -1454.0f, -1455.0f, -1456.0f, -1457.0f, -1458.0f, -1459.0f, -1460.0f, -1461.0f, -1462.0f, -1463.0f, -1464.0f, -1465.0f, -1466.0f, -1467.0f, -1468.0f, -1469.0f, 2940.0f, 2941.0f, 2942.0f, 2943.0f, 2944.0f, 2945.0f, 2946.0f, 2947.0f, 2948.0f, 2949.0f, 2950.0f, 2951.0f, 2952.0f, 2953.0f, 2954.0f, 2955.0f, 2956.0f, 2957.0f, 2958.0f, 2959.0f, 2960.0f, 2961.0f, 2962.0f, 2963.0f, 2964.0f, 2965.0f, 2966.0f, 2967.0f, 2968.0f, 2969.0f, 2970.0f, 2971.0f, 2972.0f, 2973.0f, 2974.0f, 2975.0f, 2976.0f, 2977.0f, 2978.0f, 2979.0f, 2980.0f, 2981.0f, 2982.0f, 2983.0f, 2984.0f, 2985.0f, 2986.0f, 2987.0f, 2988.0f, 2989.0f, 2990.0f, 2991.0f, 2992.0f, 2993.0f, 2994.0f, 2995.0f, 2996.0f, 2997.0f, 2998.0f, 2999.0f, -1470.0f, -1471.0f, -1472.0f, -1473.0f, -1474.0f, -1475.0f, -1476.0f, -1477.0f, -1478.0f, -1479.0f, -1480.0f, -1481.0f, -1482.0f, -1483.0f, -1484.0f, -1485.0f, -1486.0f, -1487.0f, -1488.0f, -1489.0f, -1490.0f, -1491.0f, -1492.0f, -1493.0f, -1494.0f, -1495.0f, -1496.0f, -1497.0f, -1498.0f, -1499.0f, 3000.0f, 3001.0f, 3002.0f, 3003.0f, 3004.0f, 3005.0f, 3006.0f, 3007.0f, 3008.0f, 3009.0f, 3010.0f, 3011.0f, 3012.0f, 3013.0f, 3014.0f, 3015.0f, 
3016.0f, 3017.0f, 3018.0f, 3019.0f, 3020.0f, 3021.0f, 3022.0f, 3023.0f, 3024.0f, 3025.0f, 3026.0f, 3027.0f, 3028.0f, 3029.0f, 3030.0f, 3031.0f, 3032.0f, 3033.0f, 3034.0f, 3035.0f, 3036.0f, 3037.0f, 3038.0f, 3039.0f, 3040.0f, 3041.0f, 3042.0f, 3043.0f, 3044.0f, 3045.0f, 3046.0f, 3047.0f, 3048.0f, 3049.0f, 3050.0f, 3051.0f, 3052.0f, 3053.0f, 3054.0f, 3055.0f, 3056.0f, 3057.0f, 3058.0f, 3059.0f, -1500.0f, -1501.0f, -1502.0f, -1503.0f, -1504.0f, -1505.0f, -1506.0f, -1507.0f, -1508.0f, -1509.0f, -1510.0f, -1511.0f, -1512.0f, -1513.0f, -1514.0f, -1515.0f, -1516.0f, -1517.0f, -1518.0f, -1519.0f, -1520.0f, -1521.0f, -1522.0f, -1523.0f, -1524.0f, -1525.0f, -1526.0f, -1527.0f, -1528.0f, -1529.0f, 3060.0f, 3061.0f, 3062.0f, 3063.0f, 3064.0f, 3065.0f, 3066.0f, 3067.0f, 3068.0f, 3069.0f, 3070.0f, 3071.0f, 3072.0f, 3073.0f, 3074.0f, 3075.0f, 3076.0f, 3077.0f, 3078.0f, 3079.0f, 3080.0f, 3081.0f, 3082.0f, 3083.0f, 3084.0f, 3085.0f, 3086.0f, 3087.0f, 3088.0f, 3089.0f, 3090.0f, 3091.0f, 3092.0f, 3093.0f, 3094.0f, 3095.0f, 3096.0f, 3097.0f, 3098.0f, 3099.0f, 3100.0f, 3101.0f, 3102.0f, 3103.0f, 3104.0f, 3105.0f, 3106.0f, 3107.0f, 3108.0f, 3109.0f, 3110.0f, 3111.0f, 3112.0f, 3113.0f, 3114.0f, 3115.0f, 3116.0f, 3117.0f, 3118.0f, 3119.0f, -1530.0f, -1531.0f, -1532.0f, -1533.0f, -1534.0f, -1535.0f, -1536.0f, -1537.0f, -1538.0f, -1539.0f, -1540.0f, -1541.0f, -1542.0f, -1543.0f, -1544.0f, -1545.0f, -1546.0f, -1547.0f, -1548.0f, -1549.0f, -1550.0f, -1551.0f, -1552.0f, -1553.0f, -1554.0f, -1555.0f, -1556.0f, -1557.0f, -1558.0f, -1559.0f, 3120.0f, 3121.0f, 3122.0f, 3123.0f, 3124.0f, 3125.0f, 3126.0f, 3127.0f, 3128.0f, 3129.0f, 3130.0f, 3131.0f, 3132.0f, 3133.0f, 3134.0f, 3135.0f, 3136.0f, 3137.0f, 3138.0f, 3139.0f, 3140.0f, 3141.0f, 3142.0f, 3143.0f, 3144.0f, 3145.0f, 3146.0f, 3147.0f, 3148.0f, 3149.0f, 3150.0f, 3151.0f, 3152.0f, 3153.0f, 3154.0f, 3155.0f, 3156.0f, 3157.0f, 3158.0f, 3159.0f, 3160.0f, 3161.0f, 3162.0f, 3163.0f, 3164.0f, 3165.0f, 3166.0f, 3167.0f, 3168.0f, 3169.0f, 3170.0f, 
3171.0f, 3172.0f, 3173.0f, 3174.0f, 3175.0f, 3176.0f, 3177.0f, 3178.0f, 3179.0f, -1560.0f, -1561.0f, -1562.0f, -1563.0f, -1564.0f, -1565.0f, -1566.0f, -1567.0f, -1568.0f, -1569.0f, -1570.0f, -1571.0f, -1572.0f, -1573.0f, -1574.0f, -1575.0f, -1576.0f, -1577.0f, -1578.0f, -1579.0f, -1580.0f, -1581.0f, -1582.0f, -1583.0f, -1584.0f, -1585.0f, -1586.0f, -1587.0f, -1588.0f, -1589.0f, 3180.0f, 3181.0f, 3182.0f, 3183.0f, 3184.0f, 3185.0f, 3186.0f, 3187.0f, 3188.0f, 3189.0f, 3190.0f, 3191.0f, 3192.0f, 3193.0f, 3194.0f, 3195.0f, 3196.0f, 3197.0f, 3198.0f, 3199.0f, 3200.0f, 3201.0f, 3202.0f, 3203.0f, 3204.0f, 3205.0f, 3206.0f, 3207.0f, 3208.0f, 3209.0f, 3210.0f, 3211.0f, 3212.0f, 3213.0f, 3214.0f, 3215.0f, 3216.0f, 3217.0f, 3218.0f, 3219.0f, 3220.0f, 3221.0f, 3222.0f, 3223.0f, 3224.0f, 3225.0f, 3226.0f, 3227.0f, 3228.0f, 3229.0f, 3230.0f, 3231.0f, 3232.0f, 3233.0f, 3234.0f, 3235.0f, 3236.0f, 3237.0f, 3238.0f, 3239.0f, -1590.0f, -1591.0f, -1592.0f, -1593.0f, -1594.0f, -1595.0f, -1596.0f, -1597.0f, -1598.0f, -1599.0f, -1600.0f, -1601.0f, -1602.0f, -1603.0f, -1604.0f, -1605.0f, -1606.0f, -1607.0f, -1608.0f, -1609.0f, -1610.0f, -1611.0f, -1612.0f, -1613.0f, -1614.0f, -1615.0f, -1616.0f, -1617.0f, -1618.0f, -1619.0f, 3240.0f, 3241.0f, 3242.0f, 3243.0f, 3244.0f, 3245.0f, 3246.0f, 3247.0f, 3248.0f, 3249.0f, 3250.0f, 3251.0f, 3252.0f, 3253.0f, 3254.0f, 3255.0f, 3256.0f, 3257.0f, 3258.0f, 3259.0f, 3260.0f, 3261.0f, 3262.0f, 3263.0f, 3264.0f, 3265.0f, 3266.0f, 3267.0f, 3268.0f, 3269.0f, 3270.0f, 3271.0f, 3272.0f, 3273.0f, 3274.0f, 3275.0f, 3276.0f, 3277.0f, 3278.0f, 3279.0f, 3280.0f, 3281.0f, 3282.0f, 3283.0f, 3284.0f, 3285.0f, 3286.0f, 3287.0f, 3288.0f, 3289.0f, 3290.0f, 3291.0f, 3292.0f, 3293.0f, 3294.0f, 3295.0f, 3296.0f, 3297.0f, 3298.0f, 3299.0f, -1620.0f, -1621.0f, -1622.0f, -1623.0f, -1624.0f, -1625.0f, -1626.0f, -1627.0f, -1628.0f, -1629.0f, -1630.0f, -1631.0f, -1632.0f, -1633.0f, -1634.0f, -1635.0f, -1636.0f, -1637.0f, -1638.0f, -1639.0f, -1640.0f, -1641.0f, -1642.0f, 
-1643.0f, -1644.0f, -1645.0f, -1646.0f, -1647.0f, -1648.0f, -1649.0f, 3300.0f, 3301.0f, 3302.0f, 3303.0f, 3304.0f, 3305.0f, 3306.0f, 3307.0f, 3308.0f, 3309.0f, 3310.0f, 3311.0f, 3312.0f, 3313.0f, 3314.0f, 3315.0f, 3316.0f, 3317.0f, 3318.0f, 3319.0f, 3320.0f, 3321.0f, 3322.0f, 3323.0f, 3324.0f, 3325.0f, 3326.0f, 3327.0f, 3328.0f, 3329.0f, 3330.0f, 3331.0f, 3332.0f, 3333.0f, 3334.0f, 3335.0f, 3336.0f, 3337.0f, 3338.0f, 3339.0f, 3340.0f, 3341.0f, 3342.0f, 3343.0f, 3344.0f, 3345.0f, 3346.0f, 3347.0f, 3348.0f, 3349.0f, 3350.0f, 3351.0f, 3352.0f, 3353.0f, 3354.0f, 3355.0f, 3356.0f, 3357.0f, 3358.0f, 3359.0f, -1650.0f, -1651.0f, -1652.0f, -1653.0f, -1654.0f, -1655.0f, -1656.0f, -1657.0f, -1658.0f, -1659.0f, -1660.0f, -1661.0f, -1662.0f, -1663.0f, -1664.0f, -1665.0f, -1666.0f, -1667.0f, -1668.0f, -1669.0f, -1670.0f, -1671.0f, -1672.0f, -1673.0f, -1674.0f, -1675.0f, -1676.0f, -1677.0f, -1678.0f, -1679.0f, 3360.0f, 3361.0f, 3362.0f, 3363.0f, 3364.0f, 3365.0f, 3366.0f, 3367.0f, 3368.0f, 3369.0f, 3370.0f, 3371.0f, 3372.0f, 3373.0f, 3374.0f, 3375.0f, 3376.0f, 3377.0f, 3378.0f, 3379.0f, 3380.0f, 3381.0f, 3382.0f, 3383.0f, 3384.0f, 3385.0f, 3386.0f, 3387.0f, 3388.0f, 3389.0f, 3390.0f, 3391.0f, 3392.0f, 3393.0f, 3394.0f, 3395.0f, 3396.0f, 3397.0f, 3398.0f, 3399.0f, 3400.0f, 3401.0f, 3402.0f, 3403.0f, 3404.0f, 3405.0f, 3406.0f, 3407.0f, 3408.0f, 3409.0f, 3410.0f, 3411.0f, 3412.0f, 3413.0f, 3414.0f, 3415.0f, 3416.0f, 3417.0f, 3418.0f, 3419.0f, -1680.0f, -1681.0f, -1682.0f, -1683.0f, -1684.0f, -1685.0f, -1686.0f, -1687.0f, -1688.0f, -1689.0f, -1690.0f, -1691.0f, -1692.0f, -1693.0f, -1694.0f, -1695.0f, -1696.0f, -1697.0f, -1698.0f, -1699.0f, -1700.0f, -1701.0f, -1702.0f, -1703.0f, -1704.0f, -1705.0f, -1706.0f, -1707.0f, -1708.0f, -1709.0f, 3420.0f, 3421.0f, 3422.0f, 3423.0f, 3424.0f, 3425.0f, 3426.0f, 3427.0f, 3428.0f, 3429.0f, 3430.0f, 3431.0f, 3432.0f, 3433.0f, 3434.0f, 3435.0f, 3436.0f, 3437.0f, 3438.0f, 3439.0f, 3440.0f, 3441.0f, 3442.0f, 3443.0f, 3444.0f, 3445.0f, 3446.0f, 
3447.0f, 3448.0f, 3449.0f, 3450.0f, 3451.0f, 3452.0f, 3453.0f, 3454.0f, 3455.0f, 3456.0f, 3457.0f, 3458.0f, 3459.0f, 3460.0f, 3461.0f, 3462.0f, 3463.0f, 3464.0f, 3465.0f, 3466.0f, 3467.0f, 3468.0f, 3469.0f, 3470.0f, 3471.0f, 3472.0f, 3473.0f, 3474.0f, 3475.0f, 3476.0f, 3477.0f, 3478.0f, 3479.0f, -1710.0f, -1711.0f, -1712.0f, -1713.0f, -1714.0f, -1715.0f, -1716.0f, -1717.0f, -1718.0f, -1719.0f, -1720.0f, -1721.0f, -1722.0f, -1723.0f, -1724.0f, -1725.0f, -1726.0f, -1727.0f, -1728.0f, -1729.0f, -1730.0f, -1731.0f, -1732.0f, -1733.0f, -1734.0f, -1735.0f, -1736.0f, -1737.0f, -1738.0f, -1739.0f, 3480.0f, 3481.0f, 3482.0f, 3483.0f, 3484.0f, 3485.0f, 3486.0f, 3487.0f, 3488.0f, 3489.0f, 3490.0f, 3491.0f, 3492.0f, 3493.0f, 3494.0f, 3495.0f, 3496.0f, 3497.0f, 3498.0f, 3499.0f, 3500.0f, 3501.0f, 3502.0f, 3503.0f, 3504.0f, 3505.0f, 3506.0f, 3507.0f, 3508.0f, 3509.0f, 3510.0f, 3511.0f, 3512.0f, 3513.0f, 3514.0f, 3515.0f, 3516.0f, 3517.0f, 3518.0f, 3519.0f, 3520.0f, 3521.0f, 3522.0f, 3523.0f, 3524.0f, 3525.0f, 3526.0f, 3527.0f, 3528.0f, 3529.0f, 3530.0f, 3531.0f, 3532.0f, 3533.0f, 3534.0f, 3535.0f, 3536.0f, 3537.0f, 3538.0f, 3539.0f, -1740.0f, -1741.0f, -1742.0f, -1743.0f, -1744.0f, -1745.0f, -1746.0f, -1747.0f, -1748.0f, -1749.0f, -1750.0f, -1751.0f, -1752.0f, -1753.0f, -1754.0f, -1755.0f, -1756.0f, -1757.0f, -1758.0f, -1759.0f, -1760.0f, -1761.0f, -1762.0f, -1763.0f, -1764.0f, -1765.0f, -1766.0f, -1767.0f, -1768.0f, -1769.0f, 3540.0f, 3541.0f, 3542.0f, 3543.0f, 3544.0f, 3545.0f, 3546.0f, 3547.0f, 3548.0f, 3549.0f, 3550.0f, 3551.0f, 3552.0f, 3553.0f, 3554.0f, 3555.0f, 3556.0f, 3557.0f, 3558.0f, 3559.0f, 3560.0f, 3561.0f, 3562.0f, 3563.0f, 3564.0f, 3565.0f, 3566.0f, 3567.0f, 3568.0f, 3569.0f, 3570.0f, 3571.0f, 3572.0f, 3573.0f, 3574.0f, 3575.0f, 3576.0f, 3577.0f, 3578.0f, 3579.0f, 3580.0f, 3581.0f, 3582.0f, 3583.0f, 3584.0f, 3585.0f, 3586.0f, 3587.0f, 3588.0f, 3589.0f, 3590.0f, 3591.0f, 3592.0f, 3593.0f, 3594.0f, 3595.0f, 3596.0f, 3597.0f, 3598.0f, 3599.0f, -1770.0f, -1771.0f, 
-1772.0f, -1773.0f, -1774.0f, -1775.0f, -1776.0f, -1777.0f, -1778.0f, -1779.0f, -1780.0f, -1781.0f, -1782.0f, -1783.0f, -1784.0f, -1785.0f, -1786.0f, -1787.0f, -1788.0f, -1789.0f, -1790.0f, -1791.0f, -1792.0f, -1793.0f, -1794.0f, -1795.0f, -1796.0f, -1797.0f, -1798.0f, -1799.0f, 3600.0f, 3601.0f, 3602.0f, 3603.0f, 3604.0f, 3605.0f, 3606.0f, 3607.0f, 3608.0f, 3609.0f, 3610.0f, 3611.0f, 3612.0f, 3613.0f, 3614.0f, 3615.0f, 3616.0f, 3617.0f, 3618.0f, 3619.0f, 3620.0f, 3621.0f, 3622.0f, 3623.0f, 3624.0f, 3625.0f, 3626.0f, 3627.0f, 3628.0f, 3629.0f, 3630.0f, 3631.0f, 3632.0f, 3633.0f, 3634.0f, 3635.0f, 3636.0f, 3637.0f, 3638.0f, 3639.0f, 3640.0f, 3641.0f, 3642.0f, 3643.0f, 3644.0f, 3645.0f, 3646.0f, 3647.0f, 3648.0f, 3649.0f, 3650.0f, 3651.0f, 3652.0f, 3653.0f, 3654.0f, 3655.0f, 3656.0f, 3657.0f, 3658.0f, 3659.0f, -1800.0f, -1801.0f, -1802.0f, -1803.0f, -1804.0f, -1805.0f, -1806.0f, -1807.0f, -1808.0f, -1809.0f, -1810.0f, -1811.0f, -1812.0f, -1813.0f, -1814.0f, -1815.0f, -1816.0f, -1817.0f, -1818.0f, -1819.0f, -1820.0f, -1821.0f, -1822.0f, -1823.0f, -1824.0f, -1825.0f, -1826.0f, -1827.0f, -1828.0f, -1829.0f, 3660.0f, 3661.0f, 3662.0f, 3663.0f, 3664.0f, 3665.0f, 3666.0f, 3667.0f, 3668.0f, 3669.0f, 3670.0f, 3671.0f, 3672.0f, 3673.0f, 3674.0f, 3675.0f, 3676.0f, 3677.0f, 3678.0f, 3679.0f, 3680.0f, 3681.0f, 3682.0f, 3683.0f, 3684.0f, 3685.0f, 3686.0f, 3687.0f, 3688.0f, 3689.0f, 3690.0f, 3691.0f, 3692.0f, 3693.0f, 3694.0f, 3695.0f, 3696.0f, 3697.0f, 3698.0f, 3699.0f, 3700.0f, 3701.0f, 3702.0f, 3703.0f, 3704.0f, 3705.0f, 3706.0f, 3707.0f, 3708.0f, 3709.0f, 3710.0f, 3711.0f, 3712.0f, 3713.0f, 3714.0f, 3715.0f, 3716.0f, 3717.0f, 3718.0f, 3719.0f, -1830.0f, -1831.0f, -1832.0f, -1833.0f, -1834.0f, -1835.0f, -1836.0f, -1837.0f, -1838.0f, -1839.0f, -1840.0f, -1841.0f, -1842.0f, -1843.0f, -1844.0f, -1845.0f, -1846.0f, -1847.0f, -1848.0f, -1849.0f, -1850.0f, -1851.0f, -1852.0f, -1853.0f, -1854.0f, -1855.0f, -1856.0f, -1857.0f, -1858.0f, -1859.0f, 3720.0f, 3721.0f, 3722.0f, 3723.0f, 
3724.0f, 3725.0f, 3726.0f, 3727.0f, 3728.0f, 3729.0f, 3730.0f, 3731.0f, 3732.0f, 3733.0f, 3734.0f, 3735.0f, 3736.0f, 3737.0f, 3738.0f, 3739.0f, 3740.0f, 3741.0f, 3742.0f, 3743.0f, 3744.0f, 3745.0f, 3746.0f, 3747.0f, 3748.0f, 3749.0f, 3750.0f, 3751.0f, 3752.0f, 3753.0f, 3754.0f, 3755.0f, 3756.0f, 3757.0f, 3758.0f, 3759.0f, 3760.0f, 3761.0f, 3762.0f, 3763.0f, 3764.0f, 3765.0f, 3766.0f, 3767.0f, 3768.0f, 3769.0f, 3770.0f, 3771.0f, 3772.0f, 3773.0f, 3774.0f, 3775.0f, 3776.0f, 3777.0f, 3778.0f, 3779.0f, -1860.0f, -1861.0f, -1862.0f, -1863.0f, -1864.0f, -1865.0f, -1866.0f, -1867.0f, -1868.0f, -1869.0f, -1870.0f, -1871.0f, -1872.0f, -1873.0f, -1874.0f, -1875.0f, -1876.0f, -1877.0f, -1878.0f, -1879.0f, -1880.0f, -1881.0f, -1882.0f, -1883.0f, -1884.0f, -1885.0f, -1886.0f, -1887.0f, -1888.0f, -1889.0f, 3780.0f, 3781.0f, 3782.0f, 3783.0f, 3784.0f, 3785.0f, 3786.0f, 3787.0f, 3788.0f, 3789.0f, 3790.0f, 3791.0f, 3792.0f, 3793.0f, 3794.0f, 3795.0f, 3796.0f, 3797.0f, 3798.0f, 3799.0f, 3800.0f, 3801.0f, 3802.0f, 3803.0f, 3804.0f, 3805.0f, 3806.0f, 3807.0f, 3808.0f, 3809.0f, 3810.0f, 3811.0f, 3812.0f, 3813.0f, 3814.0f, 3815.0f, 3816.0f, 3817.0f, 3818.0f, 3819.0f, 3820.0f, 3821.0f, 3822.0f, 3823.0f, 3824.0f, 3825.0f, 3826.0f, 3827.0f, 3828.0f, 3829.0f, 3830.0f, 3831.0f, 3832.0f, 3833.0f, 3834.0f, 3835.0f, 3836.0f, 3837.0f, 3838.0f, 3839.0f, -1890.0f, -1891.0f, -1892.0f, -1893.0f, -1894.0f, -1895.0f, -1896.0f, -1897.0f, -1898.0f, -1899.0f, -1900.0f, -1901.0f, -1902.0f, -1903.0f, -1904.0f, -1905.0f, -1906.0f, -1907.0f, -1908.0f, -1909.0f, -1910.0f, -1911.0f, -1912.0f, -1913.0f, -1914.0f, -1915.0f, -1916.0f, -1917.0f, -1918.0f, -1919.0f, 3840.0f, 3841.0f, 3842.0f, 3843.0f, 3844.0f, 3845.0f, 3846.0f, 3847.0f, 3848.0f, 3849.0f, 3850.0f, 3851.0f, 3852.0f, 3853.0f, 3854.0f, 3855.0f, 3856.0f, 3857.0f, 3858.0f, 3859.0f, 3860.0f, 3861.0f, 3862.0f, 3863.0f, 3864.0f, 3865.0f, 3866.0f, 3867.0f, 3868.0f, 3869.0f, 3870.0f, 3871.0f, 3872.0f, 3873.0f, 3874.0f, 3875.0f, 3876.0f, 3877.0f, 3878.0f, 
3879.0f, 3880.0f, 3881.0f, 3882.0f, 3883.0f, 3884.0f, 3885.0f, 3886.0f, 3887.0f, 3888.0f, 3889.0f, 3890.0f, 3891.0f, 3892.0f, 3893.0f, 3894.0f, 3895.0f, 3896.0f, 3897.0f, 3898.0f, 3899.0f, -1920.0f, -1921.0f, -1922.0f, -1923.0f, -1924.0f, -1925.0f, -1926.0f, -1927.0f, -1928.0f, -1929.0f, -1930.0f, -1931.0f, -1932.0f, -1933.0f, -1934.0f, -1935.0f, -1936.0f, -1937.0f, -1938.0f, -1939.0f, -1940.0f, -1941.0f, -1942.0f, -1943.0f, -1944.0f, -1945.0f, -1946.0f, -1947.0f, -1948.0f, -1949.0f, 3900.0f, 3901.0f, 3902.0f, 3903.0f, 3904.0f, 3905.0f, 3906.0f, 3907.0f, 3908.0f, 3909.0f, 3910.0f, 3911.0f, 3912.0f, 3913.0f, 3914.0f, 3915.0f, 3916.0f, 3917.0f, 3918.0f, 3919.0f, 3920.0f, 3921.0f, 3922.0f, 3923.0f, 3924.0f, 3925.0f, 3926.0f, 3927.0f, 3928.0f, 3929.0f, 3930.0f, 3931.0f, 3932.0f, 3933.0f, 3934.0f, 3935.0f, 3936.0f, 3937.0f, 3938.0f, 3939.0f, 3940.0f, 3941.0f, 3942.0f, 3943.0f, 3944.0f, 3945.0f, 3946.0f, 3947.0f, 3948.0f, 3949.0f, 3950.0f, 3951.0f, 3952.0f, 3953.0f, 3954.0f, 3955.0f, 3956.0f, 3957.0f, 3958.0f, 3959.0f, -1950.0f, -1951.0f, -1952.0f, -1953.0f, -1954.0f, -1955.0f, -1956.0f, -1957.0f, -1958.0f, -1959.0f, -1960.0f, -1961.0f, -1962.0f, -1963.0f, -1964.0f, -1965.0f, -1966.0f, -1967.0f, -1968.0f, -1969.0f, -1970.0f, -1971.0f, -1972.0f, -1973.0f, -1974.0f, -1975.0f, -1976.0f, -1977.0f, -1978.0f, -1979.0f, 3960.0f, 3961.0f, 3962.0f, 3963.0f, 3964.0f, 3965.0f, 3966.0f, 3967.0f, 3968.0f, 3969.0f, 3970.0f, 3971.0f, 3972.0f, 3973.0f, 3974.0f, 3975.0f, 3976.0f, 3977.0f, 3978.0f, 3979.0f, 3980.0f, 3981.0f, 3982.0f, 3983.0f, 3984.0f, 3985.0f, 3986.0f, 3987.0f, 3988.0f, 3989.0f, 3990.0f, 3991.0f, 3992.0f, 3993.0f, 3994.0f, 3995.0f, 3996.0f, 3997.0f, 3998.0f, 3999.0f, 4000.0f, 4001.0f, 4002.0f, 4003.0f, 4004.0f, 4005.0f, 4006.0f, 4007.0f, 4008.0f, 4009.0f, 4010.0f, 4011.0f, 4012.0f, 4013.0f, 4014.0f, 4015.0f, 4016.0f, 4017.0f, 4018.0f, 4019.0f, -1980.0f, -1981.0f, -1982.0f, -1983.0f, -1984.0f, -1985.0f, -1986.0f, -1987.0f, -1988.0f, -1989.0f, -1990.0f, -1991.0f, -1992.0f, 
-1993.0f, -1994.0f, -1995.0f, -1996.0f, -1997.0f, -1998.0f, -1999.0f, -2000.0f, -2001.0f, -2002.0f, -2003.0f, -2004.0f, -2005.0f, -2006.0f, -2007.0f, -2008.0f, -2009.0f, 4020.0f, 4021.0f, 4022.0f, 4023.0f, 4024.0f, 4025.0f, 4026.0f, 4027.0f, 4028.0f, 4029.0f, 4030.0f, 4031.0f, 4032.0f, 4033.0f, 4034.0f, 4035.0f, 4036.0f, 4037.0f, 4038.0f, 4039.0f, 4040.0f, 4041.0f, 4042.0f, 4043.0f, 4044.0f, 4045.0f, 4046.0f, 4047.0f, 4048.0f, 4049.0f, 4050.0f, 4051.0f, 4052.0f, 4053.0f, 4054.0f, 4055.0f, 4056.0f, 4057.0f, 4058.0f, 4059.0f, 4060.0f, 4061.0f, 4062.0f, 4063.0f, 4064.0f, 4065.0f, 4066.0f, 4067.0f, 4068.0f, 4069.0f, 4070.0f, 4071.0f, 4072.0f, 4073.0f, 4074.0f, 4075.0f, 4076.0f, 4077.0f, 4078.0f, 4079.0f, -2010.0f, -2011.0f, -2012.0f, -2013.0f, -2014.0f, -2015.0f, -2016.0f, -2017.0f, -2018.0f, -2019.0f, -2020.0f, -2021.0f, -2022.0f, -2023.0f, -2024.0f, -2025.0f, -2026.0f, -2027.0f, -2028.0f, -2029.0f, -2030.0f, -2031.0f, -2032.0f, -2033.0f, -2034.0f, -2035.0f, -2036.0f, -2037.0f, -2038.0f, -2039.0f, 4080.0f, 4081.0f, 4082.0f, 4083.0f, 4084.0f, 4085.0f, 4086.0f, 4087.0f, 4088.0f, 4089.0f, 4090.0f, 4091.0f, 4092.0f, 4093.0f, 4094.0f, 4095.0f, 4096.0f, 4097.0f, 4098.0f, 4099.0f, 4100.0f, 4101.0f, 4102.0f, 4103.0f, 4104.0f, 4105.0f, 4106.0f, 4107.0f, 4108.0f, 4109.0f, 4110.0f, 4111.0f, 4112.0f, 4113.0f, 4114.0f, 4115.0f, 4116.0f, 4117.0f, 4118.0f, 4119.0f, 4120.0f, 4121.0f, 4122.0f, 4123.0f, 4124.0f, 4125.0f, 4126.0f, 4127.0f, 4128.0f, 4129.0f, 4130.0f, 4131.0f, 4132.0f, 4133.0f, 4134.0f, 4135.0f, 4136.0f, 4137.0f, 4138.0f, 4139.0f, -2040.0f, -2041.0f, -2042.0f, -2043.0f, -2044.0f, -2045.0f, -2046.0f, -2047.0f, -2048.0f, -2049.0f, -2050.0f, -2051.0f, -2052.0f, -2053.0f, -2054.0f, -2055.0f, -2056.0f, -2057.0f, -2058.0f, -2059.0f, -2060.0f, -2061.0f, -2062.0f, -2063.0f, -2064.0f, -2065.0f, -2066.0f, -2067.0f, -2068.0f, -2069.0f, 4140.0f, 4141.0f, 4142.0f, 4143.0f, 4144.0f, 4145.0f, 4146.0f, 4147.0f, 4148.0f, 4149.0f, 4150.0f, 4151.0f, 4152.0f, 4153.0f, 4154.0f, 4155.0f, 
4156.0f, 4157.0f, 4158.0f, 4159.0f, 4160.0f, 4161.0f, 4162.0f, 4163.0f, 4164.0f, 4165.0f, 4166.0f, 4167.0f, 4168.0f, 4169.0f, 4170.0f, 4171.0f, 4172.0f, 4173.0f, 4174.0f, 4175.0f, 4176.0f, 4177.0f, 4178.0f, 4179.0f, 4180.0f, 4181.0f, 4182.0f, 4183.0f, 4184.0f, 4185.0f, 4186.0f, 4187.0f, 4188.0f, 4189.0f, 4190.0f, 4191.0f, 4192.0f, 4193.0f, 4194.0f, 4195.0f, 4196.0f, 4197.0f, 4198.0f, 4199.0f, -2070.0f, -2071.0f, -2072.0f, -2073.0f, -2074.0f, -2075.0f, -2076.0f, -2077.0f, -2078.0f, -2079.0f, -2080.0f, -2081.0f, -2082.0f, -2083.0f, -2084.0f, -2085.0f, -2086.0f, -2087.0f, -2088.0f, -2089.0f, -2090.0f, -2091.0f, -2092.0f, -2093.0f, -2094.0f, -2095.0f, -2096.0f, -2097.0f, -2098.0f, -2099.0f, 4200.0f, 4201.0f, 4202.0f, 4203.0f, 4204.0f, 4205.0f, 4206.0f, 4207.0f, 4208.0f, 4209.0f, 4210.0f, 4211.0f, 4212.0f, 4213.0f, 4214.0f, 4215.0f, 4216.0f, 4217.0f, 4218.0f, 4219.0f, 4220.0f, 4221.0f, 4222.0f, 4223.0f, 4224.0f, 4225.0f, 4226.0f, 4227.0f, 4228.0f, 4229.0f, 4230.0f, 4231.0f, 4232.0f, 4233.0f, 4234.0f, 4235.0f, 4236.0f, 4237.0f, 4238.0f, 4239.0f, 4240.0f, 4241.0f, 4242.0f, 4243.0f, 4244.0f, 4245.0f, 4246.0f, 4247.0f, 4248.0f, 4249.0f, 4250.0f, 4251.0f, 4252.0f, 4253.0f, 4254.0f, 4255.0f, 4256.0f, 4257.0f, 4258.0f, 4259.0f, -2100.0f, -2101.0f, -2102.0f, -2103.0f, -2104.0f, -2105.0f, -2106.0f, -2107.0f, -2108.0f, -2109.0f, -2110.0f, -2111.0f, -2112.0f, -2113.0f, -2114.0f, -2115.0f, -2116.0f, -2117.0f, -2118.0f, -2119.0f, -2120.0f, -2121.0f, -2122.0f, -2123.0f, -2124.0f, -2125.0f, -2126.0f, -2127.0f, -2128.0f, -2129.0f, 4260.0f, 4261.0f, 4262.0f, 4263.0f, 4264.0f, 4265.0f, 4266.0f, 4267.0f, 4268.0f, 4269.0f, 4270.0f, 4271.0f, 4272.0f, 4273.0f, 4274.0f, 4275.0f, 4276.0f, 4277.0f, 4278.0f, 4279.0f, 4280.0f, 4281.0f, 4282.0f, 4283.0f, 4284.0f, 4285.0f, 4286.0f, 4287.0f, 4288.0f, 4289.0f, 4290.0f, 4291.0f, 4292.0f, 4293.0f, 4294.0f, 4295.0f, 4296.0f, 4297.0f, 4298.0f, 4299.0f, 4300.0f, 4301.0f, 4302.0f, 4303.0f, 4304.0f, 4305.0f, 4306.0f, 4307.0f, 4308.0f, 4309.0f, 4310.0f, 
4311.0f, 4312.0f, 4313.0f, 4314.0f, 4315.0f, 4316.0f, 4317.0f, 4318.0f, 4319.0f, -2130.0f, -2131.0f, -2132.0f, -2133.0f, -2134.0f, -2135.0f, -2136.0f, -2137.0f, -2138.0f, -2139.0f, -2140.0f, -2141.0f, -2142.0f, -2143.0f, -2144.0f, -2145.0f, -2146.0f, -2147.0f, -2148.0f, -2149.0f, -2150.0f, -2151.0f, -2152.0f, -2153.0f, -2154.0f, -2155.0f, -2156.0f, -2157.0f, -2158.0f, -2159.0f, 4320.0f, 4321.0f, 4322.0f, 4323.0f, 4324.0f, 4325.0f, 4326.0f, 4327.0f, 4328.0f, 4329.0f, 4330.0f, 4331.0f, 4332.0f, 4333.0f, 4334.0f, 4335.0f, 4336.0f, 4337.0f, 4338.0f, 4339.0f, 4340.0f, 4341.0f, 4342.0f, 4343.0f, 4344.0f, 4345.0f, 4346.0f, 4347.0f, 4348.0f, 4349.0f, 4350.0f, 4351.0f, 4352.0f, 4353.0f, 4354.0f, 4355.0f, 4356.0f, 4357.0f, 4358.0f, 4359.0f, 4360.0f, 4361.0f, 4362.0f, 4363.0f, 4364.0f, 4365.0f, 4366.0f, 4367.0f, 4368.0f, 4369.0f, 4370.0f, 4371.0f, 4372.0f, 4373.0f, 4374.0f, 4375.0f, 4376.0f, 4377.0f, 4378.0f, 4379.0f, -2160.0f, -2161.0f, -2162.0f, -2163.0f, -2164.0f, -2165.0f, -2166.0f, -2167.0f, -2168.0f, -2169.0f, -2170.0f, -2171.0f, -2172.0f, -2173.0f, -2174.0f, -2175.0f, -2176.0f, -2177.0f, -2178.0f, -2179.0f, -2180.0f, -2181.0f, -2182.0f, -2183.0f, -2184.0f, -2185.0f, -2186.0f, -2187.0f, -2188.0f, -2189.0f, 4380.0f, 4381.0f, 4382.0f, 4383.0f, 4384.0f, 4385.0f, 4386.0f, 4387.0f, 4388.0f, 4389.0f, 4390.0f, 4391.0f, 4392.0f, 4393.0f, 4394.0f, 4395.0f, 4396.0f, 4397.0f, 4398.0f, 4399.0f, 4400.0f, 4401.0f, 4402.0f, 4403.0f, 4404.0f, 4405.0f, 4406.0f, 4407.0f, 4408.0f, 4409.0f, 4410.0f, 4411.0f, 4412.0f, 4413.0f, 4414.0f, 4415.0f, 4416.0f, 4417.0f, 4418.0f, 4419.0f, 4420.0f, 4421.0f, 4422.0f, 4423.0f, 4424.0f, 4425.0f, 4426.0f, 4427.0f, 4428.0f, 4429.0f, 4430.0f, 4431.0f, 4432.0f, 4433.0f, 4434.0f, 4435.0f, 4436.0f, 4437.0f, 4438.0f, 4439.0f, -2190.0f, -2191.0f, -2192.0f, -2193.0f, -2194.0f, -2195.0f, -2196.0f, -2197.0f, -2198.0f, -2199.0f, -2200.0f, -2201.0f, -2202.0f, -2203.0f, -2204.0f, -2205.0f, -2206.0f, -2207.0f, -2208.0f, -2209.0f, -2210.0f, -2211.0f, -2212.0f, 
-2213.0f, -2214.0f, -2215.0f, -2216.0f, -2217.0f, -2218.0f, -2219.0f, 4440.0f, 4441.0f, 4442.0f, 4443.0f, 4444.0f, 4445.0f, 4446.0f, 4447.0f, 4448.0f, 4449.0f, 4450.0f, 4451.0f, 4452.0f, 4453.0f, 4454.0f, 4455.0f, 4456.0f, 4457.0f, 4458.0f, 4459.0f, 4460.0f, 4461.0f, 4462.0f, 4463.0f, 4464.0f, 4465.0f, 4466.0f, 4467.0f, 4468.0f, 4469.0f, 4470.0f, 4471.0f, 4472.0f, 4473.0f, 4474.0f, 4475.0f, 4476.0f, 4477.0f, 4478.0f, 4479.0f, 4480.0f, 4481.0f, 4482.0f, 4483.0f, 4484.0f, 4485.0f, 4486.0f, 4487.0f, 4488.0f, 4489.0f, 4490.0f, 4491.0f, 4492.0f, 4493.0f, 4494.0f, 4495.0f, 4496.0f, 4497.0f, 4498.0f, 4499.0f, -2220.0f, -2221.0f, -2222.0f, -2223.0f, -2224.0f, -2225.0f, -2226.0f, -2227.0f, -2228.0f, -2229.0f, -2230.0f, -2231.0f, -2232.0f, -2233.0f, -2234.0f, -2235.0f, -2236.0f, -2237.0f, -2238.0f, -2239.0f, -2240.0f, -2241.0f, -2242.0f, -2243.0f, -2244.0f, -2245.0f, -2246.0f, -2247.0f, -2248.0f, -2249.0f, 4500.0f, 4501.0f, 4502.0f, 4503.0f, 4504.0f, 4505.0f, 4506.0f, 4507.0f, 4508.0f, 4509.0f, 4510.0f, 4511.0f, 4512.0f, 4513.0f, 4514.0f, 4515.0f, 4516.0f, 4517.0f, 4518.0f, 4519.0f, 4520.0f, 4521.0f, 4522.0f, 4523.0f, 4524.0f, 4525.0f, 4526.0f, 4527.0f, 4528.0f, 4529.0f, 4530.0f, 4531.0f, 4532.0f, 4533.0f, 4534.0f, 4535.0f, 4536.0f, 4537.0f, 4538.0f, 4539.0f, 4540.0f, 4541.0f, 4542.0f, 4543.0f, 4544.0f, 4545.0f, 4546.0f, 4547.0f, 4548.0f, 4549.0f, 4550.0f, 4551.0f, 4552.0f, 4553.0f, 4554.0f, 4555.0f, 4556.0f, 4557.0f, 4558.0f, 4559.0f, -2250.0f, -2251.0f, -2252.0f, -2253.0f, -2254.0f, -2255.0f, -2256.0f, -2257.0f, -2258.0f, -2259.0f, -2260.0f, -2261.0f, -2262.0f, -2263.0f, -2264.0f, -2265.0f, -2266.0f, -2267.0f, -2268.0f, -2269.0f, -2270.0f, -2271.0f, -2272.0f, -2273.0f, -2274.0f, -2275.0f, -2276.0f, -2277.0f, -2278.0f, -2279.0f, 4560.0f, 4561.0f, 4562.0f, 4563.0f, 4564.0f, 4565.0f, 4566.0f, 4567.0f, 4568.0f, 4569.0f, 4570.0f, 4571.0f, 4572.0f, 4573.0f, 4574.0f, 4575.0f, 4576.0f, 4577.0f, 4578.0f, 4579.0f, 4580.0f, 4581.0f, 4582.0f, 4583.0f, 4584.0f, 4585.0f, 4586.0f, 
4587.0f, 4588.0f, 4589.0f, 4590.0f, 4591.0f, 4592.0f, 4593.0f, 4594.0f, 4595.0f, 4596.0f, 4597.0f, 4598.0f, 4599.0f, 4600.0f, 4601.0f, 4602.0f, 4603.0f, 4604.0f, 4605.0f, 4606.0f, 4607.0f, 4608.0f, 4609.0f, 4610.0f, 4611.0f, 4612.0f, 4613.0f, 4614.0f, 4615.0f, 4616.0f, 4617.0f, 4618.0f, 4619.0f, -2280.0f, -2281.0f, -2282.0f, -2283.0f, -2284.0f, -2285.0f, -2286.0f, -2287.0f, -2288.0f, -2289.0f, -2290.0f, -2291.0f, -2292.0f, -2293.0f, -2294.0f, -2295.0f, -2296.0f, -2297.0f, -2298.0f, -2299.0f, -2300.0f, -2301.0f, -2302.0f, -2303.0f, -2304.0f, -2305.0f, -2306.0f, -2307.0f, -2308.0f, -2309.0f, 4620.0f, 4621.0f, 4622.0f, 4623.0f, 4624.0f, 4625.0f, 4626.0f, 4627.0f, 4628.0f, 4629.0f, 4630.0f, 4631.0f, 4632.0f, 4633.0f, 4634.0f, 4635.0f, 4636.0f, 4637.0f, 4638.0f, 4639.0f, 4640.0f, 4641.0f, 4642.0f, 4643.0f, 4644.0f, 4645.0f, 4646.0f, 4647.0f, 4648.0f, 4649.0f, 4650.0f, 4651.0f, 4652.0f, 4653.0f, 4654.0f, 4655.0f, 4656.0f, 4657.0f, 4658.0f, 4659.0f, 4660.0f, 4661.0f, 4662.0f, 4663.0f, 4664.0f, 4665.0f, 4666.0f, 4667.0f, 4668.0f, 4669.0f, 4670.0f, 4671.0f, 4672.0f, 4673.0f, 4674.0f, 4675.0f, 4676.0f, 4677.0f, 4678.0f, 4679.0f, -2310.0f, -2311.0f, -2312.0f, -2313.0f, -2314.0f, -2315.0f, -2316.0f, -2317.0f, -2318.0f, -2319.0f, -2320.0f, -2321.0f, -2322.0f, -2323.0f, -2324.0f, -2325.0f, -2326.0f, -2327.0f, -2328.0f, -2329.0f, -2330.0f, -2331.0f, -2332.0f, -2333.0f, -2334.0f, -2335.0f, -2336.0f, -2337.0f, -2338.0f, -2339.0f, 4680.0f, 4681.0f, 4682.0f, 4683.0f, 4684.0f, 4685.0f, 4686.0f, 4687.0f, 4688.0f, 4689.0f, 4690.0f, 4691.0f, 4692.0f, 4693.0f, 4694.0f, 4695.0f, 4696.0f, 4697.0f, 4698.0f, 4699.0f, 4700.0f, 4701.0f, 4702.0f, 4703.0f, 4704.0f, 4705.0f, 4706.0f, 4707.0f, 4708.0f, 4709.0f, 4710.0f, 4711.0f, 4712.0f, 4713.0f, 4714.0f, 4715.0f, 4716.0f, 4717.0f, 4718.0f, 4719.0f, 4720.0f, 4721.0f, 4722.0f, 4723.0f, 4724.0f, 4725.0f, 4726.0f, 4727.0f, 4728.0f, 4729.0f, 4730.0f, 4731.0f, 4732.0f, 4733.0f, 4734.0f, 4735.0f, 4736.0f, 4737.0f, 4738.0f, 4739.0f, -2340.0f, -2341.0f, 
-2342.0f, -2343.0f, -2344.0f, -2345.0f, -2346.0f, -2347.0f, -2348.0f, -2349.0f, -2350.0f, -2351.0f, -2352.0f, -2353.0f, -2354.0f, -2355.0f, -2356.0f, -2357.0f, -2358.0f, -2359.0f, -2360.0f, -2361.0f, -2362.0f, -2363.0f, -2364.0f, -2365.0f, -2366.0f, -2367.0f, -2368.0f, -2369.0f, 4740.0f, 4741.0f, 4742.0f, 4743.0f, 4744.0f, 4745.0f, 4746.0f, 4747.0f, 4748.0f, 4749.0f, 4750.0f, 4751.0f, 4752.0f, 4753.0f, 4754.0f, 4755.0f, 4756.0f, 4757.0f, 4758.0f, 4759.0f, 4760.0f, 4761.0f, 4762.0f, 4763.0f, 4764.0f, 4765.0f, 4766.0f, 4767.0f, 4768.0f, 4769.0f, 4770.0f, 4771.0f, 4772.0f, 4773.0f, 4774.0f, 4775.0f, 4776.0f, 4777.0f, 4778.0f, 4779.0f, 4780.0f, 4781.0f, 4782.0f, 4783.0f, 4784.0f, 4785.0f, 4786.0f, 4787.0f, 4788.0f, 4789.0f, 4790.0f, 4791.0f, 4792.0f, 4793.0f, 4794.0f, 4795.0f, 4796.0f, 4797.0f, 4798.0f, 4799.0f, -2370.0f, -2371.0f, -2372.0f, -2373.0f, -2374.0f, -2375.0f, -2376.0f, -2377.0f, -2378.0f, -2379.0f, -2380.0f, -2381.0f, -2382.0f, -2383.0f, -2384.0f, -2385.0f, -2386.0f, -2387.0f, -2388.0f, -2389.0f, -2390.0f, -2391.0f, -2392.0f, -2393.0f, -2394.0f, -2395.0f, -2396.0f, -2397.0f, -2398.0f, -2399.0f, 4800.0f, 4801.0f, 4802.0f, 4803.0f, 4804.0f, 4805.0f, 4806.0f, 4807.0f, 4808.0f, 4809.0f, 4810.0f, 4811.0f, 4812.0f, 4813.0f, 4814.0f, 4815.0f, 4816.0f, 4817.0f, 4818.0f, 4819.0f, 4820.0f, 4821.0f, 4822.0f, 4823.0f, 4824.0f, 4825.0f, 4826.0f, 4827.0f, 4828.0f, 4829.0f, 4830.0f, 4831.0f, 4832.0f, 4833.0f, 4834.0f, 4835.0f, 4836.0f, 4837.0f, 4838.0f, 4839.0f, 4840.0f, 4841.0f, 4842.0f, 4843.0f, 4844.0f, 4845.0f, 4846.0f, 4847.0f, 4848.0f, 4849.0f, 4850.0f, 4851.0f, 4852.0f, 4853.0f, 4854.0f, 4855.0f, 4856.0f, 4857.0f, 4858.0f, 4859.0f, -2400.0f, -2401.0f, -2402.0f, -2403.0f, -2404.0f, -2405.0f, -2406.0f, -2407.0f, -2408.0f, -2409.0f, -2410.0f, -2411.0f, -2412.0f, -2413.0f, -2414.0f, -2415.0f, -2416.0f, -2417.0f, -2418.0f, -2419.0f, -2420.0f, -2421.0f, -2422.0f, -2423.0f, -2424.0f, -2425.0f, -2426.0f, -2427.0f, -2428.0f, -2429.0f, 4860.0f, 4861.0f, 4862.0f, 4863.0f, 
4864.0f, 4865.0f, 4866.0f, 4867.0f, 4868.0f, 4869.0f, 4870.0f, 4871.0f, 4872.0f, 4873.0f, 4874.0f, 4875.0f, 4876.0f, 4877.0f, 4878.0f, 4879.0f, 4880.0f, 4881.0f, 4882.0f, 4883.0f, 4884.0f, 4885.0f, 4886.0f, 4887.0f, 4888.0f, 4889.0f, 4890.0f, 4891.0f, 4892.0f, 4893.0f, 4894.0f, 4895.0f, 4896.0f, 4897.0f, 4898.0f, 4899.0f, 4900.0f, 4901.0f, 4902.0f, 4903.0f, 4904.0f, 4905.0f, 4906.0f, 4907.0f, 4908.0f, 4909.0f, 4910.0f, 4911.0f, 4912.0f, 4913.0f, 4914.0f, 4915.0f, 4916.0f, 4917.0f, 4918.0f, 4919.0f, -2430.0f, -2431.0f, -2432.0f, -2433.0f, -2434.0f, -2435.0f, -2436.0f, -2437.0f, -2438.0f, -2439.0f, -2440.0f, -2441.0f, -2442.0f, -2443.0f, -2444.0f, -2445.0f, -2446.0f, -2447.0f, -2448.0f, -2449.0f, -2450.0f, -2451.0f, -2452.0f, -2453.0f, -2454.0f, -2455.0f, -2456.0f, -2457.0f, -2458.0f, -2459.0f, 4920.0f, 4921.0f, 4922.0f, 4923.0f, 4924.0f, 4925.0f, 4926.0f, 4927.0f, 4928.0f, 4929.0f, 4930.0f, 4931.0f, 4932.0f, 4933.0f, 4934.0f, 4935.0f, 4936.0f, 4937.0f, 4938.0f, 4939.0f, 4940.0f, 4941.0f, 4942.0f, 4943.0f, 4944.0f, 4945.0f, 4946.0f, 4947.0f, 4948.0f, 4949.0f, 4950.0f, 4951.0f, 4952.0f, 4953.0f, 4954.0f, 4955.0f, 4956.0f, 4957.0f, 4958.0f, 4959.0f, 4960.0f, 4961.0f, 4962.0f, 4963.0f, 4964.0f, 4965.0f, 4966.0f, 4967.0f, 4968.0f, 4969.0f, 4970.0f, 4971.0f, 4972.0f, 4973.0f, 4974.0f, 4975.0f, 4976.0f, 4977.0f, 4978.0f, 4979.0f, -2460.0f, -2461.0f, -2462.0f, -2463.0f, -2464.0f, -2465.0f, -2466.0f, -2467.0f, -2468.0f, -2469.0f, -2470.0f, -2471.0f, -2472.0f, -2473.0f, -2474.0f, -2475.0f, -2476.0f, -2477.0f, -2478.0f, -2479.0f, -2480.0f, -2481.0f, -2482.0f, -2483.0f, -2484.0f, -2485.0f, -2486.0f, -2487.0f, -2488.0f, -2489.0f, 4980.0f, 4981.0f, 4982.0f, 4983.0f, 4984.0f, 4985.0f, 4986.0f, 4987.0f, 4988.0f, 4989.0f, 4990.0f, 4991.0f, 4992.0f, 4993.0f, 4994.0f, 4995.0f, 4996.0f, 4997.0f, 4998.0f, 4999.0f, 5000.0f, 5001.0f, 5002.0f, 5003.0f, 5004.0f, 5005.0f, 5006.0f, 5007.0f, 5008.0f, 5009.0f, 5010.0f, 5011.0f, 5012.0f, 5013.0f, 5014.0f, 5015.0f, 5016.0f, 5017.0f, 5018.0f, 
5019.0f, 5020.0f, 5021.0f, 5022.0f, 5023.0f, 5024.0f, 5025.0f, 5026.0f, 5027.0f, 5028.0f, 5029.0f, 5030.0f, 5031.0f, 5032.0f, 5033.0f, 5034.0f, 5035.0f, 5036.0f, 5037.0f, 5038.0f, 5039.0f, -2490.0f, -2491.0f, -2492.0f, -2493.0f, -2494.0f, -2495.0f, -2496.0f, -2497.0f, -2498.0f, -2499.0f, -2500.0f, -2501.0f, -2502.0f, -2503.0f, -2504.0f, -2505.0f, -2506.0f, -2507.0f, -2508.0f, -2509.0f, -2510.0f, -2511.0f, -2512.0f, -2513.0f, -2514.0f, -2515.0f, -2516.0f, -2517.0f, -2518.0f, -2519.0f, 5040.0f, 5041.0f, 5042.0f, 5043.0f, 5044.0f, 5045.0f, 5046.0f, 5047.0f, 5048.0f, 5049.0f, 5050.0f, 5051.0f, 5052.0f, 5053.0f, 5054.0f, 5055.0f, 5056.0f, 5057.0f, 5058.0f, 5059.0f, 5060.0f, 5061.0f, 5062.0f, 5063.0f, 5064.0f, 5065.0f, 5066.0f, 5067.0f, 5068.0f, 5069.0f, 5070.0f, 5071.0f, 5072.0f, 5073.0f, 5074.0f, 5075.0f, 5076.0f, 5077.0f, 5078.0f, 5079.0f, 5080.0f, 5081.0f, 5082.0f, 5083.0f, 5084.0f, 5085.0f, 5086.0f, 5087.0f, 5088.0f, 5089.0f, 5090.0f, 5091.0f, 5092.0f, 5093.0f, 5094.0f, 5095.0f, 5096.0f, 5097.0f, 5098.0f, 5099.0f, -2520.0f, -2521.0f, -2522.0f, -2523.0f, -2524.0f, -2525.0f, -2526.0f, -2527.0f, -2528.0f, -2529.0f, -2530.0f, -2531.0f, -2532.0f, -2533.0f, -2534.0f, -2535.0f, -2536.0f, -2537.0f, -2538.0f, -2539.0f, -2540.0f, -2541.0f, -2542.0f, -2543.0f, -2544.0f, -2545.0f, -2546.0f, -2547.0f, -2548.0f, -2549.0f, 5100.0f, 5101.0f, 5102.0f, 5103.0f, 5104.0f, 5105.0f, 5106.0f, 5107.0f, 5108.0f, 5109.0f, 5110.0f, 5111.0f, 5112.0f, 5113.0f, 5114.0f, 5115.0f, 5116.0f, 5117.0f, 5118.0f, 5119.0f, 5120.0f, 5121.0f, 5122.0f, 5123.0f, 5124.0f, 5125.0f, 5126.0f, 5127.0f, 5128.0f, 5129.0f, 5130.0f, 5131.0f, 5132.0f, 5133.0f, 5134.0f, 5135.0f, 5136.0f, 5137.0f, 5138.0f, 5139.0f, 5140.0f, 5141.0f, 5142.0f, 5143.0f, 5144.0f, 5145.0f, 5146.0f, 5147.0f, 5148.0f, 5149.0f, 5150.0f, 5151.0f, 5152.0f, 5153.0f, 5154.0f, 5155.0f, 5156.0f, 5157.0f, 5158.0f, 5159.0f, -2550.0f, -2551.0f, -2552.0f, -2553.0f, -2554.0f, -2555.0f, -2556.0f, -2557.0f, -2558.0f, -2559.0f, -2560.0f, -2561.0f, -2562.0f, 
-2563.0f, -2564.0f, -2565.0f, -2566.0f, -2567.0f, -2568.0f, -2569.0f, -2570.0f, -2571.0f, -2572.0f, -2573.0f, -2574.0f, -2575.0f, -2576.0f, -2577.0f, -2578.0f, -2579.0f, 5160.0f, 5161.0f, 5162.0f, 5163.0f, 5164.0f, 5165.0f, 5166.0f, 5167.0f, 5168.0f, 5169.0f, 5170.0f, 5171.0f, 5172.0f, 5173.0f, 5174.0f, 5175.0f, 5176.0f, 5177.0f, 5178.0f, 5179.0f, 5180.0f, 5181.0f, 5182.0f, 5183.0f, 5184.0f, 5185.0f, 5186.0f, 5187.0f, 5188.0f, 5189.0f, 5190.0f, 5191.0f, 5192.0f, 5193.0f, 5194.0f, 5195.0f, 5196.0f, 5197.0f, 5198.0f, 5199.0f, 5200.0f, 5201.0f, 5202.0f, 5203.0f, 5204.0f, 5205.0f, 5206.0f, 5207.0f, 5208.0f, 5209.0f, 5210.0f, 5211.0f, 5212.0f, 5213.0f, 5214.0f, 5215.0f, 5216.0f, 5217.0f, 5218.0f, 5219.0f, -2580.0f, -2581.0f, -2582.0f, -2583.0f, -2584.0f, -2585.0f, -2586.0f, -2587.0f, -2588.0f, -2589.0f, -2590.0f, -2591.0f, -2592.0f, -2593.0f, -2594.0f, -2595.0f, -2596.0f, -2597.0f, -2598.0f, -2599.0f, -2600.0f, -2601.0f, -2602.0f, -2603.0f, -2604.0f, -2605.0f, -2606.0f, -2607.0f, -2608.0f, -2609.0f, 5220.0f, 5221.0f, 5222.0f, 5223.0f, 5224.0f, 5225.0f, 5226.0f, 5227.0f, 5228.0f, 5229.0f, 5230.0f, 5231.0f, 5232.0f, 5233.0f, 5234.0f, 5235.0f, 5236.0f, 5237.0f, 5238.0f, 5239.0f, 5240.0f, 5241.0f, 5242.0f, 5243.0f, 5244.0f, 5245.0f, 5246.0f, 5247.0f, 5248.0f, 5249.0f, 5250.0f, 5251.0f, 5252.0f, 5253.0f, 5254.0f, 5255.0f, 5256.0f, 5257.0f, 5258.0f, 5259.0f, 5260.0f, 5261.0f, 5262.0f, 5263.0f, 5264.0f, 5265.0f, 5266.0f, 5267.0f, 5268.0f, 5269.0f, 5270.0f, 5271.0f, 5272.0f, 5273.0f, 5274.0f, 5275.0f, 5276.0f, 5277.0f, 5278.0f, 5279.0f, -2610.0f, -2611.0f, -2612.0f, -2613.0f, -2614.0f, -2615.0f, -2616.0f, -2617.0f, -2618.0f, -2619.0f, -2620.0f, -2621.0f, -2622.0f, -2623.0f, -2624.0f, -2625.0f, -2626.0f, -2627.0f, -2628.0f, -2629.0f, -2630.0f, -2631.0f, -2632.0f, -2633.0f, -2634.0f, -2635.0f, -2636.0f, -2637.0f, -2638.0f, -2639.0f, 5280.0f, 5281.0f, 5282.0f, 5283.0f, 5284.0f, 5285.0f, 5286.0f, 5287.0f, 5288.0f, 5289.0f, 5290.0f, 5291.0f, 5292.0f, 5293.0f, 5294.0f, 5295.0f, 
5296.0f, 5297.0f, 5298.0f, 5299.0f, 5300.0f, 5301.0f, 5302.0f, 5303.0f, 5304.0f, 5305.0f, 5306.0f, 5307.0f, 5308.0f, 5309.0f, 5310.0f, 5311.0f, 5312.0f, 5313.0f, 5314.0f, 5315.0f, 5316.0f, 5317.0f, 5318.0f, 5319.0f, 5320.0f, 5321.0f, 5322.0f, 5323.0f, 5324.0f, 5325.0f, 5326.0f, 5327.0f, 5328.0f, 5329.0f, 5330.0f, 5331.0f, 5332.0f, 5333.0f, 5334.0f, 5335.0f, 5336.0f, 5337.0f, 5338.0f, 5339.0f, -2640.0f, -2641.0f, -2642.0f, -2643.0f, -2644.0f, -2645.0f, -2646.0f, -2647.0f, -2648.0f, -2649.0f, -2650.0f, -2651.0f, -2652.0f, -2653.0f, -2654.0f, -2655.0f, -2656.0f, -2657.0f, -2658.0f, -2659.0f, -2660.0f, -2661.0f, -2662.0f, -2663.0f, -2664.0f, -2665.0f, -2666.0f, -2667.0f, -2668.0f, -2669.0f, 5340.0f, 5341.0f, 5342.0f, 5343.0f, 5344.0f, 5345.0f, 5346.0f, 5347.0f, 5348.0f, 5349.0f, 5350.0f, 5351.0f, 5352.0f, 5353.0f, 5354.0f, 5355.0f, 5356.0f, 5357.0f, 5358.0f, 5359.0f, 5360.0f, 5361.0f, 5362.0f, 5363.0f, 5364.0f, 5365.0f, 5366.0f, 5367.0f, 5368.0f, 5369.0f, 5370.0f, 5371.0f, 5372.0f, 5373.0f, 5374.0f, 5375.0f, 5376.0f, 5377.0f, 5378.0f, 5379.0f, 5380.0f, 5381.0f, 5382.0f, 5383.0f, 5384.0f, 5385.0f, 5386.0f, 5387.0f, 5388.0f, 5389.0f, 5390.0f, 5391.0f, 5392.0f, 5393.0f, 5394.0f, 5395.0f, 5396.0f, 5397.0f, 5398.0f, 5399.0f, -2670.0f, -2671.0f, -2672.0f, -2673.0f, -2674.0f, -2675.0f, -2676.0f, -2677.0f, -2678.0f, -2679.0f, -2680.0f, -2681.0f, -2682.0f, -2683.0f, -2684.0f, -2685.0f, -2686.0f, -2687.0f, -2688.0f, -2689.0f, -2690.0f, -2691.0f, -2692.0f, -2693.0f, -2694.0f, -2695.0f, -2696.0f, -2697.0f, -2698.0f, -2699.0f, 5400.0f, 5401.0f, 5402.0f, 5403.0f, 5404.0f, 5405.0f, 5406.0f, 5407.0f, 5408.0f, 5409.0f, 5410.0f, 5411.0f, 5412.0f, 5413.0f, 5414.0f, 5415.0f, 5416.0f, 5417.0f, 5418.0f, 5419.0f, 5420.0f, 5421.0f, 5422.0f, 5423.0f, 5424.0f, 5425.0f, 5426.0f, 5427.0f, 5428.0f, 5429.0f, 5430.0f, 5431.0f, 5432.0f, 5433.0f, 5434.0f, 5435.0f, 5436.0f, 5437.0f, 5438.0f, 5439.0f, 5440.0f, 5441.0f, 5442.0f, 5443.0f, 5444.0f, 5445.0f, 5446.0f, 5447.0f, 5448.0f, 5449.0f, 5450.0f, 
5451.0f, 5452.0f, 5453.0f, 5454.0f, 5455.0f, 5456.0f, 5457.0f, 5458.0f, 5459.0f, -2700.0f, -2701.0f, -2702.0f, -2703.0f, -2704.0f, -2705.0f, -2706.0f, -2707.0f, -2708.0f, -2709.0f, -2710.0f, -2711.0f, -2712.0f, -2713.0f, -2714.0f, -2715.0f, -2716.0f, -2717.0f, -2718.0f, -2719.0f, -2720.0f, -2721.0f, -2722.0f, -2723.0f, -2724.0f, -2725.0f, -2726.0f, -2727.0f, -2728.0f, -2729.0f, 5460.0f, 5461.0f, 5462.0f, 5463.0f, 5464.0f, 5465.0f, 5466.0f, 5467.0f, 5468.0f, 5469.0f, 5470.0f, 5471.0f, 5472.0f, 5473.0f, 5474.0f, 5475.0f, 5476.0f, 5477.0f, 5478.0f, 5479.0f, 5480.0f, 5481.0f, 5482.0f, 5483.0f, 5484.0f, 5485.0f, 5486.0f, 5487.0f, 5488.0f, 5489.0f, 5490.0f, 5491.0f, 5492.0f, 5493.0f, 5494.0f, 5495.0f, 5496.0f, 5497.0f, 5498.0f, 5499.0f, 5500.0f, 5501.0f, 5502.0f, 5503.0f, 5504.0f, 5505.0f, 5506.0f, 5507.0f, 5508.0f, 5509.0f, 5510.0f, 5511.0f, 5512.0f, 5513.0f, 5514.0f, 5515.0f, 5516.0f, 5517.0f, 5518.0f, 5519.0f, -2730.0f, -2731.0f, -2732.0f, -2733.0f, -2734.0f, -2735.0f, -2736.0f, -2737.0f, -2738.0f, -2739.0f, -2740.0f, -2741.0f, -2742.0f, -2743.0f, -2744.0f, -2745.0f, -2746.0f, -2747.0f, -2748.0f, -2749.0f, -2750.0f, -2751.0f, -2752.0f, -2753.0f, -2754.0f, -2755.0f, -2756.0f, -2757.0f, -2758.0f, -2759.0f, 5520.0f, 5521.0f, 5522.0f, 5523.0f, 5524.0f, 5525.0f, 5526.0f, 5527.0f, 5528.0f, 5529.0f, 5530.0f, 5531.0f, 5532.0f, 5533.0f, 5534.0f, 5535.0f, 5536.0f, 5537.0f, 5538.0f, 5539.0f, 5540.0f, 5541.0f, 5542.0f, 5543.0f, 5544.0f, 5545.0f, 5546.0f, 5547.0f, 5548.0f, 5549.0f, 5550.0f, 5551.0f, 5552.0f, 5553.0f, 5554.0f, 5555.0f, 5556.0f, 5557.0f, 5558.0f, 5559.0f, 5560.0f, 5561.0f, 5562.0f, 5563.0f, 5564.0f, 5565.0f, 5566.0f, 5567.0f, 5568.0f, 5569.0f, 5570.0f, 5571.0f, 5572.0f, 5573.0f, 5574.0f, 5575.0f, 5576.0f, 5577.0f, 5578.0f, 5579.0f, -2760.0f, -2761.0f, -2762.0f, -2763.0f, -2764.0f, -2765.0f, -2766.0f, -2767.0f, -2768.0f, -2769.0f, -2770.0f, -2771.0f, -2772.0f, -2773.0f, -2774.0f, -2775.0f, -2776.0f, -2777.0f, -2778.0f, -2779.0f, -2780.0f, -2781.0f, -2782.0f, 
-2783.0f, -2784.0f, -2785.0f, -2786.0f, -2787.0f, -2788.0f, -2789.0f, 5580.0f, 5581.0f, 5582.0f, 5583.0f, 5584.0f, 5585.0f, 5586.0f, 5587.0f, 5588.0f, 5589.0f, 5590.0f, 5591.0f, 5592.0f, 5593.0f, 5594.0f, 5595.0f, 5596.0f, 5597.0f, 5598.0f, 5599.0f, 5600.0f, 5601.0f, 5602.0f, 5603.0f, 5604.0f, 5605.0f, 5606.0f, 5607.0f, 5608.0f, 5609.0f, 5610.0f, 5611.0f, 5612.0f, 5613.0f, 5614.0f, 5615.0f, 5616.0f, 5617.0f, 5618.0f, 5619.0f, 5620.0f, 5621.0f, 5622.0f, 5623.0f, 5624.0f, 5625.0f, 5626.0f, 5627.0f, 5628.0f, 5629.0f, 5630.0f, 5631.0f, 5632.0f, 5633.0f, 5634.0f, 5635.0f, 5636.0f, 5637.0f, 5638.0f, 5639.0f, -2790.0f, -2791.0f, -2792.0f, -2793.0f, -2794.0f, -2795.0f, -2796.0f, -2797.0f, -2798.0f, -2799.0f, -2800.0f, -2801.0f, -2802.0f, -2803.0f, -2804.0f, -2805.0f, -2806.0f, -2807.0f, -2808.0f, -2809.0f, -2810.0f, -2811.0f, -2812.0f, -2813.0f, -2814.0f, -2815.0f, -2816.0f, -2817.0f, -2818.0f, -2819.0f, 5640.0f, 5641.0f, 5642.0f, 5643.0f, 5644.0f, 5645.0f, 5646.0f, 5647.0f, 5648.0f, 5649.0f, 5650.0f, 5651.0f, 5652.0f, 5653.0f, 5654.0f, 5655.0f, 5656.0f, 5657.0f, 5658.0f, 5659.0f, 5660.0f, 5661.0f, 5662.0f, 5663.0f, 5664.0f, 5665.0f, 5666.0f, 5667.0f, 5668.0f, 5669.0f, 5670.0f, 5671.0f, 5672.0f, 5673.0f, 5674.0f, 5675.0f, 5676.0f, 5677.0f, 5678.0f, 5679.0f, 5680.0f, 5681.0f, 5682.0f, 5683.0f, 5684.0f, 5685.0f, 5686.0f, 5687.0f, 5688.0f, 5689.0f, 5690.0f, 5691.0f, 5692.0f, 5693.0f, 5694.0f, 5695.0f, 5696.0f, 5697.0f, 5698.0f, 5699.0f, -2820.0f, -2821.0f, -2822.0f, -2823.0f, -2824.0f, -2825.0f, -2826.0f, -2827.0f, -2828.0f, -2829.0f, -2830.0f, -2831.0f, -2832.0f, -2833.0f, -2834.0f, -2835.0f, -2836.0f, -2837.0f, -2838.0f, -2839.0f, -2840.0f, -2841.0f, -2842.0f, -2843.0f, -2844.0f, -2845.0f, -2846.0f, -2847.0f, -2848.0f, -2849.0f, 5700.0f, 5701.0f, 5702.0f, 5703.0f, 5704.0f, 5705.0f, 5706.0f, 5707.0f, 5708.0f, 5709.0f, 5710.0f, 5711.0f, 5712.0f, 5713.0f, 5714.0f, 5715.0f, 5716.0f, 5717.0f, 5718.0f, 5719.0f, 5720.0f, 5721.0f, 5722.0f, 5723.0f, 5724.0f, 5725.0f, 5726.0f, 
5727.0f, 5728.0f, 5729.0f, 5730.0f, 5731.0f, 5732.0f, 5733.0f, 5734.0f, 5735.0f, 5736.0f, 5737.0f, 5738.0f, 5739.0f, 5740.0f, 5741.0f, 5742.0f, 5743.0f, 5744.0f, 5745.0f, 5746.0f, 5747.0f, 5748.0f, 5749.0f, 5750.0f, 5751.0f, 5752.0f, 5753.0f, 5754.0f, 5755.0f, 5756.0f, 5757.0f, 5758.0f, 5759.0f, -2850.0f, -2851.0f, -2852.0f, -2853.0f, -2854.0f, -2855.0f, -2856.0f, -2857.0f, -2858.0f, -2859.0f, -2860.0f, -2861.0f, -2862.0f, -2863.0f, -2864.0f, -2865.0f, -2866.0f, -2867.0f, -2868.0f, -2869.0f, -2870.0f, -2871.0f, -2872.0f, -2873.0f, -2874.0f, -2875.0f, -2876.0f, -2877.0f, -2878.0f, -2879.0f, 5760.0f, 5761.0f, 5762.0f, 5763.0f, 5764.0f, 5765.0f, 5766.0f, 5767.0f, 5768.0f, 5769.0f, 5770.0f, 5771.0f, 5772.0f, 5773.0f, 5774.0f, 5775.0f, 5776.0f, 5777.0f, 5778.0f, 5779.0f, 5780.0f, 5781.0f, 5782.0f, 5783.0f, 5784.0f, 5785.0f, 5786.0f, 5787.0f, 5788.0f, 5789.0f, 5790.0f, 5791.0f, 5792.0f, 5793.0f, 5794.0f, 5795.0f, 5796.0f, 5797.0f, 5798.0f, 5799.0f, 5800.0f, 5801.0f, 5802.0f, 5803.0f, 5804.0f, 5805.0f, 5806.0f, 5807.0f, 5808.0f, 5809.0f, 5810.0f, 5811.0f, 5812.0f, 5813.0f, 5814.0f, 5815.0f, 5816.0f, 5817.0f, 5818.0f, 5819.0f, -2880.0f, -2881.0f, -2882.0f, -2883.0f, -2884.0f, -2885.0f, -2886.0f, -2887.0f, -2888.0f, -2889.0f, -2890.0f, -2891.0f, -2892.0f, -2893.0f, -2894.0f, -2895.0f, -2896.0f, -2897.0f, -2898.0f, -2899.0f, -2900.0f, -2901.0f, -2902.0f, -2903.0f, -2904.0f, -2905.0f, -2906.0f, -2907.0f, -2908.0f, -2909.0f, 5820.0f, 5821.0f, 5822.0f, 5823.0f, 5824.0f, 5825.0f, 5826.0f, 5827.0f, 5828.0f, 5829.0f, 5830.0f, 5831.0f, 5832.0f, 5833.0f, 5834.0f, 5835.0f, 5836.0f, 5837.0f, 5838.0f, 5839.0f, 5840.0f, 5841.0f, 5842.0f, 5843.0f, 5844.0f, 5845.0f, 5846.0f, 5847.0f, 5848.0f, 5849.0f, 5850.0f, 5851.0f, 5852.0f, 5853.0f, 5854.0f, 5855.0f, 5856.0f, 5857.0f, 5858.0f, 5859.0f, 5860.0f, 5861.0f, 5862.0f, 5863.0f, 5864.0f, 5865.0f, 5866.0f, 5867.0f, 5868.0f, 5869.0f, 5870.0f, 5871.0f, 5872.0f, 5873.0f, 5874.0f, 5875.0f, 5876.0f, 5877.0f, 5878.0f, 5879.0f, -2910.0f, -2911.0f, 
-2912.0f, -2913.0f, -2914.0f, -2915.0f, -2916.0f, -2917.0f, -2918.0f, -2919.0f, -2920.0f, -2921.0f, -2922.0f, -2923.0f, -2924.0f, -2925.0f, -2926.0f, -2927.0f, -2928.0f, -2929.0f, -2930.0f, -2931.0f, -2932.0f, -2933.0f, -2934.0f, -2935.0f, -2936.0f, -2937.0f, -2938.0f, -2939.0f, 5880.0f, 5881.0f, 5882.0f, 5883.0f, 5884.0f, 5885.0f, 5886.0f, 5887.0f, 5888.0f, 5889.0f, 5890.0f, 5891.0f, 5892.0f, 5893.0f, 5894.0f, 5895.0f, 5896.0f, 5897.0f, 5898.0f, 5899.0f, 5900.0f, 5901.0f, 5902.0f, 5903.0f, 5904.0f, 5905.0f, 5906.0f, 5907.0f, 5908.0f, 5909.0f, 5910.0f, 5911.0f, 5912.0f, 5913.0f, 5914.0f, 5915.0f, 5916.0f, 5917.0f, 5918.0f, 5919.0f, 5920.0f, 5921.0f, 5922.0f, 5923.0f, 5924.0f, 5925.0f, 5926.0f, 5927.0f, 5928.0f, 5929.0f, 5930.0f, 5931.0f, 5932.0f, 5933.0f, 5934.0f, 5935.0f, 5936.0f, 5937.0f, 5938.0f, 5939.0f, -2940.0f, -2941.0f, -2942.0f, -2943.0f, -2944.0f, -2945.0f, -2946.0f, -2947.0f, -2948.0f, -2949.0f, -2950.0f, -2951.0f, -2952.0f, -2953.0f, -2954.0f, -2955.0f, -2956.0f, -2957.0f, -2958.0f, -2959.0f, -2960.0f, -2961.0f, -2962.0f, -2963.0f, -2964.0f, -2965.0f, -2966.0f, -2967.0f, -2968.0f, -2969.0f, 5940.0f, 5941.0f, 5942.0f, 5943.0f, 5944.0f, 5945.0f, 5946.0f, 5947.0f, 5948.0f, 5949.0f, 5950.0f, 5951.0f, 5952.0f, 5953.0f, 5954.0f, 5955.0f, 5956.0f, 5957.0f, 5958.0f, 5959.0f, 5960.0f, 5961.0f, 5962.0f, 5963.0f, 5964.0f, 5965.0f, 5966.0f, 5967.0f, 5968.0f, 5969.0f, 5970.0f, 5971.0f, 5972.0f, 5973.0f, 5974.0f, 5975.0f, 5976.0f, 5977.0f, 5978.0f, 5979.0f, 5980.0f, 5981.0f, 5982.0f, 5983.0f, 5984.0f, 5985.0f, 5986.0f, 5987.0f, 5988.0f, 5989.0f, 5990.0f, 5991.0f, 5992.0f, 5993.0f, 5994.0f, 5995.0f, 5996.0f, 5997.0f, 5998.0f, 5999.0f, -2970.0f, -2971.0f, -2972.0f, -2973.0f, -2974.0f, -2975.0f, -2976.0f, -2977.0f, -2978.0f, -2979.0f, -2980.0f, -2981.0f, -2982.0f, -2983.0f, -2984.0f, -2985.0f, -2986.0f, -2987.0f, -2988.0f, -2989.0f, -2990.0f, -2991.0f, -2992.0f, -2993.0f, -2994.0f, -2995.0f, -2996.0f, -2997.0f, -2998.0f, -2999.0f, 6000.0f, 6001.0f, 6002.0f, 6003.0f, 
6004.0f, 6005.0f, 6006.0f, 6007.0f, 6008.0f, 6009.0f, 6010.0f, 6011.0f, 6012.0f, 6013.0f, 6014.0f, 6015.0f, 6016.0f, 6017.0f, 6018.0f, 6019.0f, 6020.0f, 6021.0f, 6022.0f, 6023.0f, 6024.0f, 6025.0f, 6026.0f, 6027.0f, 6028.0f, 6029.0f, 6030.0f, 6031.0f, 6032.0f, 6033.0f, 6034.0f, 6035.0f, 6036.0f, 6037.0f, 6038.0f, 6039.0f, 6040.0f, 6041.0f, 6042.0f, 6043.0f, 6044.0f, 6045.0f, 6046.0f, 6047.0f, 6048.0f, 6049.0f, 6050.0f, 6051.0f, 6052.0f, 6053.0f, 6054.0f, 6055.0f, 6056.0f, 6057.0f, 6058.0f, 6059.0f, -3000.0f, -3001.0f, -3002.0f, -3003.0f, -3004.0f, -3005.0f, -3006.0f, -3007.0f, -3008.0f, -3009.0f, -3010.0f, -3011.0f, -3012.0f, -3013.0f, -3014.0f, -3015.0f, -3016.0f, -3017.0f, -3018.0f, -3019.0f, -3020.0f, -3021.0f, -3022.0f, -3023.0f, -3024.0f, -3025.0f, -3026.0f, -3027.0f, -3028.0f, -3029.0f, 6060.0f, 6061.0f, 6062.0f, 6063.0f, 6064.0f, 6065.0f, 6066.0f, 6067.0f, 6068.0f, 6069.0f, 6070.0f, 6071.0f, 6072.0f, 6073.0f, 6074.0f, 6075.0f, 6076.0f, 6077.0f, 6078.0f, 6079.0f, 6080.0f, 6081.0f, 6082.0f, 6083.0f, 6084.0f, 6085.0f, 6086.0f, 6087.0f, 6088.0f, 6089.0f, 6090.0f, 6091.0f, 6092.0f, 6093.0f, 6094.0f, 6095.0f, 6096.0f, 6097.0f, 6098.0f, 6099.0f, 6100.0f, 6101.0f, 6102.0f, 6103.0f, 6104.0f, 6105.0f, 6106.0f, 6107.0f, 6108.0f, 6109.0f, 6110.0f, 6111.0f, 6112.0f, 6113.0f, 6114.0f, 6115.0f, 6116.0f, 6117.0f, 6118.0f, 6119.0f, -3030.0f, -3031.0f, -3032.0f, -3033.0f, -3034.0f, -3035.0f, -3036.0f, -3037.0f, -3038.0f, -3039.0f, -3040.0f, -3041.0f, -3042.0f, -3043.0f, -3044.0f, -3045.0f, -3046.0f, -3047.0f, -3048.0f, -3049.0f, -3050.0f, -3051.0f, -3052.0f, -3053.0f, -3054.0f, -3055.0f, -3056.0f, -3057.0f, -3058.0f, -3059.0f, 6120.0f, 6121.0f, 6122.0f, 6123.0f, 6124.0f, 6125.0f, 6126.0f, 6127.0f, 6128.0f, 6129.0f, 6130.0f, 6131.0f, 6132.0f, 6133.0f, 6134.0f, 6135.0f, 6136.0f, 6137.0f, 6138.0f, 6139.0f, 6140.0f, 6141.0f, 6142.0f, 6143.0f, 6144.0f, 6145.0f, 6146.0f, 6147.0f, 6148.0f, 6149.0f, 6150.0f, 6151.0f, 6152.0f, 6153.0f, 6154.0f, 6155.0f, 6156.0f, 6157.0f, 6158.0f, 
6159.0f, 6160.0f, 6161.0f, 6162.0f, 6163.0f, 6164.0f, 6165.0f, 6166.0f, 6167.0f, 6168.0f, 6169.0f, 6170.0f, 6171.0f, 6172.0f, 6173.0f, 6174.0f, 6175.0f, 6176.0f, 6177.0f, 6178.0f, 6179.0f, -3060.0f, -3061.0f, -3062.0f, -3063.0f, -3064.0f, -3065.0f, -3066.0f, -3067.0f, -3068.0f, -3069.0f, -3070.0f, -3071.0f, -3072.0f, -3073.0f, -3074.0f, -3075.0f, -3076.0f, -3077.0f, -3078.0f, -3079.0f, -3080.0f, -3081.0f, -3082.0f, -3083.0f, -3084.0f, -3085.0f, -3086.0f, -3087.0f, -3088.0f, -3089.0f, 6180.0f, 6181.0f, 6182.0f, 6183.0f, 6184.0f, 6185.0f, 6186.0f, 6187.0f, 6188.0f, 6189.0f, 6190.0f, 6191.0f, 6192.0f, 6193.0f, 6194.0f, 6195.0f, 6196.0f, 6197.0f, 6198.0f, 6199.0f, 6200.0f, 6201.0f, 6202.0f, 6203.0f, 6204.0f, 6205.0f, 6206.0f, 6207.0f, 6208.0f, 6209.0f, 6210.0f, 6211.0f, 6212.0f, 6213.0f, 6214.0f, 6215.0f, 6216.0f, 6217.0f, 6218.0f, 6219.0f, 6220.0f, 6221.0f, 6222.0f, 6223.0f, 6224.0f, 6225.0f, 6226.0f, 6227.0f, 6228.0f, 6229.0f, 6230.0f, 6231.0f, 6232.0f, 6233.0f, 6234.0f, 6235.0f, 6236.0f, 6237.0f, 6238.0f, 6239.0f, -3090.0f, -3091.0f, -3092.0f, -3093.0f, -3094.0f, -3095.0f, -3096.0f, -3097.0f, -3098.0f, -3099.0f, -3100.0f, -3101.0f, -3102.0f, -3103.0f, -3104.0f, -3105.0f, -3106.0f, -3107.0f, -3108.0f, -3109.0f, -3110.0f, -3111.0f, -3112.0f, -3113.0f, -3114.0f, -3115.0f, -3116.0f, -3117.0f, -3118.0f, -3119.0f, 6240.0f, 6241.0f, 6242.0f, 6243.0f, 6244.0f, 6245.0f, 6246.0f, 6247.0f, 6248.0f, 6249.0f, 6250.0f, 6251.0f, 6252.0f, 6253.0f, 6254.0f, 6255.0f, 6256.0f, 6257.0f, 6258.0f, 6259.0f, 6260.0f, 6261.0f, 6262.0f, 6263.0f, 6264.0f, 6265.0f, 6266.0f, 6267.0f, 6268.0f, 6269.0f, 6270.0f, 6271.0f, 6272.0f, 6273.0f, 6274.0f, 6275.0f, 6276.0f, 6277.0f, 6278.0f, 6279.0f, 6280.0f, 6281.0f, 6282.0f, 6283.0f, 6284.0f, 6285.0f, 6286.0f, 6287.0f, 6288.0f, 6289.0f, 6290.0f, 6291.0f, 6292.0f, 6293.0f, 6294.0f, 6295.0f, 6296.0f, 6297.0f, 6298.0f, 6299.0f, -3120.0f, -3121.0f, -3122.0f, -3123.0f, -3124.0f, -3125.0f, -3126.0f, -3127.0f, -3128.0f, -3129.0f, -3130.0f, -3131.0f, -3132.0f, 
-3133.0f, -3134.0f, -3135.0f, -3136.0f, -3137.0f, -3138.0f, -3139.0f, -3140.0f, -3141.0f, -3142.0f, -3143.0f, -3144.0f, -3145.0f, -3146.0f, -3147.0f, -3148.0f, -3149.0f, 6300.0f, 6301.0f, 6302.0f, 6303.0f, 6304.0f, 6305.0f, 6306.0f, 6307.0f, 6308.0f, 6309.0f, 6310.0f, 6311.0f, 6312.0f, 6313.0f, 6314.0f, 6315.0f, 6316.0f, 6317.0f, 6318.0f, 6319.0f, 6320.0f, 6321.0f, 6322.0f, 6323.0f, 6324.0f, 6325.0f, 6326.0f, 6327.0f, 6328.0f, 6329.0f, 6330.0f, 6331.0f, 6332.0f, 6333.0f, 6334.0f, 6335.0f, 6336.0f, 6337.0f, 6338.0f, 6339.0f, 6340.0f, 6341.0f, 6342.0f, 6343.0f, 6344.0f, 6345.0f, 6346.0f, 6347.0f, 6348.0f, 6349.0f, 6350.0f, 6351.0f, 6352.0f, 6353.0f, 6354.0f, 6355.0f, 6356.0f, 6357.0f, 6358.0f, 6359.0f, -3150.0f, -3151.0f, -3152.0f, -3153.0f, -3154.0f, -3155.0f, -3156.0f, -3157.0f, -3158.0f, -3159.0f, -3160.0f, -3161.0f, -3162.0f, -3163.0f, -3164.0f, -3165.0f, -3166.0f, -3167.0f, -3168.0f, -3169.0f, -3170.0f, -3171.0f, -3172.0f, -3173.0f, -3174.0f, -3175.0f, -3176.0f, -3177.0f, -3178.0f, -3179.0f, 6360.0f, 6361.0f, 6362.0f, 6363.0f, 6364.0f, 6365.0f, 6366.0f, 6367.0f, 6368.0f, 6369.0f, 6370.0f, 6371.0f, 6372.0f, 6373.0f, 6374.0f, 6375.0f, 6376.0f, 6377.0f, 6378.0f, 6379.0f, 6380.0f, 6381.0f, 6382.0f, 6383.0f, 6384.0f, 6385.0f, 6386.0f, 6387.0f, 6388.0f, 6389.0f, 6390.0f, 6391.0f, 6392.0f, 6393.0f, 6394.0f, 6395.0f, 6396.0f, 6397.0f, 6398.0f, 6399.0f, 6400.0f, 6401.0f, 6402.0f, 6403.0f, 6404.0f, 6405.0f, 6406.0f, 6407.0f, 6408.0f, 6409.0f, 6410.0f, 6411.0f, 6412.0f, 6413.0f, 6414.0f, 6415.0f, 6416.0f, 6417.0f, 6418.0f, 6419.0f, -3180.0f, -3181.0f, -3182.0f, -3183.0f, -3184.0f, -3185.0f, -3186.0f, -3187.0f, -3188.0f, -3189.0f, -3190.0f, -3191.0f, -3192.0f, -3193.0f, -3194.0f, -3195.0f, -3196.0f, -3197.0f, -3198.0f, -3199.0f, -3200.0f, -3201.0f, -3202.0f, -3203.0f, -3204.0f, -3205.0f, -3206.0f, -3207.0f, -3208.0f, -3209.0f, 6420.0f, 6421.0f, 6422.0f, 6423.0f, 6424.0f, 6425.0f, 6426.0f, 6427.0f, 6428.0f, 6429.0f, 6430.0f, 6431.0f, 6432.0f, 6433.0f, 6434.0f, 6435.0f, 
6436.0f, 6437.0f, 6438.0f, 6439.0f, 6440.0f, 6441.0f, 6442.0f, 6443.0f, 6444.0f, 6445.0f, 6446.0f, 6447.0f, 6448.0f, 6449.0f, 6450.0f, 6451.0f, 6452.0f, 6453.0f, 6454.0f, 6455.0f, 6456.0f, 6457.0f, 6458.0f, 6459.0f, 6460.0f, 6461.0f, 6462.0f, 6463.0f, 6464.0f, 6465.0f, 6466.0f, 6467.0f, 6468.0f, 6469.0f, 6470.0f, 6471.0f, 6472.0f, 6473.0f, 6474.0f, 6475.0f, 6476.0f, 6477.0f, 6478.0f, 6479.0f, -3210.0f, -3211.0f, -3212.0f, -3213.0f, -3214.0f, -3215.0f, -3216.0f, -3217.0f, -3218.0f, -3219.0f, -3220.0f, -3221.0f, -3222.0f, -3223.0f, -3224.0f, -3225.0f, -3226.0f, -3227.0f, -3228.0f, -3229.0f, -3230.0f, -3231.0f, -3232.0f, -3233.0f, -3234.0f, -3235.0f, -3236.0f, -3237.0f, -3238.0f, -3239.0f, 6480.0f, 6481.0f, 6482.0f, 6483.0f, 6484.0f, 6485.0f, 6486.0f, 6487.0f, 6488.0f, 6489.0f, 6490.0f, 6491.0f, 6492.0f, 6493.0f, 6494.0f, 6495.0f, 6496.0f, 6497.0f, 6498.0f, 6499.0f, 6500.0f, 6501.0f, 6502.0f, 6503.0f, 6504.0f, 6505.0f, 6506.0f, 6507.0f, 6508.0f, 6509.0f, 6510.0f, 6511.0f, 6512.0f, 6513.0f, 6514.0f, 6515.0f, 6516.0f, 6517.0f, 6518.0f, 6519.0f, 6520.0f, 6521.0f, 6522.0f, 6523.0f, 6524.0f, 6525.0f, 6526.0f, 6527.0f, 6528.0f, 6529.0f, 6530.0f, 6531.0f, 6532.0f, 6533.0f, 6534.0f, 6535.0f, 6536.0f, 6537.0f, 6538.0f, 6539.0f, -3240.0f, -3241.0f, -3242.0f, -3243.0f, -3244.0f, -3245.0f, -3246.0f, -3247.0f, -3248.0f, -3249.0f, -3250.0f, -3251.0f, -3252.0f, -3253.0f, -3254.0f, -3255.0f, -3256.0f, -3257.0f, -3258.0f, -3259.0f, -3260.0f, -3261.0f, -3262.0f, -3263.0f, -3264.0f, -3265.0f, -3266.0f, -3267.0f, -3268.0f, -3269.0f, 6540.0f, 6541.0f, 6542.0f, 6543.0f, 6544.0f, 6545.0f, 6546.0f, 6547.0f, 6548.0f, 6549.0f, 6550.0f, 6551.0f, 6552.0f, 6553.0f, 6554.0f, 6555.0f, 6556.0f, 6557.0f, 6558.0f, 6559.0f, 6560.0f, 6561.0f, 6562.0f, 6563.0f, 6564.0f, 6565.0f, 6566.0f, 6567.0f, 6568.0f, 6569.0f, 6570.0f, 6571.0f, 6572.0f, 6573.0f, 6574.0f, 6575.0f, 6576.0f, 6577.0f, 6578.0f, 6579.0f, 6580.0f, 6581.0f, 6582.0f, 6583.0f, 6584.0f, 6585.0f, 6586.0f, 6587.0f, 6588.0f, 6589.0f, 6590.0f, 
6591.0f, 6592.0f, 6593.0f, 6594.0f, 6595.0f, 6596.0f, 6597.0f, 6598.0f, 6599.0f, -3270.0f, -3271.0f, -3272.0f, -3273.0f, -3274.0f, -3275.0f, -3276.0f, -3277.0f, -3278.0f, -3279.0f, -3280.0f, -3281.0f, -3282.0f, -3283.0f, -3284.0f, -3285.0f, -3286.0f, -3287.0f, -3288.0f, -3289.0f, -3290.0f, -3291.0f, -3292.0f, -3293.0f, -3294.0f, -3295.0f, -3296.0f, -3297.0f, -3298.0f, -3299.0f, 6600.0f, 6601.0f, 6602.0f, 6603.0f, 6604.0f, 6605.0f, 6606.0f, 6607.0f, 6608.0f, 6609.0f, 6610.0f, 6611.0f, 6612.0f, 6613.0f, 6614.0f, 6615.0f, 6616.0f, 6617.0f, 6618.0f, 6619.0f, 6620.0f, 6621.0f, 6622.0f, 6623.0f, 6624.0f, 6625.0f, 6626.0f, 6627.0f, 6628.0f, 6629.0f, 6630.0f, 6631.0f, 6632.0f, 6633.0f, 6634.0f, 6635.0f, 6636.0f, 6637.0f, 6638.0f, 6639.0f, 6640.0f, 6641.0f, 6642.0f, 6643.0f, 6644.0f, 6645.0f, 6646.0f, 6647.0f, 6648.0f, 6649.0f, 6650.0f, 6651.0f, 6652.0f, 6653.0f, 6654.0f, 6655.0f, 6656.0f, 6657.0f, 6658.0f, 6659.0f, -3300.0f, -3301.0f, -3302.0f, -3303.0f, -3304.0f, -3305.0f, -3306.0f, -3307.0f, -3308.0f, -3309.0f, -3310.0f, -3311.0f, -3312.0f, -3313.0f, -3314.0f, -3315.0f, -3316.0f, -3317.0f, -3318.0f, -3319.0f, -3320.0f, -3321.0f, -3322.0f, -3323.0f, -3324.0f, -3325.0f, -3326.0f, -3327.0f, -3328.0f, -3329.0f, 6660.0f, 6661.0f, 6662.0f, 6663.0f, 6664.0f, 6665.0f, 6666.0f, 6667.0f, 6668.0f, 6669.0f, 6670.0f, 6671.0f, 6672.0f, 6673.0f, 6674.0f, 6675.0f, 6676.0f, 6677.0f, 6678.0f, 6679.0f, 6680.0f, 6681.0f, 6682.0f, 6683.0f, 6684.0f, 6685.0f, 6686.0f, 6687.0f, 6688.0f, 6689.0f, 6690.0f, 6691.0f, 6692.0f, 6693.0f, 6694.0f, 6695.0f, 6696.0f, 6697.0f, 6698.0f, 6699.0f, 6700.0f, 6701.0f, 6702.0f, 6703.0f, 6704.0f, 6705.0f, 6706.0f, 6707.0f, 6708.0f, 6709.0f, 6710.0f, 6711.0f, 6712.0f, 6713.0f, 6714.0f, 6715.0f, 6716.0f, 6717.0f, 6718.0f, 6719.0f, -3330.0f, -3331.0f, -3332.0f, -3333.0f, -3334.0f, -3335.0f, -3336.0f, -3337.0f, -3338.0f, -3339.0f, -3340.0f, -3341.0f, -3342.0f, -3343.0f, -3344.0f, -3345.0f, -3346.0f, -3347.0f, -3348.0f, -3349.0f, -3350.0f, -3351.0f, -3352.0f, 
-3353.0f, -3354.0f, -3355.0f, -3356.0f, -3357.0f, -3358.0f, -3359.0f, 6720.0f, 6721.0f, 6722.0f, 6723.0f, 6724.0f, 6725.0f, 6726.0f, 6727.0f, 6728.0f, 6729.0f, 6730.0f, 6731.0f, 6732.0f, 6733.0f, 6734.0f, 6735.0f, 6736.0f, 6737.0f, 6738.0f, 6739.0f, 6740.0f, 6741.0f, 6742.0f, 6743.0f, 6744.0f, 6745.0f, 6746.0f, 6747.0f, 6748.0f, 6749.0f, 6750.0f, 6751.0f, 6752.0f, 6753.0f, 6754.0f, 6755.0f, 6756.0f, 6757.0f, 6758.0f, 6759.0f, 6760.0f, 6761.0f, 6762.0f, 6763.0f, 6764.0f, 6765.0f, 6766.0f, 6767.0f, 6768.0f, 6769.0f, 6770.0f, 6771.0f, 6772.0f, 6773.0f, 6774.0f, 6775.0f, 6776.0f, 6777.0f, 6778.0f, 6779.0f, -3360.0f, -3361.0f, -3362.0f, -3363.0f, -3364.0f, -3365.0f, -3366.0f, -3367.0f, -3368.0f, -3369.0f, -3370.0f, -3371.0f, -3372.0f, -3373.0f, -3374.0f, -3375.0f, -3376.0f, -3377.0f, -3378.0f, -3379.0f, -3380.0f, -3381.0f, -3382.0f, -3383.0f, -3384.0f, -3385.0f, -3386.0f, -3387.0f, -3388.0f, -3389.0f, 6780.0f, 6781.0f, 6782.0f, 6783.0f, 6784.0f, 6785.0f, 6786.0f, 6787.0f, 6788.0f, 6789.0f, 6790.0f, 6791.0f, 6792.0f, 6793.0f, 6794.0f, 6795.0f, 6796.0f, 6797.0f, 6798.0f, 6799.0f, 6800.0f, 6801.0f, 6802.0f, 6803.0f, 6804.0f, 6805.0f, 6806.0f, 6807.0f, 6808.0f, 6809.0f, 6810.0f, 6811.0f, 6812.0f, 6813.0f, 6814.0f, 6815.0f, 6816.0f, 6817.0f, 6818.0f, 6819.0f, 6820.0f, 6821.0f, 6822.0f, 6823.0f, 6824.0f, 6825.0f, 6826.0f, 6827.0f, 6828.0f, 6829.0f, 6830.0f, 6831.0f, 6832.0f, 6833.0f, 6834.0f, 6835.0f, 6836.0f, 6837.0f, 6838.0f, 6839.0f, -3390.0f, -3391.0f, -3392.0f, -3393.0f, -3394.0f, -3395.0f, -3396.0f, -3397.0f, -3398.0f, -3399.0f, -3400.0f, -3401.0f, -3402.0f, -3403.0f, -3404.0f, -3405.0f, -3406.0f, -3407.0f, -3408.0f, -3409.0f, -3410.0f, -3411.0f, -3412.0f, -3413.0f, -3414.0f, -3415.0f, -3416.0f, -3417.0f, -3418.0f, -3419.0f, 6840.0f, 6841.0f, 6842.0f, 6843.0f, 6844.0f, 6845.0f, 6846.0f, 6847.0f, 6848.0f, 6849.0f, 6850.0f, 6851.0f, 6852.0f, 6853.0f, 6854.0f, 6855.0f, 6856.0f, 6857.0f, 6858.0f, 6859.0f, 6860.0f, 6861.0f, 6862.0f, 6863.0f, 6864.0f, 6865.0f, 6866.0f, 
6867.0f, 6868.0f, 6869.0f, 6870.0f, 6871.0f, 6872.0f, 6873.0f, 6874.0f, 6875.0f, 6876.0f, 6877.0f, 6878.0f, 6879.0f, 6880.0f, 6881.0f, 6882.0f, 6883.0f, 6884.0f, 6885.0f, 6886.0f, 6887.0f, 6888.0f, 6889.0f, 6890.0f, 6891.0f, 6892.0f, 6893.0f, 6894.0f, 6895.0f, 6896.0f, 6897.0f, 6898.0f, 6899.0f, -3420.0f, -3421.0f, -3422.0f, -3423.0f, -3424.0f, -3425.0f, -3426.0f, -3427.0f, -3428.0f, -3429.0f, -3430.0f, -3431.0f, -3432.0f, -3433.0f, -3434.0f, -3435.0f, -3436.0f, -3437.0f, -3438.0f, -3439.0f, -3440.0f, -3441.0f, -3442.0f, -3443.0f, -3444.0f, -3445.0f, -3446.0f, -3447.0f, -3448.0f, -3449.0f, 6900.0f, 6901.0f, 6902.0f, 6903.0f, 6904.0f, 6905.0f, 6906.0f, 6907.0f, 6908.0f, 6909.0f, 6910.0f, 6911.0f, 6912.0f, 6913.0f, 6914.0f, 6915.0f, 6916.0f, 6917.0f, 6918.0f, 6919.0f, 6920.0f, 6921.0f, 6922.0f, 6923.0f, 6924.0f, 6925.0f, 6926.0f, 6927.0f, 6928.0f, 6929.0f, 6930.0f, 6931.0f, 6932.0f, 6933.0f, 6934.0f, 6935.0f, 6936.0f, 6937.0f, 6938.0f, 6939.0f, 6940.0f, 6941.0f, 6942.0f, 6943.0f, 6944.0f, 6945.0f, 6946.0f, 6947.0f, 6948.0f, 6949.0f, 6950.0f, 6951.0f, 6952.0f, 6953.0f, 6954.0f, 6955.0f, 6956.0f, 6957.0f, 6958.0f, 6959.0f, -3450.0f, -3451.0f, -3452.0f, -3453.0f, -3454.0f, -3455.0f, -3456.0f, -3457.0f, -3458.0f, -3459.0f, -3460.0f, -3461.0f, -3462.0f, -3463.0f, -3464.0f, -3465.0f, -3466.0f, -3467.0f, -3468.0f, -3469.0f, -3470.0f, -3471.0f, -3472.0f, -3473.0f, -3474.0f, -3475.0f, -3476.0f, -3477.0f, -3478.0f, -3479.0f, 6960.0f, 6961.0f, 6962.0f, 6963.0f, 6964.0f, 6965.0f, 6966.0f, 6967.0f, 6968.0f, 6969.0f, 6970.0f, 6971.0f, 6972.0f, 6973.0f, 6974.0f, 6975.0f, 6976.0f, 6977.0f, 6978.0f, 6979.0f, 6980.0f, 6981.0f, 6982.0f, 6983.0f, 6984.0f, 6985.0f, 6986.0f, 6987.0f, 6988.0f, 6989.0f, 6990.0f, 6991.0f, 6992.0f, 6993.0f, 6994.0f, 6995.0f, 6996.0f, 6997.0f, 6998.0f, 6999.0f, 7000.0f, 7001.0f, 7002.0f, 7003.0f, 7004.0f, 7005.0f, 7006.0f, 7007.0f, 7008.0f, 7009.0f, 7010.0f, 7011.0f, 7012.0f, 7013.0f, 7014.0f, 7015.0f, 7016.0f, 7017.0f, 7018.0f, 7019.0f, -3480.0f, -3481.0f, 
-3482.0f, -3483.0f, -3484.0f, -3485.0f, -3486.0f, -3487.0f, -3488.0f, -3489.0f, -3490.0f, -3491.0f, -3492.0f, -3493.0f, -3494.0f, -3495.0f, -3496.0f, -3497.0f, -3498.0f, -3499.0f, -3500.0f, -3501.0f, -3502.0f, -3503.0f, -3504.0f, -3505.0f, -3506.0f, -3507.0f, -3508.0f, -3509.0f, 7020.0f, 7021.0f, 7022.0f, 7023.0f, 7024.0f, 7025.0f, 7026.0f, 7027.0f, 7028.0f, 7029.0f, 7030.0f, 7031.0f, 7032.0f, 7033.0f, 7034.0f, 7035.0f, 7036.0f, 7037.0f, 7038.0f, 7039.0f, 7040.0f, 7041.0f, 7042.0f, 7043.0f, 7044.0f, 7045.0f, 7046.0f, 7047.0f, 7048.0f, 7049.0f, 7050.0f, 7051.0f, 7052.0f, 7053.0f, 7054.0f, 7055.0f, 7056.0f, 7057.0f, 7058.0f, 7059.0f, 7060.0f, 7061.0f, 7062.0f, 7063.0f, 7064.0f, 7065.0f, 7066.0f, 7067.0f, 7068.0f, 7069.0f, 7070.0f, 7071.0f, 7072.0f, 7073.0f, 7074.0f, 7075.0f, 7076.0f, 7077.0f, 7078.0f, 7079.0f, -3510.0f, -3511.0f, -3512.0f, -3513.0f, -3514.0f, -3515.0f, -3516.0f, -3517.0f, -3518.0f, -3519.0f, -3520.0f, -3521.0f, -3522.0f, -3523.0f, -3524.0f, -3525.0f, -3526.0f, -3527.0f, -3528.0f, -3529.0f, -3530.0f, -3531.0f, -3532.0f, -3533.0f, -3534.0f, -3535.0f, -3536.0f, -3537.0f, -3538.0f, -3539.0f, 7080.0f, 7081.0f, 7082.0f, 7083.0f, 7084.0f, 7085.0f, 7086.0f, 7087.0f, 7088.0f, 7089.0f, 7090.0f, 7091.0f, 7092.0f, 7093.0f, 7094.0f, 7095.0f, 7096.0f, 7097.0f, 7098.0f, 7099.0f, 7100.0f, 7101.0f, 7102.0f, 7103.0f, 7104.0f, 7105.0f, 7106.0f, 7107.0f, 7108.0f, 7109.0f, 7110.0f, 7111.0f, 7112.0f, 7113.0f, 7114.0f, 7115.0f, 7116.0f, 7117.0f, 7118.0f, 7119.0f, 7120.0f, 7121.0f, 7122.0f, 7123.0f, 7124.0f, 7125.0f, 7126.0f, 7127.0f, 7128.0f, 7129.0f, 7130.0f, 7131.0f, 7132.0f, 7133.0f, 7134.0f, 7135.0f, 7136.0f, 7137.0f, 7138.0f, 7139.0f, -3540.0f, -3541.0f, -3542.0f, -3543.0f, -3544.0f, -3545.0f, -3546.0f, -3547.0f, -3548.0f, -3549.0f, -3550.0f, -3551.0f, -3552.0f, -3553.0f, -3554.0f, -3555.0f, -3556.0f, -3557.0f, -3558.0f, -3559.0f, -3560.0f, -3561.0f, -3562.0f, -3563.0f, -3564.0f, -3565.0f, -3566.0f, -3567.0f, -3568.0f, -3569.0f, 7140.0f, 7141.0f, 7142.0f, 7143.0f, 
7144.0f, 7145.0f, 7146.0f, 7147.0f, 7148.0f, 7149.0f, 7150.0f, 7151.0f, 7152.0f, 7153.0f, 7154.0f, 7155.0f, 7156.0f, 7157.0f, 7158.0f, 7159.0f, 7160.0f, 7161.0f, 7162.0f, 7163.0f, 7164.0f, 7165.0f, 7166.0f, 7167.0f, 7168.0f, 7169.0f, 7170.0f, 7171.0f, 7172.0f, 7173.0f, 7174.0f, 7175.0f, 7176.0f, 7177.0f, 7178.0f, 7179.0f, 7180.0f, 7181.0f, 7182.0f, 7183.0f, 7184.0f, 7185.0f, 7186.0f, 7187.0f, 7188.0f, 7189.0f, 7190.0f, 7191.0f, 7192.0f, 7193.0f, 7194.0f, 7195.0f, 7196.0f, 7197.0f, 7198.0f, 7199.0f, -3570.0f, -3571.0f, -3572.0f, -3573.0f, -3574.0f, -3575.0f, -3576.0f, -3577.0f, -3578.0f, -3579.0f, -3580.0f, -3581.0f, -3582.0f, -3583.0f, -3584.0f, -3585.0f, -3586.0f, -3587.0f, -3588.0f, -3589.0f, -3590.0f, -3591.0f, -3592.0f, -3593.0f, -3594.0f, -3595.0f, -3596.0f, -3597.0f, -3598.0f, -3599.0f, 7200.0f, 7201.0f, 7202.0f, 7203.0f, 7204.0f, 7205.0f, 7206.0f, 7207.0f, 7208.0f, 7209.0f, 7210.0f, 7211.0f, 7212.0f, 7213.0f, 7214.0f, 7215.0f, 7216.0f, 7217.0f, 7218.0f, 7219.0f, 7220.0f, 7221.0f, 7222.0f, 7223.0f, 7224.0f, 7225.0f, 7226.0f, 7227.0f, 7228.0f, 7229.0f, 7230.0f, 7231.0f, 7232.0f, 7233.0f, 7234.0f, 7235.0f, 7236.0f, 7237.0f, 7238.0f, 7239.0f, 7240.0f, 7241.0f, 7242.0f, 7243.0f, 7244.0f, 7245.0f, 7246.0f, 7247.0f, 7248.0f, 7249.0f, 7250.0f, 7251.0f, 7252.0f, 7253.0f, 7254.0f, 7255.0f, 7256.0f, 7257.0f, 7258.0f, 7259.0f, -3600.0f, -3601.0f, -3602.0f, -3603.0f, -3604.0f, -3605.0f, -3606.0f, -3607.0f, -3608.0f, -3609.0f, -3610.0f, -3611.0f, -3612.0f, -3613.0f, -3614.0f, -3615.0f, -3616.0f, -3617.0f, -3618.0f, -3619.0f, -3620.0f, -3621.0f, -3622.0f, -3623.0f, -3624.0f, -3625.0f, -3626.0f, -3627.0f, -3628.0f, -3629.0f, 7260.0f, 7261.0f, 7262.0f, 7263.0f, 7264.0f, 7265.0f, 7266.0f, 7267.0f, 7268.0f, 7269.0f, 7270.0f, 7271.0f, 7272.0f, 7273.0f, 7274.0f, 7275.0f, 7276.0f, 7277.0f, 7278.0f, 7279.0f, 7280.0f, 7281.0f, 7282.0f, 7283.0f, 7284.0f, 7285.0f, 7286.0f, 7287.0f, 7288.0f, 7289.0f, 7290.0f, 7291.0f, 7292.0f, 7293.0f, 7294.0f, 7295.0f, 7296.0f, 7297.0f, 7298.0f, 
7299.0f, 7300.0f, 7301.0f, 7302.0f, 7303.0f, 7304.0f, 7305.0f, 7306.0f, 7307.0f, 7308.0f, 7309.0f, 7310.0f, 7311.0f, 7312.0f, 7313.0f, 7314.0f, 7315.0f, 7316.0f, 7317.0f, 7318.0f, 7319.0f, -3630.0f, -3631.0f, -3632.0f, -3633.0f, -3634.0f, -3635.0f, -3636.0f, -3637.0f, -3638.0f, -3639.0f, -3640.0f, -3641.0f, -3642.0f, -3643.0f, -3644.0f, -3645.0f, -3646.0f, -3647.0f, -3648.0f, -3649.0f, -3650.0f, -3651.0f, -3652.0f, -3653.0f, -3654.0f, -3655.0f, -3656.0f, -3657.0f, -3658.0f, -3659.0f, 7320.0f, 7321.0f, 7322.0f, 7323.0f, 7324.0f, 7325.0f, 7326.0f, 7327.0f, 7328.0f, 7329.0f, 7330.0f, 7331.0f, 7332.0f, 7333.0f, 7334.0f, 7335.0f, 7336.0f, 7337.0f, 7338.0f, 7339.0f, 7340.0f, 7341.0f, 7342.0f, 7343.0f, 7344.0f, 7345.0f, 7346.0f, 7347.0f, 7348.0f, 7349.0f, 7350.0f, 7351.0f, 7352.0f, 7353.0f, 7354.0f, 7355.0f, 7356.0f, 7357.0f, 7358.0f, 7359.0f, 7360.0f, 7361.0f, 7362.0f, 7363.0f, 7364.0f, 7365.0f, 7366.0f, 7367.0f, 7368.0f, 7369.0f, 7370.0f, 7371.0f, 7372.0f, 7373.0f, 7374.0f, 7375.0f, 7376.0f, 7377.0f, 7378.0f, 7379.0f, -3660.0f, -3661.0f, -3662.0f, -3663.0f, -3664.0f, -3665.0f, -3666.0f, -3667.0f, -3668.0f, -3669.0f, -3670.0f, -3671.0f, -3672.0f, -3673.0f, -3674.0f, -3675.0f, -3676.0f, -3677.0f, -3678.0f, -3679.0f, -3680.0f, -3681.0f, -3682.0f, -3683.0f, -3684.0f, -3685.0f, -3686.0f, -3687.0f, -3688.0f, -3689.0f, 7380.0f, 7381.0f, 7382.0f, 7383.0f, 7384.0f, 7385.0f, 7386.0f, 7387.0f, 7388.0f, 7389.0f, 7390.0f, 7391.0f, 7392.0f, 7393.0f, 7394.0f, 7395.0f, 7396.0f, 7397.0f, 7398.0f, 7399.0f, 7400.0f, 7401.0f, 7402.0f, 7403.0f, 7404.0f, 7405.0f, 7406.0f, 7407.0f, 7408.0f, 7409.0f, 7410.0f, 7411.0f, 7412.0f, 7413.0f, 7414.0f, 7415.0f, 7416.0f, 7417.0f, 7418.0f, 7419.0f, 7420.0f, 7421.0f, 7422.0f, 7423.0f, 7424.0f, 7425.0f, 7426.0f, 7427.0f, 7428.0f, 7429.0f, 7430.0f, 7431.0f, 7432.0f, 7433.0f, 7434.0f, 7435.0f, 7436.0f, 7437.0f, 7438.0f, 7439.0f, -3690.0f, -3691.0f, -3692.0f, -3693.0f, -3694.0f, -3695.0f, -3696.0f, -3697.0f, -3698.0f, -3699.0f, -3700.0f, -3701.0f, -3702.0f, 
-3703.0f, -3704.0f, -3705.0f, -3706.0f, -3707.0f, -3708.0f, -3709.0f, -3710.0f, -3711.0f, -3712.0f, -3713.0f, -3714.0f, -3715.0f, -3716.0f, -3717.0f, -3718.0f, -3719.0f, 7440.0f, 7441.0f, 7442.0f, 7443.0f, 7444.0f, 7445.0f, 7446.0f, 7447.0f, 7448.0f, 7449.0f, 7450.0f, 7451.0f, 7452.0f, 7453.0f, 7454.0f, 7455.0f, 7456.0f, 7457.0f, 7458.0f, 7459.0f, 7460.0f, 7461.0f, 7462.0f, 7463.0f, 7464.0f, 7465.0f, 7466.0f, 7467.0f, 7468.0f, 7469.0f, 7470.0f, 7471.0f, 7472.0f, 7473.0f, 7474.0f, 7475.0f, 7476.0f, 7477.0f, 7478.0f, 7479.0f, 7480.0f, 7481.0f, 7482.0f, 7483.0f, 7484.0f, 7485.0f, 7486.0f, 7487.0f, 7488.0f, 7489.0f, 7490.0f, 7491.0f, 7492.0f, 7493.0f, 7494.0f, 7495.0f, 7496.0f, 7497.0f, 7498.0f, 7499.0f, -3720.0f, -3721.0f, -3722.0f, -3723.0f, -3724.0f, -3725.0f, -3726.0f, -3727.0f, -3728.0f, -3729.0f, -3730.0f, -3731.0f, -3732.0f, -3733.0f, -3734.0f, -3735.0f, -3736.0f, -3737.0f, -3738.0f, -3739.0f, -3740.0f, -3741.0f, -3742.0f, -3743.0f, -3744.0f, -3745.0f, -3746.0f, -3747.0f, -3748.0f, -3749.0f, 7500.0f, 7501.0f, 7502.0f, 7503.0f, 7504.0f, 7505.0f, 7506.0f, 7507.0f, 7508.0f, 7509.0f, 7510.0f, 7511.0f, 7512.0f, 7513.0f, 7514.0f, 7515.0f, 7516.0f, 7517.0f, 7518.0f, 7519.0f, 7520.0f, 7521.0f, 7522.0f, 7523.0f, 7524.0f, 7525.0f, 7526.0f, 7527.0f, 7528.0f, 7529.0f, 7530.0f, 7531.0f, 7532.0f, 7533.0f, 7534.0f, 7535.0f, 7536.0f, 7537.0f, 7538.0f, 7539.0f, 7540.0f, 7541.0f, 7542.0f, 7543.0f, 7544.0f, 7545.0f, 7546.0f, 7547.0f, 7548.0f, 7549.0f, 7550.0f, 7551.0f, 7552.0f, 7553.0f, 7554.0f, 7555.0f, 7556.0f, 7557.0f, 7558.0f, 7559.0f, -3750.0f, -3751.0f, -3752.0f, -3753.0f, -3754.0f, -3755.0f, -3756.0f, -3757.0f, -3758.0f, -3759.0f, -3760.0f, -3761.0f, -3762.0f, -3763.0f, -3764.0f, -3765.0f, -3766.0f, -3767.0f, -3768.0f, -3769.0f, -3770.0f, -3771.0f, -3772.0f, -3773.0f, -3774.0f, -3775.0f, -3776.0f, -3777.0f, -3778.0f, -3779.0f, 7560.0f, 7561.0f, 7562.0f, 7563.0f, 7564.0f, 7565.0f, 7566.0f, 7567.0f, 7568.0f, 7569.0f, 7570.0f, 7571.0f, 7572.0f, 7573.0f, 7574.0f, 7575.0f, 
7576.0f, 7577.0f, 7578.0f, 7579.0f, 7580.0f, 7581.0f, 7582.0f, 7583.0f, 7584.0f, 7585.0f, 7586.0f, 7587.0f, 7588.0f, 7589.0f, 7590.0f, 7591.0f, 7592.0f, 7593.0f, 7594.0f, 7595.0f, 7596.0f, 7597.0f, 7598.0f, 7599.0f, 7600.0f, 7601.0f, 7602.0f, 7603.0f, 7604.0f, 7605.0f, 7606.0f, 7607.0f, 7608.0f, 7609.0f, 7610.0f, 7611.0f, 7612.0f, 7613.0f, 7614.0f, 7615.0f, 7616.0f, 7617.0f, 7618.0f, 7619.0f, -3780.0f, -3781.0f, -3782.0f, -3783.0f, -3784.0f, -3785.0f, -3786.0f, -3787.0f, -3788.0f, -3789.0f, -3790.0f, -3791.0f, -3792.0f, -3793.0f, -3794.0f, -3795.0f, -3796.0f, -3797.0f, -3798.0f, -3799.0f, -3800.0f, -3801.0f, -3802.0f, -3803.0f, -3804.0f, -3805.0f, -3806.0f, -3807.0f, -3808.0f, -3809.0f, 7620.0f, 7621.0f, 7622.0f, 7623.0f, 7624.0f, 7625.0f, 7626.0f, 7627.0f, 7628.0f, 7629.0f, 7630.0f, 7631.0f, 7632.0f, 7633.0f, 7634.0f, 7635.0f, 7636.0f, 7637.0f, 7638.0f, 7639.0f, 7640.0f, 7641.0f, 7642.0f, 7643.0f, 7644.0f, 7645.0f, 7646.0f, 7647.0f, 7648.0f, 7649.0f, 7650.0f, 7651.0f, 7652.0f, 7653.0f, 7654.0f, 7655.0f, 7656.0f, 7657.0f, 7658.0f, 7659.0f, 7660.0f, 7661.0f, 7662.0f, 7663.0f, 7664.0f, 7665.0f, 7666.0f, 7667.0f, 7668.0f, 7669.0f, 7670.0f, 7671.0f, 7672.0f, 7673.0f, 7674.0f, 7675.0f, 7676.0f, 7677.0f, 7678.0f, 7679.0f, -3810.0f, -3811.0f, -3812.0f, -3813.0f, -3814.0f, -3815.0f, -3816.0f, -3817.0f, -3818.0f, -3819.0f, -3820.0f, -3821.0f, -3822.0f, -3823.0f, -3824.0f, -3825.0f, -3826.0f, -3827.0f, -3828.0f, -3829.0f, -3830.0f, -3831.0f, -3832.0f, -3833.0f, -3834.0f, -3835.0f, -3836.0f, -3837.0f, -3838.0f, -3839.0f, 7680.0f, 7681.0f, 7682.0f, 7683.0f, 7684.0f, 7685.0f, 7686.0f, 7687.0f, 7688.0f, 7689.0f, 7690.0f, 7691.0f, 7692.0f, 7693.0f, 7694.0f, 7695.0f, 7696.0f, 7697.0f, 7698.0f, 7699.0f, 7700.0f, 7701.0f, 7702.0f, 7703.0f, 7704.0f, 7705.0f, 7706.0f, 7707.0f, 7708.0f, 7709.0f, 7710.0f, 7711.0f, 7712.0f, 7713.0f, 7714.0f, 7715.0f, 7716.0f, 7717.0f, 7718.0f, 7719.0f, 7720.0f, 7721.0f, 7722.0f, 7723.0f, 7724.0f, 7725.0f, 7726.0f, 7727.0f, 7728.0f, 7729.0f, 7730.0f, 
7731.0f, 7732.0f, 7733.0f, 7734.0f, 7735.0f, 7736.0f, 7737.0f, 7738.0f, 7739.0f, -3840.0f, -3841.0f, -3842.0f, -3843.0f, -3844.0f, -3845.0f, -3846.0f, -3847.0f, -3848.0f, -3849.0f, -3850.0f, -3851.0f, -3852.0f, -3853.0f, -3854.0f, -3855.0f, -3856.0f, -3857.0f, -3858.0f, -3859.0f, -3860.0f, -3861.0f, -3862.0f, -3863.0f, -3864.0f, -3865.0f, -3866.0f, -3867.0f, -3868.0f, -3869.0f, 7740.0f, 7741.0f, 7742.0f, 7743.0f, 7744.0f, 7745.0f, 7746.0f, 7747.0f, 7748.0f, 7749.0f, 7750.0f, 7751.0f, 7752.0f, 7753.0f, 7754.0f, 7755.0f, 7756.0f, 7757.0f, 7758.0f, 7759.0f, 7760.0f, 7761.0f, 7762.0f, 7763.0f, 7764.0f, 7765.0f, 7766.0f, 7767.0f, 7768.0f, 7769.0f, 7770.0f, 7771.0f, 7772.0f, 7773.0f, 7774.0f, 7775.0f, 7776.0f, 7777.0f, 7778.0f, 7779.0f, 7780.0f, 7781.0f, 7782.0f, 7783.0f, 7784.0f, 7785.0f, 7786.0f, 7787.0f, 7788.0f, 7789.0f, 7790.0f, 7791.0f, 7792.0f, 7793.0f, 7794.0f, 7795.0f, 7796.0f, 7797.0f, 7798.0f, 7799.0f, -3870.0f, -3871.0f, -3872.0f, -3873.0f, -3874.0f, -3875.0f, -3876.0f, -3877.0f, -3878.0f, -3879.0f, -3880.0f, -3881.0f, -3882.0f, -3883.0f, -3884.0f, -3885.0f, -3886.0f, -3887.0f, -3888.0f, -3889.0f, -3890.0f, -3891.0f, -3892.0f, -3893.0f, -3894.0f, -3895.0f, -3896.0f, -3897.0f, -3898.0f, -3899.0f, 7800.0f, 7801.0f, 7802.0f, 7803.0f, 7804.0f, 7805.0f, 7806.0f, 7807.0f, 7808.0f, 7809.0f, 7810.0f, 7811.0f, 7812.0f, 7813.0f, 7814.0f, 7815.0f, 7816.0f, 7817.0f, 7818.0f, 7819.0f, 7820.0f, 7821.0f, 7822.0f, 7823.0f, 7824.0f, 7825.0f, 7826.0f, 7827.0f, 7828.0f, 7829.0f, 7830.0f, 7831.0f, 7832.0f, 7833.0f, 7834.0f, 7835.0f, 7836.0f, 7837.0f, 7838.0f, 7839.0f, 7840.0f, 7841.0f, 7842.0f, 7843.0f, 7844.0f, 7845.0f, 7846.0f, 7847.0f, 7848.0f, 7849.0f, 7850.0f, 7851.0f, 7852.0f, 7853.0f, 7854.0f, 7855.0f, 7856.0f, 7857.0f, 7858.0f, 7859.0f, -3900.0f, -3901.0f, -3902.0f, -3903.0f, -3904.0f, -3905.0f, -3906.0f, -3907.0f, -3908.0f, -3909.0f, -3910.0f, -3911.0f, -3912.0f, -3913.0f, -3914.0f, -3915.0f, -3916.0f, -3917.0f, -3918.0f, -3919.0f, -3920.0f, -3921.0f, -3922.0f, 
-3923.0f, -3924.0f, -3925.0f, -3926.0f, -3927.0f, -3928.0f, -3929.0f, 7860.0f, 7861.0f, 7862.0f, 7863.0f, 7864.0f, 7865.0f, 7866.0f, 7867.0f, 7868.0f, 7869.0f, 7870.0f, 7871.0f, 7872.0f, 7873.0f, 7874.0f, 7875.0f, 7876.0f, 7877.0f, 7878.0f, 7879.0f, 7880.0f, 7881.0f, 7882.0f, 7883.0f, 7884.0f, 7885.0f, 7886.0f, 7887.0f, 7888.0f, 7889.0f, 7890.0f, 7891.0f, 7892.0f, 7893.0f, 7894.0f, 7895.0f, 7896.0f, 7897.0f, 7898.0f, 7899.0f, 7900.0f, 7901.0f, 7902.0f, 7903.0f, 7904.0f, 7905.0f, 7906.0f, 7907.0f, 7908.0f, 7909.0f, 7910.0f, 7911.0f, 7912.0f, 7913.0f, 7914.0f, 7915.0f, 7916.0f, 7917.0f, 7918.0f, 7919.0f, -3930.0f, -3931.0f, -3932.0f, -3933.0f, -3934.0f, -3935.0f, -3936.0f, -3937.0f, -3938.0f, -3939.0f, -3940.0f, -3941.0f, -3942.0f, -3943.0f, -3944.0f, -3945.0f, -3946.0f, -3947.0f, -3948.0f, -3949.0f, -3950.0f, -3951.0f, -3952.0f, -3953.0f, -3954.0f, -3955.0f, -3956.0f, -3957.0f, -3958.0f, -3959.0f, 7920.0f, 7921.0f, 7922.0f, 7923.0f, 7924.0f, 7925.0f, 7926.0f, 7927.0f, 7928.0f, 7929.0f, 7930.0f, 7931.0f, 7932.0f, 7933.0f, 7934.0f, 7935.0f, 7936.0f, 7937.0f, 7938.0f, 7939.0f, 7940.0f, 7941.0f, 7942.0f, 7943.0f, 7944.0f, 7945.0f, 7946.0f, 7947.0f, 7948.0f, 7949.0f, 7950.0f, 7951.0f, 7952.0f, 7953.0f, 7954.0f, 7955.0f, 7956.0f, 7957.0f, 7958.0f, 7959.0f, 7960.0f, 7961.0f, 7962.0f, 7963.0f, 7964.0f, 7965.0f, 7966.0f, 7967.0f, 7968.0f, 7969.0f, 7970.0f, 7971.0f, 7972.0f, 7973.0f, 7974.0f, 7975.0f, 7976.0f, 7977.0f, 7978.0f, 7979.0f, -3960.0f, -3961.0f, -3962.0f, -3963.0f, -3964.0f, -3965.0f, -3966.0f, -3967.0f, -3968.0f, -3969.0f, -3970.0f, -3971.0f, -3972.0f, -3973.0f, -3974.0f, -3975.0f, -3976.0f, -3977.0f, -3978.0f, -3979.0f, -3980.0f, -3981.0f, -3982.0f, -3983.0f, -3984.0f, -3985.0f, -3986.0f, -3987.0f, -3988.0f, -3989.0f, 7980.0f, 7981.0f, 7982.0f, 7983.0f, 7984.0f, 7985.0f, 7986.0f, 7987.0f, 7988.0f, 7989.0f, 7990.0f, 7991.0f, 7992.0f, 7993.0f, 7994.0f, 7995.0f, 7996.0f, 7997.0f, 7998.0f, 7999.0f, 8000.0f, 8001.0f, 8002.0f, 8003.0f, 8004.0f, 8005.0f, 8006.0f, 
8007.0f, 8008.0f, 8009.0f, 8010.0f, 8011.0f, 8012.0f, 8013.0f, 8014.0f, 8015.0f, 8016.0f, 8017.0f, 8018.0f, 8019.0f, 8020.0f, 8021.0f, 8022.0f, 8023.0f, 8024.0f, 8025.0f, 8026.0f, 8027.0f, 8028.0f, 8029.0f, 8030.0f, 8031.0f, 8032.0f, 8033.0f, 8034.0f, 8035.0f, 8036.0f, 8037.0f, 8038.0f, 8039.0f, -3990.0f, -3991.0f, -3992.0f, -3993.0f, -3994.0f, -3995.0f, -3996.0f, -3997.0f, -3998.0f, -3999.0f, -4000.0f, -4001.0f, -4002.0f, -4003.0f, -4004.0f, -4005.0f, -4006.0f, -4007.0f, -4008.0f, -4009.0f, -4010.0f, -4011.0f, -4012.0f, -4013.0f, -4014.0f, -4015.0f, -4016.0f, -4017.0f, -4018.0f, -4019.0f, 8040.0f, 8041.0f, 8042.0f, 8043.0f, 8044.0f, 8045.0f, 8046.0f, 8047.0f, 8048.0f, 8049.0f, 8050.0f, 8051.0f, 8052.0f, 8053.0f, 8054.0f, 8055.0f, 8056.0f, 8057.0f, 8058.0f, 8059.0f, 8060.0f, 8061.0f, 8062.0f, 8063.0f, 8064.0f, 8065.0f, 8066.0f, 8067.0f, 8068.0f, 8069.0f, 8070.0f, 8071.0f, 8072.0f, 8073.0f, 8074.0f, 8075.0f, 8076.0f, 8077.0f, 8078.0f, 8079.0f, 8080.0f, 8081.0f, 8082.0f, 8083.0f, 8084.0f, 8085.0f, 8086.0f, 8087.0f, 8088.0f, 8089.0f, 8090.0f, 8091.0f, 8092.0f, 8093.0f, 8094.0f, 8095.0f, 8096.0f, 8097.0f, 8098.0f, 8099.0f, -4020.0f, -4021.0f, -4022.0f, -4023.0f, -4024.0f, -4025.0f, -4026.0f, -4027.0f, -4028.0f, -4029.0f, -4030.0f, -4031.0f, -4032.0f, -4033.0f, -4034.0f, -4035.0f, -4036.0f, -4037.0f, -4038.0f, -4039.0f, -4040.0f, -4041.0f, -4042.0f, -4043.0f, -4044.0f, -4045.0f, -4046.0f, -4047.0f, -4048.0f, -4049.0f, 8100.0f, 8101.0f, 8102.0f, 8103.0f, 8104.0f, 8105.0f, 8106.0f, 8107.0f, 8108.0f, 8109.0f, 8110.0f, 8111.0f, 8112.0f, 8113.0f, 8114.0f, 8115.0f, 8116.0f, 8117.0f, 8118.0f, 8119.0f, 8120.0f, 8121.0f, 8122.0f, 8123.0f, 8124.0f, 8125.0f, 8126.0f, 8127.0f, 8128.0f, 8129.0f, 8130.0f, 8131.0f, 8132.0f, 8133.0f, 8134.0f, 8135.0f, 8136.0f, 8137.0f, 8138.0f, 8139.0f, 8140.0f, 8141.0f, 8142.0f, 8143.0f, 8144.0f, 8145.0f, 8146.0f, 8147.0f, 8148.0f, 8149.0f, 8150.0f, 8151.0f, 8152.0f, 8153.0f, 8154.0f, 8155.0f, 8156.0f, 8157.0f, 8158.0f, 8159.0f, -4050.0f, -4051.0f, 
-4052.0f, -4053.0f, -4054.0f, -4055.0f, -4056.0f, -4057.0f, -4058.0f, -4059.0f, -4060.0f, -4061.0f, -4062.0f, -4063.0f, -4064.0f, -4065.0f, -4066.0f, -4067.0f, -4068.0f, -4069.0f, -4070.0f, -4071.0f, -4072.0f, -4073.0f, -4074.0f, -4075.0f, -4076.0f, -4077.0f, -4078.0f, -4079.0f, 8160.0f, 8161.0f, 8162.0f, 8163.0f, 8164.0f, 8165.0f, 8166.0f, 8167.0f, 8168.0f, 8169.0f, 8170.0f, 8171.0f, 8172.0f, 8173.0f, 8174.0f, 8175.0f, 8176.0f, 8177.0f, 8178.0f, 8179.0f, 8180.0f, 8181.0f, 8182.0f, 8183.0f, 8184.0f, 8185.0f, 8186.0f, 8187.0f, 8188.0f, 8189.0f, 8190.0f, 8191.0f, 8192.0f, 8193.0f, 8194.0f, 8195.0f, 8196.0f, 8197.0f, 8198.0f, 8199.0f, 8200.0f, 8201.0f, 8202.0f, 8203.0f, 8204.0f, 8205.0f, 8206.0f, 8207.0f, 8208.0f, 8209.0f, 8210.0f, 8211.0f, 8212.0f, 8213.0f, 8214.0f, 8215.0f, 8216.0f, 8217.0f, 8218.0f, 8219.0f, -4080.0f, -4081.0f, -4082.0f, -4083.0f, -4084.0f, -4085.0f, -4086.0f, -4087.0f, -4088.0f, -4089.0f, -4090.0f, -4091.0f, -4092.0f, -4093.0f, -4094.0f, -4095.0f, -4096.0f, -4097.0f, -4098.0f, -4099.0f, -4100.0f, -4101.0f, -4102.0f, -4103.0f, -4104.0f, -4105.0f, -4106.0f, -4107.0f, -4108.0f, -4109.0f, 8220.0f, 8221.0f, 8222.0f, 8223.0f, 8224.0f, 8225.0f, 8226.0f, 8227.0f, 8228.0f, 8229.0f, 8230.0f, 8231.0f, 8232.0f, 8233.0f, 8234.0f, 8235.0f, 8236.0f, 8237.0f, 8238.0f, 8239.0f, 8240.0f, 8241.0f, 8242.0f, 8243.0f, 8244.0f, 8245.0f, 8246.0f, 8247.0f, 8248.0f, 8249.0f, 8250.0f, 8251.0f, 8252.0f, 8253.0f, 8254.0f, 8255.0f, 8256.0f, 8257.0f, 8258.0f, 8259.0f, 8260.0f, 8261.0f, 8262.0f, 8263.0f, 8264.0f, 8265.0f, 8266.0f, 8267.0f, 8268.0f, 8269.0f, 8270.0f, 8271.0f, 8272.0f, 8273.0f, 8274.0f, 8275.0f, 8276.0f, 8277.0f, 8278.0f, 8279.0f, -4110.0f, -4111.0f, -4112.0f, -4113.0f, -4114.0f, -4115.0f, -4116.0f, -4117.0f, -4118.0f, -4119.0f, -4120.0f, -4121.0f, -4122.0f, -4123.0f, -4124.0f, -4125.0f, -4126.0f, -4127.0f, -4128.0f, -4129.0f, -4130.0f, -4131.0f, -4132.0f, -4133.0f, -4134.0f, -4135.0f, -4136.0f, -4137.0f, -4138.0f, -4139.0f, 8280.0f, 8281.0f, 8282.0f, 8283.0f, 
8284.0f, 8285.0f, 8286.0f, 8287.0f, 8288.0f, 8289.0f, 8290.0f, 8291.0f, 8292.0f, 8293.0f, 8294.0f, 8295.0f, 8296.0f, 8297.0f, 8298.0f, 8299.0f, 8300.0f, 8301.0f, 8302.0f, 8303.0f, 8304.0f, 8305.0f, 8306.0f, 8307.0f, 8308.0f, 8309.0f, 8310.0f, 8311.0f, 8312.0f, 8313.0f, 8314.0f, 8315.0f, 8316.0f, 8317.0f, 8318.0f, 8319.0f, 8320.0f, 8321.0f, 8322.0f, 8323.0f, 8324.0f, 8325.0f, 8326.0f, 8327.0f, 8328.0f, 8329.0f, 8330.0f, 8331.0f, 8332.0f, 8333.0f, 8334.0f, 8335.0f, 8336.0f, 8337.0f, 8338.0f, 8339.0f, -4140.0f, -4141.0f, -4142.0f, -4143.0f, -4144.0f, -4145.0f, -4146.0f, -4147.0f, -4148.0f, -4149.0f, -4150.0f, -4151.0f, -4152.0f, -4153.0f, -4154.0f, -4155.0f, -4156.0f, -4157.0f, -4158.0f, -4159.0f, -4160.0f, -4161.0f, -4162.0f, -4163.0f, -4164.0f, -4165.0f, -4166.0f, -4167.0f, -4168.0f, -4169.0f, 8340.0f, 8341.0f, 8342.0f, 8343.0f, 8344.0f, 8345.0f, 8346.0f, 8347.0f, 8348.0f, 8349.0f, 8350.0f, 8351.0f, 8352.0f, 8353.0f, 8354.0f, 8355.0f, 8356.0f, 8357.0f, 8358.0f, 8359.0f, 8360.0f, 8361.0f, 8362.0f, 8363.0f, 8364.0f, 8365.0f, 8366.0f, 8367.0f, 8368.0f, 8369.0f, 8370.0f, 8371.0f, 8372.0f, 8373.0f, 8374.0f, 8375.0f, 8376.0f, 8377.0f, 8378.0f, 8379.0f, 8380.0f, 8381.0f, 8382.0f, 8383.0f, 8384.0f, 8385.0f, 8386.0f, 8387.0f, 8388.0f, 8389.0f, 8390.0f, 8391.0f, 8392.0f, 8393.0f, 8394.0f, 8395.0f, 8396.0f, 8397.0f, 8398.0f, 8399.0f, -4170.0f, -4171.0f, -4172.0f, -4173.0f, -4174.0f, -4175.0f, -4176.0f, -4177.0f, -4178.0f, -4179.0f, -4180.0f, -4181.0f, -4182.0f, -4183.0f, -4184.0f, -4185.0f, -4186.0f, -4187.0f, -4188.0f, -4189.0f, -4190.0f, -4191.0f, -4192.0f, -4193.0f, -4194.0f, -4195.0f, -4196.0f, -4197.0f, -4198.0f, -4199.0f, 8400.0f, 8401.0f, 8402.0f, 8403.0f, 8404.0f, 8405.0f, 8406.0f, 8407.0f, 8408.0f, 8409.0f, 8410.0f, 8411.0f, 8412.0f, 8413.0f, 8414.0f, 8415.0f, 8416.0f, 8417.0f, 8418.0f, 8419.0f, 8420.0f, 8421.0f, 8422.0f, 8423.0f, 8424.0f, 8425.0f, 8426.0f, 8427.0f, 8428.0f, 8429.0f, 8430.0f, 8431.0f, 8432.0f, 8433.0f, 8434.0f, 8435.0f, 8436.0f, 8437.0f, 8438.0f, 
8439.0f, 8440.0f, 8441.0f, 8442.0f, 8443.0f, 8444.0f, 8445.0f, 8446.0f, 8447.0f, 8448.0f, 8449.0f, 8450.0f, 8451.0f, 8452.0f, 8453.0f, 8454.0f, 8455.0f, 8456.0f, 8457.0f, 8458.0f, 8459.0f, -4200.0f, -4201.0f, -4202.0f, -4203.0f, -4204.0f, -4205.0f, -4206.0f, -4207.0f, -4208.0f, -4209.0f, -4210.0f, -4211.0f, -4212.0f, -4213.0f, -4214.0f, -4215.0f, -4216.0f, -4217.0f, -4218.0f, -4219.0f, -4220.0f, -4221.0f, -4222.0f, -4223.0f, -4224.0f, -4225.0f, -4226.0f, -4227.0f, -4228.0f, -4229.0f, 8460.0f, 8461.0f, 8462.0f, 8463.0f, 8464.0f, 8465.0f, 8466.0f, 8467.0f, 8468.0f, 8469.0f, 8470.0f, 8471.0f, 8472.0f, 8473.0f, 8474.0f, 8475.0f, 8476.0f, 8477.0f, 8478.0f, 8479.0f, 8480.0f, 8481.0f, 8482.0f, 8483.0f, 8484.0f, 8485.0f, 8486.0f, 8487.0f, 8488.0f, 8489.0f, 8490.0f, 8491.0f, 8492.0f, 8493.0f, 8494.0f, 8495.0f, 8496.0f, 8497.0f, 8498.0f, 8499.0f, 8500.0f, 8501.0f, 8502.0f, 8503.0f, 8504.0f, 8505.0f, 8506.0f, 8507.0f, 8508.0f, 8509.0f, 8510.0f, 8511.0f, 8512.0f, 8513.0f, 8514.0f, 8515.0f, 8516.0f, 8517.0f, 8518.0f, 8519.0f, -4230.0f, -4231.0f, -4232.0f, -4233.0f, -4234.0f, -4235.0f, -4236.0f, -4237.0f, -4238.0f, -4239.0f, -4240.0f, -4241.0f, -4242.0f, -4243.0f, -4244.0f, -4245.0f, -4246.0f, -4247.0f, -4248.0f, -4249.0f, -4250.0f, -4251.0f, -4252.0f, -4253.0f, -4254.0f, -4255.0f, -4256.0f, -4257.0f, -4258.0f, -4259.0f, 8520.0f, 8521.0f, 8522.0f, 8523.0f, 8524.0f, 8525.0f, 8526.0f, 8527.0f, 8528.0f, 8529.0f, 8530.0f, 8531.0f, 8532.0f, 8533.0f, 8534.0f, 8535.0f, 8536.0f, 8537.0f, 8538.0f, 8539.0f, 8540.0f, 8541.0f, 8542.0f, 8543.0f, 8544.0f, 8545.0f, 8546.0f, 8547.0f, 8548.0f, 8549.0f, 8550.0f, 8551.0f, 8552.0f, 8553.0f, 8554.0f, 8555.0f, 8556.0f, 8557.0f, 8558.0f, 8559.0f, 8560.0f, 8561.0f, 8562.0f, 8563.0f, 8564.0f, 8565.0f, 8566.0f, 8567.0f, 8568.0f, 8569.0f, 8570.0f, 8571.0f, 8572.0f, 8573.0f, 8574.0f, 8575.0f, 8576.0f, 8577.0f, 8578.0f, 8579.0f, -4260.0f, -4261.0f, -4262.0f, -4263.0f, -4264.0f, -4265.0f, -4266.0f, -4267.0f, -4268.0f, -4269.0f, -4270.0f, -4271.0f, -4272.0f, 
-4273.0f, -4274.0f, -4275.0f, -4276.0f, -4277.0f, -4278.0f, -4279.0f, -4280.0f, -4281.0f, -4282.0f, -4283.0f, -4284.0f, -4285.0f, -4286.0f, -4287.0f, -4288.0f, -4289.0f, 8580.0f, 8581.0f, 8582.0f, 8583.0f, 8584.0f, 8585.0f, 8586.0f, 8587.0f, 8588.0f, 8589.0f, 8590.0f, 8591.0f, 8592.0f, 8593.0f, 8594.0f, 8595.0f, 8596.0f, 8597.0f, 8598.0f, 8599.0f, 8600.0f, 8601.0f, 8602.0f, 8603.0f, 8604.0f, 8605.0f, 8606.0f, 8607.0f, 8608.0f, 8609.0f, 8610.0f, 8611.0f, 8612.0f, 8613.0f, 8614.0f, 8615.0f, 8616.0f, 8617.0f, 8618.0f, 8619.0f, 8620.0f, 8621.0f, 8622.0f, 8623.0f, 8624.0f, 8625.0f, 8626.0f, 8627.0f, 8628.0f, 8629.0f, 8630.0f, 8631.0f, 8632.0f, 8633.0f, 8634.0f, 8635.0f, 8636.0f, 8637.0f, 8638.0f, 8639.0f, -4290.0f, -4291.0f, -4292.0f, -4293.0f, -4294.0f, -4295.0f, -4296.0f, -4297.0f, -4298.0f, -4299.0f, -4300.0f, -4301.0f, -4302.0f, -4303.0f, -4304.0f, -4305.0f, -4306.0f, -4307.0f, -4308.0f, -4309.0f, -4310.0f, -4311.0f, -4312.0f, -4313.0f, -4314.0f, -4315.0f, -4316.0f, -4317.0f, -4318.0f, -4319.0f, 8640.0f, 8641.0f, 8642.0f, 8643.0f, 8644.0f, 8645.0f, 8646.0f, 8647.0f, 8648.0f, 8649.0f, 8650.0f, 8651.0f, 8652.0f, 8653.0f, 8654.0f, 8655.0f, 8656.0f, 8657.0f, 8658.0f, 8659.0f, 8660.0f, 8661.0f, 8662.0f, 8663.0f, 8664.0f, 8665.0f, 8666.0f, 8667.0f, 8668.0f, 8669.0f, 8670.0f, 8671.0f, 8672.0f, 8673.0f, 8674.0f, 8675.0f, 8676.0f, 8677.0f, 8678.0f, 8679.0f, 8680.0f, 8681.0f, 8682.0f, 8683.0f, 8684.0f, 8685.0f, 8686.0f, 8687.0f, 8688.0f, 8689.0f, 8690.0f, 8691.0f, 8692.0f, 8693.0f, 8694.0f, 8695.0f, 8696.0f, 8697.0f, 8698.0f, 8699.0f, -4320.0f, -4321.0f, -4322.0f, -4323.0f, -4324.0f, -4325.0f, -4326.0f, -4327.0f, -4328.0f, -4329.0f, -4330.0f, -4331.0f, -4332.0f, -4333.0f, -4334.0f, -4335.0f, -4336.0f, -4337.0f, -4338.0f, -4339.0f, -4340.0f, -4341.0f, -4342.0f, -4343.0f, -4344.0f, -4345.0f, -4346.0f, -4347.0f, -4348.0f, -4349.0f, 8700.0f, 8701.0f, 8702.0f, 8703.0f, 8704.0f, 8705.0f, 8706.0f, 8707.0f, 8708.0f, 8709.0f, 8710.0f, 8711.0f, 8712.0f, 8713.0f, 8714.0f, 8715.0f, 
8716.0f, 8717.0f, 8718.0f, 8719.0f, 8720.0f, 8721.0f, 8722.0f, 8723.0f, 8724.0f, 8725.0f, 8726.0f, 8727.0f, 8728.0f, 8729.0f, 8730.0f, 8731.0f, 8732.0f, 8733.0f, 8734.0f, 8735.0f, 8736.0f, 8737.0f, 8738.0f, 8739.0f, 8740.0f, 8741.0f, 8742.0f, 8743.0f, 8744.0f, 8745.0f, 8746.0f, 8747.0f, 8748.0f, 8749.0f, 8750.0f, 8751.0f, 8752.0f, 8753.0f, 8754.0f, 8755.0f, 8756.0f, 8757.0f, 8758.0f, 8759.0f, -4350.0f, -4351.0f, -4352.0f, -4353.0f, -4354.0f, -4355.0f, -4356.0f, -4357.0f, -4358.0f, -4359.0f, -4360.0f, -4361.0f, -4362.0f, -4363.0f, -4364.0f, -4365.0f, -4366.0f, -4367.0f, -4368.0f, -4369.0f, -4370.0f, -4371.0f, -4372.0f, -4373.0f, -4374.0f, -4375.0f, -4376.0f, -4377.0f, -4378.0f, -4379.0f, 8760.0f, 8761.0f, 8762.0f, 8763.0f, 8764.0f, 8765.0f, 8766.0f, 8767.0f, 8768.0f, 8769.0f, 8770.0f, 8771.0f, 8772.0f, 8773.0f, 8774.0f, 8775.0f, 8776.0f, 8777.0f, 8778.0f, 8779.0f, 8780.0f, 8781.0f, 8782.0f, 8783.0f, 8784.0f, 8785.0f, 8786.0f, 8787.0f, 8788.0f, 8789.0f, 8790.0f, 8791.0f, 8792.0f, 8793.0f, 8794.0f, 8795.0f, 8796.0f, 8797.0f, 8798.0f, 8799.0f, 8800.0f, 8801.0f, 8802.0f, 8803.0f, 8804.0f, 8805.0f, 8806.0f, 8807.0f, 8808.0f, 8809.0f, 8810.0f, 8811.0f, 8812.0f, 8813.0f, 8814.0f, 8815.0f, 8816.0f, 8817.0f, 8818.0f, 8819.0f, -4380.0f, -4381.0f, -4382.0f, -4383.0f, -4384.0f, -4385.0f, -4386.0f, -4387.0f, -4388.0f, -4389.0f, -4390.0f, -4391.0f, -4392.0f, -4393.0f, -4394.0f, -4395.0f, -4396.0f, -4397.0f, -4398.0f, -4399.0f, -4400.0f, -4401.0f, -4402.0f, -4403.0f, -4404.0f, -4405.0f, -4406.0f, -4407.0f, -4408.0f, -4409.0f, 8820.0f, 8821.0f, 8822.0f, 8823.0f, 8824.0f, 8825.0f, 8826.0f, 8827.0f, 8828.0f, 8829.0f, 8830.0f, 8831.0f, 8832.0f, 8833.0f, 8834.0f, 8835.0f, 8836.0f, 8837.0f, 8838.0f, 8839.0f, 8840.0f, 8841.0f, 8842.0f, 8843.0f, 8844.0f, 8845.0f, 8846.0f, 8847.0f, 8848.0f, 8849.0f, 8850.0f, 8851.0f, 8852.0f, 8853.0f, 8854.0f, 8855.0f, 8856.0f, 8857.0f, 8858.0f, 8859.0f, 8860.0f, 8861.0f, 8862.0f, 8863.0f, 8864.0f, 8865.0f, 8866.0f, 8867.0f, 8868.0f, 8869.0f, 8870.0f, 
8871.0f, 8872.0f, 8873.0f, 8874.0f, 8875.0f, 8876.0f, 8877.0f, 8878.0f, 8879.0f, -4410.0f, -4411.0f, -4412.0f, -4413.0f, -4414.0f, -4415.0f, -4416.0f, -4417.0f, -4418.0f, -4419.0f, -4420.0f, -4421.0f, -4422.0f, -4423.0f, -4424.0f, -4425.0f, -4426.0f, -4427.0f, -4428.0f, -4429.0f, -4430.0f, -4431.0f, -4432.0f, -4433.0f, -4434.0f, -4435.0f, -4436.0f, -4437.0f, -4438.0f, -4439.0f, 8880.0f, 8881.0f, 8882.0f, 8883.0f, 8884.0f, 8885.0f, 8886.0f, 8887.0f, 8888.0f, 8889.0f, 8890.0f, 8891.0f, 8892.0f, 8893.0f, 8894.0f, 8895.0f, 8896.0f, 8897.0f, 8898.0f, 8899.0f, 8900.0f, 8901.0f, 8902.0f, 8903.0f, 8904.0f, 8905.0f, 8906.0f, 8907.0f, 8908.0f, 8909.0f, 8910.0f, 8911.0f, 8912.0f, 8913.0f, 8914.0f, 8915.0f, 8916.0f, 8917.0f, 8918.0f, 8919.0f, 8920.0f, 8921.0f, 8922.0f, 8923.0f, 8924.0f, 8925.0f, 8926.0f, 8927.0f, 8928.0f, 8929.0f, 8930.0f, 8931.0f, 8932.0f, 8933.0f, 8934.0f, 8935.0f, 8936.0f, 8937.0f, 8938.0f, 8939.0f, -4440.0f, -4441.0f, -4442.0f, -4443.0f, -4444.0f, -4445.0f, -4446.0f, -4447.0f, -4448.0f, -4449.0f, -4450.0f, -4451.0f, -4452.0f, -4453.0f, -4454.0f, -4455.0f, -4456.0f, -4457.0f, -4458.0f, -4459.0f, -4460.0f, -4461.0f, -4462.0f, -4463.0f, -4464.0f, -4465.0f, -4466.0f, -4467.0f, -4468.0f, -4469.0f, 8940.0f, 8941.0f, 8942.0f, 8943.0f, 8944.0f, 8945.0f, 8946.0f, 8947.0f, 8948.0f, 8949.0f, 8950.0f, 8951.0f, 8952.0f, 8953.0f, 8954.0f, 8955.0f, 8956.0f, 8957.0f, 8958.0f, 8959.0f, 8960.0f, 8961.0f, 8962.0f, 8963.0f, 8964.0f, 8965.0f, 8966.0f, 8967.0f, 8968.0f, 8969.0f, 8970.0f, 8971.0f, 8972.0f, 8973.0f, 8974.0f, 8975.0f, 8976.0f, 8977.0f, 8978.0f, 8979.0f, 8980.0f, 8981.0f, 8982.0f, 8983.0f, 8984.0f, 8985.0f, 8986.0f, 8987.0f, 8988.0f, 8989.0f, 8990.0f, 8991.0f, 8992.0f, 8993.0f, 8994.0f, 8995.0f, 8996.0f, 8997.0f, 8998.0f, 8999.0f, -4470.0f, -4471.0f, -4472.0f, -4473.0f, -4474.0f, -4475.0f, -4476.0f, -4477.0f, -4478.0f, -4479.0f, -4480.0f, -4481.0f, -4482.0f, -4483.0f, -4484.0f, -4485.0f, -4486.0f, -4487.0f, -4488.0f, -4489.0f, -4490.0f, -4491.0f, -4492.0f, 
-4493.0f, -4494.0f, -4495.0f, -4496.0f, -4497.0f, -4498.0f, -4499.0f, 9000.0f, 9001.0f, 9002.0f, 9003.0f, 9004.0f, 9005.0f, 9006.0f, 9007.0f, 9008.0f, 9009.0f, 9010.0f, 9011.0f, 9012.0f, 9013.0f, 9014.0f, 9015.0f, 9016.0f, 9017.0f, 9018.0f, 9019.0f, 9020.0f, 9021.0f, 9022.0f, 9023.0f, 9024.0f, 9025.0f, 9026.0f, 9027.0f, 9028.0f, 9029.0f, 9030.0f, 9031.0f, 9032.0f, 9033.0f, 9034.0f, 9035.0f, 9036.0f, 9037.0f, 9038.0f, 9039.0f, 9040.0f, 9041.0f, 9042.0f, 9043.0f, 9044.0f, 9045.0f, 9046.0f, 9047.0f, 9048.0f, 9049.0f, 9050.0f, 9051.0f, 9052.0f, 9053.0f, 9054.0f, 9055.0f, 9056.0f, 9057.0f, 9058.0f, 9059.0f, -4500.0f, -4501.0f, -4502.0f, -4503.0f, -4504.0f, -4505.0f, -4506.0f, -4507.0f, -4508.0f, -4509.0f, -4510.0f, -4511.0f, -4512.0f, -4513.0f, -4514.0f, -4515.0f, -4516.0f, -4517.0f, -4518.0f, -4519.0f, -4520.0f, -4521.0f, -4522.0f, -4523.0f, -4524.0f, -4525.0f, -4526.0f, -4527.0f, -4528.0f, -4529.0f, 9060.0f, 9061.0f, 9062.0f, 9063.0f, 9064.0f, 9065.0f, 9066.0f, 9067.0f, 9068.0f, 9069.0f, 9070.0f, 9071.0f, 9072.0f, 9073.0f, 9074.0f, 9075.0f, 9076.0f, 9077.0f, 9078.0f, 9079.0f, 9080.0f, 9081.0f, 9082.0f, 9083.0f, 9084.0f, 9085.0f, 9086.0f, 9087.0f, 9088.0f, 9089.0f, 9090.0f, 9091.0f, 9092.0f, 9093.0f, 9094.0f, 9095.0f, 9096.0f, 9097.0f, 9098.0f, 9099.0f, 9100.0f, 9101.0f, 9102.0f, 9103.0f, 9104.0f, 9105.0f, 9106.0f, 9107.0f, 9108.0f, 9109.0f, 9110.0f, 9111.0f, 9112.0f, 9113.0f, 9114.0f, 9115.0f, 9116.0f, 9117.0f, 9118.0f, 9119.0f, -4530.0f, -4531.0f, -4532.0f, -4533.0f, -4534.0f, -4535.0f, -4536.0f, -4537.0f, -4538.0f, -4539.0f, -4540.0f, -4541.0f, -4542.0f, -4543.0f, -4544.0f, -4545.0f, -4546.0f, -4547.0f, -4548.0f, -4549.0f, -4550.0f, -4551.0f, -4552.0f, -4553.0f, -4554.0f, -4555.0f, -4556.0f, -4557.0f, -4558.0f, -4559.0f, 9120.0f, 9121.0f, 9122.0f, 9123.0f, 9124.0f, 9125.0f, 9126.0f, 9127.0f, 9128.0f, 9129.0f, 9130.0f, 9131.0f, 9132.0f, 9133.0f, 9134.0f, 9135.0f, 9136.0f, 9137.0f, 9138.0f, 9139.0f, 9140.0f, 9141.0f, 9142.0f, 9143.0f, 9144.0f, 9145.0f, 9146.0f, 
9147.0f, 9148.0f, 9149.0f, 9150.0f, 9151.0f, 9152.0f, 9153.0f, 9154.0f, 9155.0f, 9156.0f, 9157.0f, 9158.0f, 9159.0f, 9160.0f, 9161.0f, 9162.0f, 9163.0f, 9164.0f, 9165.0f, 9166.0f, 9167.0f, 9168.0f, 9169.0f, 9170.0f, 9171.0f, 9172.0f, 9173.0f, 9174.0f, 9175.0f, 9176.0f, 9177.0f, 9178.0f, 9179.0f, -4560.0f, -4561.0f, -4562.0f, -4563.0f, -4564.0f, -4565.0f, -4566.0f, -4567.0f, -4568.0f, -4569.0f, -4570.0f, -4571.0f, -4572.0f, -4573.0f, -4574.0f, -4575.0f, -4576.0f, -4577.0f, -4578.0f, -4579.0f, -4580.0f, -4581.0f, -4582.0f, -4583.0f, -4584.0f, -4585.0f, -4586.0f, -4587.0f, -4588.0f, -4589.0f, 9180.0f, 9181.0f, 9182.0f, 9183.0f, 9184.0f, 9185.0f, 9186.0f, 9187.0f, 9188.0f, 9189.0f, 9190.0f, 9191.0f, 9192.0f, 9193.0f, 9194.0f, 9195.0f, 9196.0f, 9197.0f, 9198.0f, 9199.0f, 9200.0f, 9201.0f, 9202.0f, 9203.0f, 9204.0f, 9205.0f, 9206.0f, 9207.0f, 9208.0f, 9209.0f, 9210.0f, 9211.0f, 9212.0f, 9213.0f, 9214.0f, 9215.0f, 9216.0f, 9217.0f, 9218.0f, 9219.0f, 9220.0f, 9221.0f, 9222.0f, 9223.0f, 9224.0f, 9225.0f, 9226.0f, 9227.0f, 9228.0f, 9229.0f, 9230.0f, 9231.0f, 9232.0f, 9233.0f, 9234.0f, 9235.0f, 9236.0f, 9237.0f, 9238.0f, 9239.0f, -4590.0f, -4591.0f, -4592.0f, -4593.0f, -4594.0f, -4595.0f, -4596.0f, -4597.0f, -4598.0f, -4599.0f, -4600.0f, -4601.0f, -4602.0f, -4603.0f, -4604.0f, -4605.0f, -4606.0f, -4607.0f, -4608.0f, -4609.0f, -4610.0f, -4611.0f, -4612.0f, -4613.0f, -4614.0f, -4615.0f, -4616.0f, -4617.0f, -4618.0f, -4619.0f, 9240.0f, 9241.0f, 9242.0f, 9243.0f, 9244.0f, 9245.0f, 9246.0f, 9247.0f, 9248.0f, 9249.0f, 9250.0f, 9251.0f, 9252.0f, 9253.0f, 9254.0f, 9255.0f, 9256.0f, 9257.0f, 9258.0f, 9259.0f, 9260.0f, 9261.0f, 9262.0f, 9263.0f, 9264.0f, 9265.0f, 9266.0f, 9267.0f, 9268.0f, 9269.0f, 9270.0f, 9271.0f, 9272.0f, 9273.0f, 9274.0f, 9275.0f, 9276.0f, 9277.0f, 9278.0f, 9279.0f, 9280.0f, 9281.0f, 9282.0f, 9283.0f, 9284.0f, 9285.0f, 9286.0f, 9287.0f, 9288.0f, 9289.0f, 9290.0f, 9291.0f, 9292.0f, 9293.0f, 9294.0f, 9295.0f, 9296.0f, 9297.0f, 9298.0f, 9299.0f, -4620.0f, -4621.0f, 
-4622.0f, -4623.0f, -4624.0f, -4625.0f, -4626.0f, -4627.0f, -4628.0f, -4629.0f, -4630.0f, -4631.0f, -4632.0f, -4633.0f, -4634.0f, -4635.0f, -4636.0f, -4637.0f, -4638.0f, -4639.0f, -4640.0f, -4641.0f, -4642.0f, -4643.0f, -4644.0f, -4645.0f, -4646.0f, -4647.0f, -4648.0f, -4649.0f, 9300.0f, 9301.0f, 9302.0f, 9303.0f, 9304.0f, 9305.0f, 9306.0f, 9307.0f, 9308.0f, 9309.0f, 9310.0f, 9311.0f, 9312.0f, 9313.0f, 9314.0f, 9315.0f, 9316.0f, 9317.0f, 9318.0f, 9319.0f, 9320.0f, 9321.0f, 9322.0f, 9323.0f, 9324.0f, 9325.0f, 9326.0f, 9327.0f, 9328.0f, 9329.0f, 9330.0f, 9331.0f, 9332.0f, 9333.0f, 9334.0f, 9335.0f, 9336.0f, 9337.0f, 9338.0f, 9339.0f, 9340.0f, 9341.0f, 9342.0f, 9343.0f, 9344.0f, 9345.0f, 9346.0f, 9347.0f, 9348.0f, 9349.0f, 9350.0f, 9351.0f, 9352.0f, 9353.0f, 9354.0f, 9355.0f, 9356.0f, 9357.0f, 9358.0f, 9359.0f, -4650.0f, -4651.0f, -4652.0f, -4653.0f, -4654.0f, -4655.0f, -4656.0f, -4657.0f, -4658.0f, -4659.0f, -4660.0f, -4661.0f, -4662.0f, -4663.0f, -4664.0f, -4665.0f, -4666.0f, -4667.0f, -4668.0f, -4669.0f, -4670.0f, -4671.0f, -4672.0f, -4673.0f, -4674.0f, -4675.0f, -4676.0f, -4677.0f, -4678.0f, -4679.0f, 9360.0f, 9361.0f, 9362.0f, 9363.0f, 9364.0f, 9365.0f, 9366.0f, 9367.0f, 9368.0f, 9369.0f, 9370.0f, 9371.0f, 9372.0f, 9373.0f, 9374.0f, 9375.0f, 9376.0f, 9377.0f, 9378.0f, 9379.0f, 9380.0f, 9381.0f, 9382.0f, 9383.0f, 9384.0f, 9385.0f, 9386.0f, 9387.0f, 9388.0f, 9389.0f, 9390.0f, 9391.0f, 9392.0f, 9393.0f, 9394.0f, 9395.0f, 9396.0f, 9397.0f, 9398.0f, 9399.0f, 9400.0f, 9401.0f, 9402.0f, 9403.0f, 9404.0f, 9405.0f, 9406.0f, 9407.0f, 9408.0f, 9409.0f, 9410.0f, 9411.0f, 9412.0f, 9413.0f, 9414.0f, 9415.0f, 9416.0f, 9417.0f, 9418.0f, 9419.0f, -4680.0f, -4681.0f, -4682.0f, -4683.0f, -4684.0f, -4685.0f, -4686.0f, -4687.0f, -4688.0f, -4689.0f, -4690.0f, -4691.0f, -4692.0f, -4693.0f, -4694.0f, -4695.0f, -4696.0f, -4697.0f, -4698.0f, -4699.0f, -4700.0f, -4701.0f, -4702.0f, -4703.0f, -4704.0f, -4705.0f, -4706.0f, -4707.0f, -4708.0f, -4709.0f, 9420.0f, 9421.0f, 9422.0f, 9423.0f, 
9424.0f, 9425.0f, 9426.0f, 9427.0f, 9428.0f, 9429.0f, 9430.0f, 9431.0f, 9432.0f, 9433.0f, 9434.0f, 9435.0f, 9436.0f, 9437.0f, 9438.0f, 9439.0f, 9440.0f, 9441.0f, 9442.0f, 9443.0f, 9444.0f, 9445.0f, 9446.0f, 9447.0f, 9448.0f, 9449.0f, 9450.0f, 9451.0f, 9452.0f, 9453.0f, 9454.0f, 9455.0f, 9456.0f, 9457.0f, 9458.0f, 9459.0f, 9460.0f, 9461.0f, 9462.0f, 9463.0f, 9464.0f, 9465.0f, 9466.0f, 9467.0f, 9468.0f, 9469.0f, 9470.0f, 9471.0f, 9472.0f, 9473.0f, 9474.0f, 9475.0f, 9476.0f, 9477.0f, 9478.0f, 9479.0f, -4710.0f, -4711.0f, -4712.0f, -4713.0f, -4714.0f, -4715.0f, -4716.0f, -4717.0f, -4718.0f, -4719.0f, -4720.0f, -4721.0f, -4722.0f, -4723.0f, -4724.0f, -4725.0f, -4726.0f, -4727.0f, -4728.0f, -4729.0f, -4730.0f, -4731.0f, -4732.0f, -4733.0f, -4734.0f, -4735.0f, -4736.0f, -4737.0f, -4738.0f, -4739.0f, 9480.0f, 9481.0f, 9482.0f, 9483.0f, 9484.0f, 9485.0f, 9486.0f, 9487.0f, 9488.0f, 9489.0f, 9490.0f, 9491.0f, 9492.0f, 9493.0f, 9494.0f, 9495.0f, 9496.0f, 9497.0f, 9498.0f, 9499.0f, 9500.0f, 9501.0f, 9502.0f, 9503.0f, 9504.0f, 9505.0f, 9506.0f, 9507.0f, 9508.0f, 9509.0f, 9510.0f, 9511.0f, 9512.0f, 9513.0f, 9514.0f, 9515.0f, 9516.0f, 9517.0f, 9518.0f, 9519.0f, 9520.0f, 9521.0f, 9522.0f, 9523.0f, 9524.0f, 9525.0f, 9526.0f, 9527.0f, 9528.0f, 9529.0f, 9530.0f, 9531.0f, 9532.0f, 9533.0f, 9534.0f, 9535.0f, 9536.0f, 9537.0f, 9538.0f, 9539.0f, -4740.0f, -4741.0f, -4742.0f, -4743.0f, -4744.0f, -4745.0f, -4746.0f, -4747.0f, -4748.0f, -4749.0f, -4750.0f, -4751.0f, -4752.0f, -4753.0f, -4754.0f, -4755.0f, -4756.0f, -4757.0f, -4758.0f, -4759.0f, -4760.0f, -4761.0f, -4762.0f, -4763.0f, -4764.0f, -4765.0f, -4766.0f, -4767.0f, -4768.0f, -4769.0f, 9540.0f, 9541.0f, 9542.0f, 9543.0f, 9544.0f, 9545.0f, 9546.0f, 9547.0f, 9548.0f, 9549.0f, 9550.0f, 9551.0f, 9552.0f, 9553.0f, 9554.0f, 9555.0f, 9556.0f, 9557.0f, 9558.0f, 9559.0f, 9560.0f, 9561.0f, 9562.0f, 9563.0f, 9564.0f, 9565.0f, 9566.0f, 9567.0f, 9568.0f, 9569.0f, 9570.0f, 9571.0f, 9572.0f, 9573.0f, 9574.0f, 9575.0f, 9576.0f, 9577.0f, 9578.0f, 
9579.0f, 9580.0f, 9581.0f, 9582.0f, 9583.0f, 9584.0f, 9585.0f, 9586.0f, 9587.0f, 9588.0f, 9589.0f, 9590.0f, 9591.0f, 9592.0f, 9593.0f, 9594.0f, 9595.0f, 9596.0f, 9597.0f, 9598.0f, 9599.0f, -4770.0f, -4771.0f, -4772.0f, -4773.0f, -4774.0f, -4775.0f, -4776.0f, -4777.0f, -4778.0f, -4779.0f, -4780.0f, -4781.0f, -4782.0f, -4783.0f, -4784.0f, -4785.0f, -4786.0f, -4787.0f, -4788.0f, -4789.0f, -4790.0f, -4791.0f, -4792.0f, -4793.0f, -4794.0f, -4795.0f, -4796.0f, -4797.0f, -4798.0f, -4799.0f, 9600.0f, 9601.0f, 9602.0f, 9603.0f, 9604.0f, 9605.0f, 9606.0f, 9607.0f, 9608.0f, 9609.0f, 9610.0f, 9611.0f, 9612.0f, 9613.0f, 9614.0f, 9615.0f, 9616.0f, 9617.0f, 9618.0f, 9619.0f, 9620.0f, 9621.0f, 9622.0f, 9623.0f, 9624.0f, 9625.0f, 9626.0f, 9627.0f, 9628.0f, 9629.0f, 9630.0f, 9631.0f, 9632.0f, 9633.0f, 9634.0f, 9635.0f, 9636.0f, 9637.0f, 9638.0f, 9639.0f, 9640.0f, 9641.0f, 9642.0f, 9643.0f, 9644.0f, 9645.0f, 9646.0f, 9647.0f, 9648.0f, 9649.0f, 9650.0f, 9651.0f, 9652.0f, 9653.0f, 9654.0f, 9655.0f, 9656.0f, 9657.0f, 9658.0f, 9659.0f, -4800.0f, -4801.0f, -4802.0f, -4803.0f, -4804.0f, -4805.0f, -4806.0f, -4807.0f, -4808.0f, -4809.0f, -4810.0f, -4811.0f, -4812.0f, -4813.0f, -4814.0f, -4815.0f, -4816.0f, -4817.0f, -4818.0f, -4819.0f, -4820.0f, -4821.0f, -4822.0f, -4823.0f, -4824.0f, -4825.0f, -4826.0f, -4827.0f, -4828.0f, -4829.0f, 9660.0f, 9661.0f, 9662.0f, 9663.0f, 9664.0f, 9665.0f, 9666.0f, 9667.0f, 9668.0f, 9669.0f, 9670.0f, 9671.0f, 9672.0f, 9673.0f, 9674.0f, 9675.0f, 9676.0f, 9677.0f, 9678.0f, 9679.0f, 9680.0f, 9681.0f, 9682.0f, 9683.0f, 9684.0f, 9685.0f, 9686.0f, 9687.0f, 9688.0f, 9689.0f, 9690.0f, 9691.0f, 9692.0f, 9693.0f, 9694.0f, 9695.0f, 9696.0f, 9697.0f, 9698.0f, 9699.0f, 9700.0f, 9701.0f, 9702.0f, 9703.0f, 9704.0f, 9705.0f, 9706.0f, 9707.0f, 9708.0f, 9709.0f, 9710.0f, 9711.0f, 9712.0f, 9713.0f, 9714.0f, 9715.0f, 9716.0f, 9717.0f, 9718.0f, 9719.0f, -4830.0f, -4831.0f, -4832.0f, -4833.0f, -4834.0f, -4835.0f, -4836.0f, -4837.0f, -4838.0f, -4839.0f, -4840.0f, -4841.0f, -4842.0f, 
-4843.0f, -4844.0f, -4845.0f, -4846.0f, -4847.0f, -4848.0f, -4849.0f, -4850.0f, -4851.0f, -4852.0f, -4853.0f, -4854.0f, -4855.0f, -4856.0f, -4857.0f, -4858.0f, -4859.0f, 9720.0f, 9721.0f, 9722.0f, 9723.0f, 9724.0f, 9725.0f, 9726.0f, 9727.0f, 9728.0f, 9729.0f, 9730.0f, 9731.0f, 9732.0f, 9733.0f, 9734.0f, 9735.0f, 9736.0f, 9737.0f, 9738.0f, 9739.0f, 9740.0f, 9741.0f, 9742.0f, 9743.0f, 9744.0f, 9745.0f, 9746.0f, 9747.0f, 9748.0f, 9749.0f, 9750.0f, 9751.0f, 9752.0f, 9753.0f, 9754.0f, 9755.0f, 9756.0f, 9757.0f, 9758.0f, 9759.0f, 9760.0f, 9761.0f, 9762.0f, 9763.0f, 9764.0f, 9765.0f, 9766.0f, 9767.0f, 9768.0f, 9769.0f, 9770.0f, 9771.0f, 9772.0f, 9773.0f, 9774.0f, 9775.0f, 9776.0f, 9777.0f, 9778.0f, 9779.0f, -4860.0f, -4861.0f, -4862.0f, -4863.0f, -4864.0f, -4865.0f, -4866.0f, -4867.0f, -4868.0f, -4869.0f, -4870.0f, -4871.0f, -4872.0f, -4873.0f, -4874.0f, -4875.0f, -4876.0f, -4877.0f, -4878.0f, -4879.0f, -4880.0f, -4881.0f, -4882.0f, -4883.0f, -4884.0f, -4885.0f, -4886.0f, -4887.0f, -4888.0f, -4889.0f, 9780.0f, 9781.0f, 9782.0f, 9783.0f, 9784.0f, 9785.0f, 9786.0f, 9787.0f, 9788.0f, 9789.0f, 9790.0f, 9791.0f, 9792.0f, 9793.0f, 9794.0f, 9795.0f, 9796.0f, 9797.0f, 9798.0f, 9799.0f, 9800.0f, 9801.0f, 9802.0f, 9803.0f, 9804.0f, 9805.0f, 9806.0f, 9807.0f, 9808.0f, 9809.0f, 9810.0f, 9811.0f, 9812.0f, 9813.0f, 9814.0f, 9815.0f, 9816.0f, 9817.0f, 9818.0f, 9819.0f, 9820.0f, 9821.0f, 9822.0f, 9823.0f, 9824.0f, 9825.0f, 9826.0f, 9827.0f, 9828.0f, 9829.0f, 9830.0f, 9831.0f, 9832.0f, 9833.0f, 9834.0f, 9835.0f, 9836.0f, 9837.0f, 9838.0f, 9839.0f, -4890.0f, -4891.0f, -4892.0f, -4893.0f, -4894.0f, -4895.0f, -4896.0f, -4897.0f, -4898.0f, -4899.0f, -4900.0f, -4901.0f, -4902.0f, -4903.0f, -4904.0f, -4905.0f, -4906.0f, -4907.0f, -4908.0f, -4909.0f, -4910.0f, -4911.0f, -4912.0f, -4913.0f, -4914.0f, -4915.0f, -4916.0f, -4917.0f, -4918.0f, -4919.0f, 9840.0f, 9841.0f, 9842.0f, 9843.0f, 9844.0f, 9845.0f, 9846.0f, 9847.0f, 9848.0f, 9849.0f, 9850.0f, 9851.0f, 9852.0f, 9853.0f, 9854.0f, 9855.0f, 
9856.0f, 9857.0f, 9858.0f, 9859.0f, 9860.0f, 9861.0f, 9862.0f, 9863.0f, 9864.0f, 9865.0f, 9866.0f, 9867.0f, 9868.0f, 9869.0f, 9870.0f, 9871.0f, 9872.0f, 9873.0f, 9874.0f, 9875.0f, 9876.0f, 9877.0f, 9878.0f, 9879.0f, 9880.0f, 9881.0f, 9882.0f, 9883.0f, 9884.0f, 9885.0f, 9886.0f, 9887.0f, 9888.0f, 9889.0f, 9890.0f, 9891.0f, 9892.0f, 9893.0f, 9894.0f, 9895.0f, 9896.0f, 9897.0f, 9898.0f, 9899.0f, -4920.0f, -4921.0f, -4922.0f, -4923.0f, -4924.0f, -4925.0f, -4926.0f, -4927.0f, -4928.0f, -4929.0f, -4930.0f, -4931.0f, -4932.0f, -4933.0f, -4934.0f, -4935.0f, -4936.0f, -4937.0f, -4938.0f, -4939.0f, -4940.0f, -4941.0f, -4942.0f, -4943.0f, -4944.0f, -4945.0f, -4946.0f, -4947.0f, -4948.0f, -4949.0f, 9900.0f, 9901.0f, 9902.0f, 9903.0f, 9904.0f, 9905.0f, 9906.0f, 9907.0f, 9908.0f, 9909.0f, 9910.0f, 9911.0f, 9912.0f, 9913.0f, 9914.0f, 9915.0f, 9916.0f, 9917.0f, 9918.0f, 9919.0f, 9920.0f, 9921.0f, 9922.0f, 9923.0f, 9924.0f, 9925.0f, 9926.0f, 9927.0f, 9928.0f, 9929.0f, 9930.0f, 9931.0f, 9932.0f, 9933.0f, 9934.0f, 9935.0f, 9936.0f, 9937.0f, 9938.0f, 9939.0f, 9940.0f, 9941.0f, 9942.0f, 9943.0f, 9944.0f, 9945.0f, 9946.0f, 9947.0f, 9948.0f, 9949.0f, 9950.0f, 9951.0f, 9952.0f, 9953.0f, 9954.0f, 9955.0f, 9956.0f, 9957.0f, 9958.0f, 9959.0f, -4950.0f, -4951.0f, -4952.0f, -4953.0f, -4954.0f, -4955.0f, -4956.0f, -4957.0f, -4958.0f, -4959.0f, -4960.0f, -4961.0f, -4962.0f, -4963.0f, -4964.0f, -4965.0f, -4966.0f, -4967.0f, -4968.0f, -4969.0f, -4970.0f, -4971.0f, -4972.0f, -4973.0f, -4974.0f, -4975.0f, -4976.0f, -4977.0f, -4978.0f, -4979.0f, 9960.0f, 9961.0f, 9962.0f, 9963.0f, 9964.0f, 9965.0f, 9966.0f, 9967.0f, 9968.0f, 9969.0f, 9970.0f, 9971.0f, 9972.0f, 9973.0f, 9974.0f, 9975.0f, 9976.0f, 9977.0f, 9978.0f, 9979.0f, 9980.0f, 9981.0f, 9982.0f, 9983.0f, 9984.0f, 9985.0f, 9986.0f, 9987.0f, 9988.0f, 9989.0f, 9990.0f, 9991.0f, 9992.0f, 9993.0f, 9994.0f, 9995.0f, 9996.0f, 9997.0f, 9998.0f, 9999.0f, 10000.0f, 10001.0f, 10002.0f, 10003.0f, 10004.0f, 10005.0f, 10006.0f, 10007.0f, 10008.0f, 10009.0f, 
10010.0f, 10011.0f, 10012.0f, 10013.0f, 10014.0f, 10015.0f, 10016.0f, 10017.0f, 10018.0f, 10019.0f, -4980.0f, -4981.0f, -4982.0f, -4983.0f, -4984.0f, -4985.0f, -4986.0f, -4987.0f, -4988.0f, -4989.0f, -4990.0f, -4991.0f, -4992.0f, -4993.0f, -4994.0f, -4995.0f, -4996.0f, -4997.0f, -4998.0f, -4999.0f, -5000.0f, -5001.0f, -5002.0f, -5003.0f, -5004.0f, -5005.0f, -5006.0f, -5007.0f, -5008.0f, -5009.0f, 10020.0f, 10021.0f, 10022.0f, 10023.0f, 10024.0f, 10025.0f, 10026.0f, 10027.0f, 10028.0f, 10029.0f, 10030.0f, 10031.0f, 10032.0f, 10033.0f, 10034.0f, 10035.0f, 10036.0f, 10037.0f, 10038.0f, 10039.0f, 10040.0f, 10041.0f, 10042.0f, 10043.0f, 10044.0f, 10045.0f, 10046.0f, 10047.0f, 10048.0f, 10049.0f, 10050.0f, 10051.0f, 10052.0f, 10053.0f, 10054.0f, 10055.0f, 10056.0f, 10057.0f, 10058.0f, 10059.0f, 10060.0f, 10061.0f, 10062.0f, 10063.0f, 10064.0f, 10065.0f, 10066.0f, 10067.0f, 10068.0f, 10069.0f, 10070.0f, 10071.0f, 10072.0f, 10073.0f, 10074.0f, 10075.0f, 10076.0f, 10077.0f, 10078.0f, 10079.0f, -5010.0f, -5011.0f, -5012.0f, -5013.0f, -5014.0f, -5015.0f, -5016.0f, -5017.0f, -5018.0f, -5019.0f, -5020.0f, -5021.0f, -5022.0f, -5023.0f, -5024.0f, -5025.0f, -5026.0f, -5027.0f, -5028.0f, -5029.0f, -5030.0f, -5031.0f, -5032.0f, -5033.0f, -5034.0f, -5035.0f, -5036.0f, -5037.0f, -5038.0f, -5039.0f, 10080.0f, 10081.0f, 10082.0f, 10083.0f, 10084.0f, 10085.0f, 10086.0f, 10087.0f, 10088.0f, 10089.0f, 10090.0f, 10091.0f, 10092.0f, 10093.0f, 10094.0f, 10095.0f, 10096.0f, 10097.0f, 10098.0f, 10099.0f, 10100.0f, 10101.0f, 10102.0f, 10103.0f, 10104.0f, 10105.0f, 10106.0f, 10107.0f, 10108.0f, 10109.0f, 10110.0f, 10111.0f, 10112.0f, 10113.0f, 10114.0f, 10115.0f, 10116.0f, 10117.0f, 10118.0f, 10119.0f, 10120.0f, 10121.0f, 10122.0f, 10123.0f, 10124.0f, 10125.0f, 10126.0f, 10127.0f, 10128.0f, 10129.0f, 10130.0f, 10131.0f, 10132.0f, 10133.0f, 10134.0f, 10135.0f, 10136.0f, 10137.0f, 10138.0f, 10139.0f, -5040.0f, -5041.0f, -5042.0f, -5043.0f, -5044.0f, -5045.0f, -5046.0f, -5047.0f, -5048.0f, -5049.0f, 
-5050.0f, -5051.0f, -5052.0f, -5053.0f, -5054.0f, -5055.0f, -5056.0f, -5057.0f, -5058.0f, -5059.0f, -5060.0f, -5061.0f, -5062.0f, -5063.0f, -5064.0f, -5065.0f, -5066.0f, -5067.0f, -5068.0f, -5069.0f, 10140.0f, 10141.0f, 10142.0f, 10143.0f, 10144.0f, 10145.0f, 10146.0f, 10147.0f, 10148.0f, 10149.0f, 10150.0f, 10151.0f, 10152.0f, 10153.0f, 10154.0f, 10155.0f, 10156.0f, 10157.0f, 10158.0f, 10159.0f, 10160.0f, 10161.0f, 10162.0f, 10163.0f, 10164.0f, 10165.0f, 10166.0f, 10167.0f, 10168.0f, 10169.0f, 10170.0f, 10171.0f, 10172.0f, 10173.0f, 10174.0f, 10175.0f, 10176.0f, 10177.0f, 10178.0f, 10179.0f, 10180.0f, 10181.0f, 10182.0f, 10183.0f, 10184.0f, 10185.0f, 10186.0f, 10187.0f, 10188.0f, 10189.0f, 10190.0f, 10191.0f, 10192.0f, 10193.0f, 10194.0f, 10195.0f, 10196.0f, 10197.0f, 10198.0f, 10199.0f, -5070.0f, -5071.0f, -5072.0f, -5073.0f, -5074.0f, -5075.0f, -5076.0f, -5077.0f, -5078.0f, -5079.0f, -5080.0f, -5081.0f, -5082.0f, -5083.0f, -5084.0f, -5085.0f, -5086.0f, -5087.0f, -5088.0f, -5089.0f, -5090.0f, -5091.0f, -5092.0f, -5093.0f, -5094.0f, -5095.0f, -5096.0f, -5097.0f, -5098.0f, -5099.0f, 10200.0f, 10201.0f, 10202.0f, 10203.0f, 10204.0f, 10205.0f, 10206.0f, 10207.0f, 10208.0f, 10209.0f, 10210.0f, 10211.0f, 10212.0f, 10213.0f, 10214.0f, 10215.0f, 10216.0f, 10217.0f, 10218.0f, 10219.0f, 10220.0f, 10221.0f, 10222.0f, 10223.0f, 10224.0f, 10225.0f, 10226.0f, 10227.0f, 10228.0f, 10229.0f, 10230.0f, 10231.0f, 10232.0f, 10233.0f, 10234.0f, 10235.0f, 10236.0f, 10237.0f, 10238.0f, 10239.0f, 10240.0f, 10241.0f, 10242.0f, 10243.0f, 10244.0f, 10245.0f, 10246.0f, 10247.0f, 10248.0f, 10249.0f, 10250.0f, 10251.0f, 10252.0f, 10253.0f, 10254.0f, 10255.0f, 10256.0f, 10257.0f, 10258.0f, 10259.0f, -5100.0f, -5101.0f, -5102.0f, -5103.0f, -5104.0f, -5105.0f, -5106.0f, -5107.0f, -5108.0f, -5109.0f, -5110.0f, -5111.0f, -5112.0f, -5113.0f, -5114.0f, -5115.0f, -5116.0f, -5117.0f, -5118.0f, -5119.0f, -5120.0f, -5121.0f, -5122.0f, -5123.0f, -5124.0f, -5125.0f, -5126.0f, -5127.0f, -5128.0f, -5129.0f, 
10260.0f, 10261.0f, 10262.0f, 10263.0f, 10264.0f, 10265.0f, 10266.0f, 10267.0f, 10268.0f, 10269.0f, 10270.0f, 10271.0f, 10272.0f, 10273.0f, 10274.0f, 10275.0f, 10276.0f, 10277.0f, 10278.0f, 10279.0f, 10280.0f, 10281.0f, 10282.0f, 10283.0f, 10284.0f, 10285.0f, 10286.0f, 10287.0f, 10288.0f, 10289.0f, 10290.0f, 10291.0f, 10292.0f, 10293.0f, 10294.0f, 10295.0f, 10296.0f, 10297.0f, 10298.0f, 10299.0f, 10300.0f, 10301.0f, 10302.0f, 10303.0f, 10304.0f, 10305.0f, 10306.0f, 10307.0f, 10308.0f, 10309.0f, 10310.0f, 10311.0f, 10312.0f, 10313.0f, 10314.0f, 10315.0f, 10316.0f, 10317.0f, 10318.0f, 10319.0f, -5130.0f, -5131.0f, -5132.0f, -5133.0f, -5134.0f, -5135.0f, -5136.0f, -5137.0f, -5138.0f, -5139.0f, -5140.0f, -5141.0f, -5142.0f, -5143.0f, -5144.0f, -5145.0f, -5146.0f, -5147.0f, -5148.0f, -5149.0f, -5150.0f, -5151.0f, -5152.0f, -5153.0f, -5154.0f, -5155.0f, -5156.0f, -5157.0f, -5158.0f, -5159.0f, 10320.0f, 10321.0f, 10322.0f, 10323.0f, 10324.0f, 10325.0f, 10326.0f, 10327.0f, 10328.0f, 10329.0f, 10330.0f, 10331.0f, 10332.0f, 10333.0f, 10334.0f, 10335.0f, 10336.0f, 10337.0f, 10338.0f, 10339.0f, 10340.0f, 10341.0f, 10342.0f, 10343.0f, 10344.0f, 10345.0f, 10346.0f, 10347.0f, 10348.0f, 10349.0f, 10350.0f, 10351.0f, 10352.0f, 10353.0f, 10354.0f, 10355.0f, 10356.0f, 10357.0f, 10358.0f, 10359.0f, 10360.0f, 10361.0f, 10362.0f, 10363.0f, 10364.0f, 10365.0f, 10366.0f, 10367.0f, 10368.0f, 10369.0f, 10370.0f, 10371.0f, 10372.0f, 10373.0f, 10374.0f, 10375.0f, 10376.0f, 10377.0f, 10378.0f, 10379.0f, -5160.0f, -5161.0f, -5162.0f, -5163.0f, -5164.0f, -5165.0f, -5166.0f, -5167.0f, -5168.0f, -5169.0f, -5170.0f, -5171.0f, -5172.0f, -5173.0f, -5174.0f, -5175.0f, -5176.0f, -5177.0f, -5178.0f, -5179.0f, -5180.0f, -5181.0f, -5182.0f, -5183.0f, -5184.0f, -5185.0f, -5186.0f, -5187.0f, -5188.0f, -5189.0f, 10380.0f, 10381.0f, 10382.0f, 10383.0f, 10384.0f, 10385.0f, 10386.0f, 10387.0f, 10388.0f, 10389.0f, 10390.0f, 10391.0f, 10392.0f, 10393.0f, 10394.0f, 10395.0f, 10396.0f, 10397.0f, 10398.0f, 10399.0f, 
10400.0f, 10401.0f, 10402.0f, 10403.0f, 10404.0f, 10405.0f, 10406.0f, 10407.0f, 10408.0f, 10409.0f, 10410.0f, 10411.0f, 10412.0f, 10413.0f, 10414.0f, 10415.0f, 10416.0f, 10417.0f, 10418.0f, 10419.0f, 10420.0f, 10421.0f, 10422.0f, 10423.0f, 10424.0f, 10425.0f, 10426.0f, 10427.0f, 10428.0f, 10429.0f, 10430.0f, 10431.0f, 10432.0f, 10433.0f, 10434.0f, 10435.0f, 10436.0f, 10437.0f, 10438.0f, 10439.0f, -5190.0f, -5191.0f, -5192.0f, -5193.0f, -5194.0f, -5195.0f, -5196.0f, -5197.0f, -5198.0f, -5199.0f, -5200.0f, -5201.0f, -5202.0f, -5203.0f, -5204.0f, -5205.0f, -5206.0f, -5207.0f, -5208.0f, -5209.0f, -5210.0f, -5211.0f, -5212.0f, -5213.0f, -5214.0f, -5215.0f, -5216.0f, -5217.0f, -5218.0f, -5219.0f, 10440.0f, 10441.0f, 10442.0f, 10443.0f, 10444.0f, 10445.0f, 10446.0f, 10447.0f, 10448.0f, 10449.0f, 10450.0f, 10451.0f, 10452.0f, 10453.0f, 10454.0f, 10455.0f, 10456.0f, 10457.0f, 10458.0f, 10459.0f, 10460.0f, 10461.0f, 10462.0f, 10463.0f, 10464.0f, 10465.0f, 10466.0f, 10467.0f, 10468.0f, 10469.0f, 10470.0f, 10471.0f, 10472.0f, 10473.0f, 10474.0f, 10475.0f, 10476.0f, 10477.0f, 10478.0f, 10479.0f, 10480.0f, 10481.0f, 10482.0f, 10483.0f, 10484.0f, 10485.0f, 10486.0f, 10487.0f, 10488.0f, 10489.0f, 10490.0f, 10491.0f, 10492.0f, 10493.0f, 10494.0f, 10495.0f, 10496.0f, 10497.0f, 10498.0f, 10499.0f, -5220.0f, -5221.0f, -5222.0f, -5223.0f, -5224.0f, -5225.0f, -5226.0f, -5227.0f, -5228.0f, -5229.0f, -5230.0f, -5231.0f, -5232.0f, -5233.0f, -5234.0f, -5235.0f, -5236.0f, -5237.0f, -5238.0f, -5239.0f, -5240.0f, -5241.0f, -5242.0f, -5243.0f, -5244.0f, -5245.0f, -5246.0f, -5247.0f, -5248.0f, -5249.0f, 10500.0f, 10501.0f, 10502.0f, 10503.0f, 10504.0f, 10505.0f, 10506.0f, 10507.0f, 10508.0f, 10509.0f, 10510.0f, 10511.0f, 10512.0f, 10513.0f, 10514.0f, 10515.0f, 10516.0f, 10517.0f, 10518.0f, 10519.0f, 10520.0f, 10521.0f, 10522.0f, 10523.0f, 10524.0f, 10525.0f, 10526.0f, 10527.0f, 10528.0f, 10529.0f, 10530.0f, 10531.0f, 10532.0f, 10533.0f, 10534.0f, 10535.0f, 10536.0f, 10537.0f, 10538.0f, 10539.0f, 
10540.0f, 10541.0f, 10542.0f, 10543.0f, 10544.0f, 10545.0f, 10546.0f, 10547.0f, 10548.0f, 10549.0f, 10550.0f, 10551.0f, 10552.0f, 10553.0f, 10554.0f, 10555.0f, 10556.0f, 10557.0f, 10558.0f, 10559.0f, -5250.0f, -5251.0f, -5252.0f, -5253.0f, -5254.0f, -5255.0f, -5256.0f, -5257.0f, -5258.0f, -5259.0f, -5260.0f, -5261.0f, -5262.0f, -5263.0f, -5264.0f, -5265.0f, -5266.0f, -5267.0f, -5268.0f, -5269.0f, -5270.0f, -5271.0f, -5272.0f, -5273.0f, -5274.0f, -5275.0f, -5276.0f, -5277.0f, -5278.0f, -5279.0f, 10560.0f, 10561.0f, 10562.0f, 10563.0f, 10564.0f, 10565.0f, 10566.0f, 10567.0f, 10568.0f, 10569.0f, 10570.0f, 10571.0f, 10572.0f, 10573.0f, 10574.0f, 10575.0f, 10576.0f, 10577.0f, 10578.0f, 10579.0f, 10580.0f, 10581.0f, 10582.0f, 10583.0f, 10584.0f, 10585.0f, 10586.0f, 10587.0f, 10588.0f, 10589.0f, 10590.0f, 10591.0f, 10592.0f, 10593.0f, 10594.0f, 10595.0f, 10596.0f, 10597.0f, 10598.0f, 10599.0f, 10600.0f, 10601.0f, 10602.0f, 10603.0f, 10604.0f, 10605.0f, 10606.0f, 10607.0f, 10608.0f, 10609.0f, 10610.0f, 10611.0f, 10612.0f, 10613.0f, 10614.0f, 10615.0f, 10616.0f, 10617.0f, 10618.0f, 10619.0f, -5280.0f, -5281.0f, -5282.0f, -5283.0f, -5284.0f, -5285.0f, -5286.0f, -5287.0f, -5288.0f, -5289.0f, -5290.0f, -5291.0f, -5292.0f, -5293.0f, -5294.0f, -5295.0f, -5296.0f, -5297.0f, -5298.0f, -5299.0f, -5300.0f, -5301.0f, -5302.0f, -5303.0f, -5304.0f, -5305.0f, -5306.0f, -5307.0f, -5308.0f, -5309.0f, 10620.0f, 10621.0f, 10622.0f, 10623.0f, 10624.0f, 10625.0f, 10626.0f, 10627.0f, 10628.0f, 10629.0f, 10630.0f, 10631.0f, 10632.0f, 10633.0f, 10634.0f, 10635.0f, 10636.0f, 10637.0f, 10638.0f, 10639.0f, 10640.0f, 10641.0f, 10642.0f, 10643.0f, 10644.0f, 10645.0f, 10646.0f, 10647.0f, 10648.0f, 10649.0f, 10650.0f, 10651.0f, 10652.0f, 10653.0f, 10654.0f, 10655.0f, 10656.0f, 10657.0f, 10658.0f, 10659.0f, 10660.0f, 10661.0f, 10662.0f, 10663.0f, 10664.0f, 10665.0f, 10666.0f, 10667.0f, 10668.0f, 10669.0f, 10670.0f, 10671.0f, 10672.0f, 10673.0f, 10674.0f, 10675.0f, 10676.0f, 10677.0f, 10678.0f, 10679.0f, 
-5310.0f, -5311.0f, -5312.0f, -5313.0f, -5314.0f, -5315.0f, -5316.0f, -5317.0f, -5318.0f, -5319.0f, -5320.0f, -5321.0f, -5322.0f, -5323.0f, -5324.0f, -5325.0f, -5326.0f, -5327.0f, -5328.0f, -5329.0f, -5330.0f, -5331.0f, -5332.0f, -5333.0f, -5334.0f, -5335.0f, -5336.0f, -5337.0f, -5338.0f, -5339.0f, 10680.0f, 10681.0f, 10682.0f, 10683.0f, 10684.0f, 10685.0f, 10686.0f, 10687.0f, 10688.0f, 10689.0f, 10690.0f, 10691.0f, 10692.0f, 10693.0f, 10694.0f, 10695.0f, 10696.0f, 10697.0f, 10698.0f, 10699.0f, 10700.0f, 10701.0f, 10702.0f, 10703.0f, 10704.0f, 10705.0f, 10706.0f, 10707.0f, 10708.0f, 10709.0f, 10710.0f, 10711.0f, 10712.0f, 10713.0f, 10714.0f, 10715.0f, 10716.0f, 10717.0f, 10718.0f, 10719.0f, 10720.0f, 10721.0f, 10722.0f, 10723.0f, 10724.0f, 10725.0f, 10726.0f, 10727.0f, 10728.0f, 10729.0f, 10730.0f, 10731.0f, 10732.0f, 10733.0f, 10734.0f, 10735.0f, 10736.0f, 10737.0f, 10738.0f, 10739.0f, -5340.0f, -5341.0f, -5342.0f, -5343.0f, -5344.0f, -5345.0f, -5346.0f, -5347.0f, -5348.0f, -5349.0f, -5350.0f, -5351.0f, -5352.0f, -5353.0f, -5354.0f, -5355.0f, -5356.0f, -5357.0f, -5358.0f, -5359.0f, -5360.0f, -5361.0f, -5362.0f, -5363.0f, -5364.0f, -5365.0f, -5366.0f, -5367.0f, -5368.0f, -5369.0f, 10740.0f, 10741.0f, 10742.0f, 10743.0f, 10744.0f, 10745.0f, 10746.0f, 10747.0f, 10748.0f, 10749.0f, 10750.0f, 10751.0f, 10752.0f, 10753.0f, 10754.0f, 10755.0f, 10756.0f, 10757.0f, 10758.0f, 10759.0f, 10760.0f, 10761.0f, 10762.0f, 10763.0f, 10764.0f, 10765.0f, 10766.0f, 10767.0f, 10768.0f, 10769.0f, 10770.0f, 10771.0f, 10772.0f, 10773.0f, 10774.0f, 10775.0f, 10776.0f, 10777.0f, 10778.0f, 10779.0f, 10780.0f, 10781.0f, 10782.0f, 10783.0f, 10784.0f, 10785.0f, 10786.0f, 10787.0f, 10788.0f, 10789.0f, 10790.0f, 10791.0f, 10792.0f, 10793.0f, 10794.0f, 10795.0f, 10796.0f, 10797.0f, 10798.0f, 10799.0f, -5370.0f, -5371.0f, -5372.0f, -5373.0f, -5374.0f, -5375.0f, -5376.0f, -5377.0f, -5378.0f, -5379.0f, -5380.0f, -5381.0f, -5382.0f, -5383.0f, -5384.0f, -5385.0f, -5386.0f, -5387.0f, -5388.0f, -5389.0f, 
-5390.0f, -5391.0f, -5392.0f, -5393.0f, -5394.0f, -5395.0f, -5396.0f, -5397.0f, -5398.0f, -5399.0f, 10800.0f, 10801.0f, 10802.0f, 10803.0f, 10804.0f, 10805.0f, 10806.0f, 10807.0f, 10808.0f, 10809.0f, 10810.0f, 10811.0f, 10812.0f, 10813.0f, 10814.0f, 10815.0f, 10816.0f, 10817.0f, 10818.0f, 10819.0f, 10820.0f, 10821.0f, 10822.0f, 10823.0f, 10824.0f, 10825.0f, 10826.0f, 10827.0f, 10828.0f, 10829.0f, 10830.0f, 10831.0f, 10832.0f, 10833.0f, 10834.0f, 10835.0f, 10836.0f, 10837.0f, 10838.0f, 10839.0f, 10840.0f, 10841.0f, 10842.0f, 10843.0f, 10844.0f, 10845.0f, 10846.0f, 10847.0f, 10848.0f, 10849.0f, 10850.0f, 10851.0f, 10852.0f, 10853.0f, 10854.0f, 10855.0f, 10856.0f, 10857.0f, 10858.0f, 10859.0f, -5400.0f, -5401.0f, -5402.0f, -5403.0f, -5404.0f, -5405.0f, -5406.0f, -5407.0f, -5408.0f, -5409.0f, -5410.0f, -5411.0f, -5412.0f, -5413.0f, -5414.0f, -5415.0f, -5416.0f, -5417.0f, -5418.0f, -5419.0f, -5420.0f, -5421.0f, -5422.0f, -5423.0f, -5424.0f, -5425.0f, -5426.0f, -5427.0f, -5428.0f, -5429.0f, 10860.0f, 10861.0f, 10862.0f, 10863.0f, 10864.0f, 10865.0f, 10866.0f, 10867.0f, 10868.0f, 10869.0f, 10870.0f, 10871.0f, 10872.0f, 10873.0f, 10874.0f, 10875.0f, 10876.0f, 10877.0f, 10878.0f, 10879.0f, 10880.0f, 10881.0f, 10882.0f, 10883.0f, 10884.0f, 10885.0f, 10886.0f, 10887.0f, 10888.0f, 10889.0f, 10890.0f, 10891.0f, 10892.0f, 10893.0f, 10894.0f, 10895.0f, 10896.0f, 10897.0f, 10898.0f, 10899.0f, 10900.0f, 10901.0f, 10902.0f, 10903.0f, 10904.0f, 10905.0f, 10906.0f, 10907.0f, 10908.0f, 10909.0f, 10910.0f, 10911.0f, 10912.0f, 10913.0f, 10914.0f, 10915.0f, 10916.0f, 10917.0f, 10918.0f, 10919.0f, -5430.0f, -5431.0f, -5432.0f, -5433.0f, -5434.0f, -5435.0f, -5436.0f, -5437.0f, -5438.0f, -5439.0f, -5440.0f, -5441.0f, -5442.0f, -5443.0f, -5444.0f, -5445.0f, -5446.0f, -5447.0f, -5448.0f, -5449.0f, -5450.0f, -5451.0f, -5452.0f, -5453.0f, -5454.0f, -5455.0f, -5456.0f, -5457.0f, -5458.0f, -5459.0f, 10920.0f, 10921.0f, 10922.0f, 10923.0f, 10924.0f, 10925.0f, 10926.0f, 10927.0f, 10928.0f, 10929.0f, 
10930.0f, 10931.0f, 10932.0f, 10933.0f, 10934.0f, 10935.0f, 10936.0f, 10937.0f, 10938.0f, 10939.0f, 10940.0f, 10941.0f, 10942.0f, 10943.0f, 10944.0f, 10945.0f, 10946.0f, 10947.0f, 10948.0f, 10949.0f, 10950.0f, 10951.0f, 10952.0f, 10953.0f, 10954.0f, 10955.0f, 10956.0f, 10957.0f, 10958.0f, 10959.0f, 10960.0f, 10961.0f, 10962.0f, 10963.0f, 10964.0f, 10965.0f, 10966.0f, 10967.0f, 10968.0f, 10969.0f, 10970.0f, 10971.0f, 10972.0f, 10973.0f, 10974.0f, 10975.0f, 10976.0f, 10977.0f, 10978.0f, 10979.0f, -5460.0f, -5461.0f, -5462.0f, -5463.0f, -5464.0f, -5465.0f, -5466.0f, -5467.0f, -5468.0f, -5469.0f, -5470.0f, -5471.0f, -5472.0f, -5473.0f, -5474.0f, -5475.0f, -5476.0f, -5477.0f, -5478.0f, -5479.0f, -5480.0f, -5481.0f, -5482.0f, -5483.0f, -5484.0f, -5485.0f, -5486.0f, -5487.0f, -5488.0f, -5489.0f, 10980.0f, 10981.0f, 10982.0f, 10983.0f, 10984.0f, 10985.0f, 10986.0f, 10987.0f, 10988.0f, 10989.0f, 10990.0f, 10991.0f, 10992.0f, 10993.0f, 10994.0f, 10995.0f, 10996.0f, 10997.0f, 10998.0f, 10999.0f, 11000.0f, 11001.0f, 11002.0f, 11003.0f, 11004.0f, 11005.0f, 11006.0f, 11007.0f, 11008.0f, 11009.0f, 11010.0f, 11011.0f, 11012.0f, 11013.0f, 11014.0f, 11015.0f, 11016.0f, 11017.0f, 11018.0f, 11019.0f, 11020.0f, 11021.0f, 11022.0f, 11023.0f, 11024.0f, 11025.0f, 11026.0f, 11027.0f, 11028.0f, 11029.0f, 11030.0f, 11031.0f, 11032.0f, 11033.0f, 11034.0f, 11035.0f, 11036.0f, 11037.0f, 11038.0f, 11039.0f, -5490.0f, -5491.0f, -5492.0f, -5493.0f, -5494.0f, -5495.0f, -5496.0f, -5497.0f, -5498.0f, -5499.0f, -5500.0f, -5501.0f, -5502.0f, -5503.0f, -5504.0f, -5505.0f, -5506.0f, -5507.0f, -5508.0f, -5509.0f, -5510.0f, -5511.0f, -5512.0f, -5513.0f, -5514.0f, -5515.0f, -5516.0f, -5517.0f, -5518.0f, -5519.0f, 11040.0f, 11041.0f, 11042.0f, 11043.0f, 11044.0f, 11045.0f, 11046.0f, 11047.0f, 11048.0f, 11049.0f, 11050.0f, 11051.0f, 11052.0f, 11053.0f, 11054.0f, 11055.0f, 11056.0f, 11057.0f, 11058.0f, 11059.0f, 11060.0f, 11061.0f, 11062.0f, 11063.0f, 11064.0f, 11065.0f, 11066.0f, 11067.0f, 11068.0f, 11069.0f, 
11070.0f, 11071.0f, 11072.0f, 11073.0f, 11074.0f, 11075.0f, 11076.0f, 11077.0f, 11078.0f, 11079.0f, 11080.0f, 11081.0f, 11082.0f, 11083.0f, 11084.0f, 11085.0f, 11086.0f, 11087.0f, 11088.0f, 11089.0f, 11090.0f, 11091.0f, 11092.0f, 11093.0f, 11094.0f, 11095.0f, 11096.0f, 11097.0f, 11098.0f, 11099.0f, -5520.0f, -5521.0f, -5522.0f, -5523.0f, -5524.0f, -5525.0f, -5526.0f, -5527.0f, -5528.0f, -5529.0f, -5530.0f, -5531.0f, -5532.0f, -5533.0f, -5534.0f, -5535.0f, -5536.0f, -5537.0f, -5538.0f, -5539.0f, -5540.0f, -5541.0f, -5542.0f, -5543.0f, -5544.0f, -5545.0f, -5546.0f, -5547.0f, -5548.0f, -5549.0f, 11100.0f, 11101.0f, 11102.0f, 11103.0f, 11104.0f, 11105.0f, 11106.0f, 11107.0f, 11108.0f, 11109.0f, 11110.0f, 11111.0f, 11112.0f, 11113.0f, 11114.0f, 11115.0f, 11116.0f, 11117.0f, 11118.0f, 11119.0f, 11120.0f, 11121.0f, 11122.0f, 11123.0f, 11124.0f, 11125.0f, 11126.0f, 11127.0f, 11128.0f, 11129.0f, 11130.0f, 11131.0f, 11132.0f, 11133.0f, 11134.0f, 11135.0f, 11136.0f, 11137.0f, 11138.0f, 11139.0f, 11140.0f, 11141.0f, 11142.0f, 11143.0f, 11144.0f, 11145.0f, 11146.0f, 11147.0f, 11148.0f, 11149.0f, 11150.0f, 11151.0f, 11152.0f, 11153.0f, 11154.0f, 11155.0f, 11156.0f, 11157.0f, 11158.0f, 11159.0f, -5550.0f, -5551.0f, -5552.0f, -5553.0f, -5554.0f, -5555.0f, -5556.0f, -5557.0f, -5558.0f, -5559.0f, -5560.0f, -5561.0f, -5562.0f, -5563.0f, -5564.0f, -5565.0f, -5566.0f, -5567.0f, -5568.0f, -5569.0f, -5570.0f, -5571.0f, -5572.0f, -5573.0f, -5574.0f, -5575.0f, -5576.0f, -5577.0f, -5578.0f, -5579.0f, 11160.0f, 11161.0f, 11162.0f, 11163.0f, 11164.0f, 11165.0f, 11166.0f, 11167.0f, 11168.0f, 11169.0f, 11170.0f, 11171.0f, 11172.0f, 11173.0f, 11174.0f, 11175.0f, 11176.0f, 11177.0f, 11178.0f, 11179.0f, 11180.0f, 11181.0f, 11182.0f, 11183.0f, 11184.0f, 11185.0f, 11186.0f, 11187.0f, 11188.0f, 11189.0f, 11190.0f, 11191.0f, 11192.0f, 11193.0f, 11194.0f, 11195.0f, 11196.0f, 11197.0f, 11198.0f, 11199.0f, 11200.0f, 11201.0f, 11202.0f, 11203.0f, 11204.0f, 11205.0f, 11206.0f, 11207.0f, 11208.0f, 11209.0f, 
11210.0f, 11211.0f, 11212.0f, 11213.0f, 11214.0f, 11215.0f, 11216.0f, 11217.0f, 11218.0f, 11219.0f, -5580.0f, -5581.0f, -5582.0f, -5583.0f, -5584.0f, -5585.0f, -5586.0f, -5587.0f, -5588.0f, -5589.0f, -5590.0f, -5591.0f, -5592.0f, -5593.0f, -5594.0f, -5595.0f, -5596.0f, -5597.0f, -5598.0f, -5599.0f, -5600.0f, -5601.0f, -5602.0f, -5603.0f, -5604.0f, -5605.0f, -5606.0f, -5607.0f, -5608.0f, -5609.0f, 11220.0f, 11221.0f, 11222.0f, 11223.0f, 11224.0f, 11225.0f, 11226.0f, 11227.0f, 11228.0f, 11229.0f, 11230.0f, 11231.0f, 11232.0f, 11233.0f, 11234.0f, 11235.0f, 11236.0f, 11237.0f, 11238.0f, 11239.0f, 11240.0f, 11241.0f, 11242.0f, 11243.0f, 11244.0f, 11245.0f, 11246.0f, 11247.0f, 11248.0f, 11249.0f, 11250.0f, 11251.0f, 11252.0f, 11253.0f, 11254.0f, 11255.0f, 11256.0f, 11257.0f, 11258.0f, 11259.0f, 11260.0f, 11261.0f, 11262.0f, 11263.0f, 11264.0f, 11265.0f, 11266.0f, 11267.0f, 11268.0f, 11269.0f, 11270.0f, 11271.0f, 11272.0f, 11273.0f, 11274.0f, 11275.0f, 11276.0f, 11277.0f, 11278.0f, 11279.0f, -5610.0f, -5611.0f, -5612.0f, -5613.0f, -5614.0f, -5615.0f, -5616.0f, -5617.0f, -5618.0f, -5619.0f, -5620.0f, -5621.0f, -5622.0f, -5623.0f, -5624.0f, -5625.0f, -5626.0f, -5627.0f, -5628.0f, -5629.0f, -5630.0f, -5631.0f, -5632.0f, -5633.0f, -5634.0f, -5635.0f, -5636.0f, -5637.0f, -5638.0f, -5639.0f, 11280.0f, 11281.0f, 11282.0f, 11283.0f, 11284.0f, 11285.0f, 11286.0f, 11287.0f, 11288.0f, 11289.0f, 11290.0f, 11291.0f, 11292.0f, 11293.0f, 11294.0f, 11295.0f, 11296.0f, 11297.0f, 11298.0f, 11299.0f, 11300.0f, 11301.0f, 11302.0f, 11303.0f, 11304.0f, 11305.0f, 11306.0f, 11307.0f, 11308.0f, 11309.0f, 11310.0f, 11311.0f, 11312.0f, 11313.0f, 11314.0f, 11315.0f, 11316.0f, 11317.0f, 11318.0f, 11319.0f, 11320.0f, 11321.0f, 11322.0f, 11323.0f, 11324.0f, 11325.0f, 11326.0f, 11327.0f, 11328.0f, 11329.0f, 11330.0f, 11331.0f, 11332.0f, 11333.0f, 11334.0f, 11335.0f, 11336.0f, 11337.0f, 11338.0f, 11339.0f, -5640.0f, -5641.0f, -5642.0f, -5643.0f, -5644.0f, -5645.0f, -5646.0f, -5647.0f, -5648.0f, -5649.0f, 
-5650.0f, -5651.0f, -5652.0f, -5653.0f, -5654.0f, -5655.0f, -5656.0f, -5657.0f, -5658.0f, -5659.0f, -5660.0f, -5661.0f, -5662.0f, -5663.0f, -5664.0f, -5665.0f, -5666.0f, -5667.0f, -5668.0f, -5669.0f, 11340.0f, 11341.0f, 11342.0f, 11343.0f, 11344.0f, 11345.0f, 11346.0f, 11347.0f, 11348.0f, 11349.0f, 11350.0f, 11351.0f, 11352.0f, 11353.0f, 11354.0f, 11355.0f, 11356.0f, 11357.0f, 11358.0f, 11359.0f, 11360.0f, 11361.0f, 11362.0f, 11363.0f, 11364.0f, 11365.0f, 11366.0f, 11367.0f, 11368.0f, 11369.0f, 11370.0f, 11371.0f, 11372.0f, 11373.0f, 11374.0f, 11375.0f, 11376.0f, 11377.0f, 11378.0f, 11379.0f, 11380.0f, 11381.0f, 11382.0f, 11383.0f, 11384.0f, 11385.0f, 11386.0f, 11387.0f, 11388.0f, 11389.0f, 11390.0f, 11391.0f, 11392.0f, 11393.0f, 11394.0f, 11395.0f, 11396.0f, 11397.0f, 11398.0f, 11399.0f, -5670.0f, -5671.0f, -5672.0f, -5673.0f, -5674.0f, -5675.0f, -5676.0f, -5677.0f, -5678.0f, -5679.0f, -5680.0f, -5681.0f, -5682.0f, -5683.0f, -5684.0f, -5685.0f, -5686.0f, -5687.0f, -5688.0f, -5689.0f, -5690.0f, -5691.0f, -5692.0f, -5693.0f, -5694.0f, -5695.0f, -5696.0f, -5697.0f, -5698.0f, -5699.0f, 11400.0f, 11401.0f, 11402.0f, 11403.0f, 11404.0f, 11405.0f, 11406.0f, 11407.0f, 11408.0f, 11409.0f, 11410.0f, 11411.0f, 11412.0f, 11413.0f, 11414.0f, 11415.0f, 11416.0f, 11417.0f, 11418.0f, 11419.0f, 11420.0f, 11421.0f, 11422.0f, 11423.0f, 11424.0f, 11425.0f, 11426.0f, 11427.0f, 11428.0f, 11429.0f, 11430.0f, 11431.0f, 11432.0f, 11433.0f, 11434.0f, 11435.0f, 11436.0f, 11437.0f, 11438.0f, 11439.0f, 11440.0f, 11441.0f, 11442.0f, 11443.0f, 11444.0f, 11445.0f, 11446.0f, 11447.0f, 11448.0f, 11449.0f, 11450.0f, 11451.0f, 11452.0f, 11453.0f, 11454.0f, 11455.0f, 11456.0f, 11457.0f, 11458.0f, 11459.0f, -5700.0f, -5701.0f, -5702.0f, -5703.0f, -5704.0f, -5705.0f, -5706.0f, -5707.0f, -5708.0f, -5709.0f, -5710.0f, -5711.0f, -5712.0f, -5713.0f, -5714.0f, -5715.0f, -5716.0f, -5717.0f, -5718.0f, -5719.0f, -5720.0f, -5721.0f, -5722.0f, -5723.0f, -5724.0f, -5725.0f, -5726.0f, -5727.0f, -5728.0f, -5729.0f, 
11460.0f, 11461.0f, 11462.0f, 11463.0f, 11464.0f, 11465.0f, 11466.0f, 11467.0f, 11468.0f, 11469.0f, 11470.0f, 11471.0f, 11472.0f, 11473.0f, 11474.0f, 11475.0f, 11476.0f, 11477.0f, 11478.0f, 11479.0f, 11480.0f, 11481.0f, 11482.0f, 11483.0f, 11484.0f, 11485.0f, 11486.0f, 11487.0f, 11488.0f, 11489.0f, 11490.0f, 11491.0f, 11492.0f, 11493.0f, 11494.0f, 11495.0f, 11496.0f, 11497.0f, 11498.0f, 11499.0f, 11500.0f, 11501.0f, 11502.0f, 11503.0f, 11504.0f, 11505.0f, 11506.0f, 11507.0f, 11508.0f, 11509.0f, 11510.0f, 11511.0f, 11512.0f, 11513.0f, 11514.0f, 11515.0f, 11516.0f, 11517.0f, 11518.0f, 11519.0f, -5730.0f, -5731.0f, -5732.0f, -5733.0f, -5734.0f, -5735.0f, -5736.0f, -5737.0f, -5738.0f, -5739.0f, -5740.0f, -5741.0f, -5742.0f, -5743.0f, -5744.0f, -5745.0f, -5746.0f, -5747.0f, -5748.0f, -5749.0f, -5750.0f, -5751.0f, -5752.0f, -5753.0f, -5754.0f, -5755.0f, -5756.0f, -5757.0f, -5758.0f, -5759.0f, 11520.0f, 11521.0f, 11522.0f, 11523.0f, 11524.0f, 11525.0f, 11526.0f, 11527.0f, 11528.0f, 11529.0f, 11530.0f, 11531.0f, 11532.0f, 11533.0f, 11534.0f, 11535.0f, 11536.0f, 11537.0f, 11538.0f, 11539.0f, 11540.0f, 11541.0f, 11542.0f, 11543.0f, 11544.0f, 11545.0f, 11546.0f, 11547.0f, 11548.0f, 11549.0f, 11550.0f, 11551.0f, 11552.0f, 11553.0f, 11554.0f, 11555.0f, 11556.0f, 11557.0f, 11558.0f, 11559.0f, 11560.0f, 11561.0f, 11562.0f, 11563.0f, 11564.0f, 11565.0f, 11566.0f, 11567.0f, 11568.0f, 11569.0f, 11570.0f, 11571.0f, 11572.0f, 11573.0f, 11574.0f, 11575.0f, 11576.0f, 11577.0f, 11578.0f, 11579.0f, -5760.0f, -5761.0f, -5762.0f, -5763.0f, -5764.0f, -5765.0f, -5766.0f, -5767.0f, -5768.0f, -5769.0f, -5770.0f, -5771.0f, -5772.0f, -5773.0f, -5774.0f, -5775.0f, -5776.0f, -5777.0f, -5778.0f, -5779.0f, -5780.0f, -5781.0f, -5782.0f, -5783.0f, -5784.0f, -5785.0f, -5786.0f, -5787.0f, -5788.0f, -5789.0f, 11580.0f, 11581.0f, 11582.0f, 11583.0f, 11584.0f, 11585.0f, 11586.0f, 11587.0f, 11588.0f, 11589.0f, 11590.0f, 11591.0f, 11592.0f, 11593.0f, 11594.0f, 11595.0f, 11596.0f, 11597.0f, 11598.0f, 11599.0f, 
11600.0f, 11601.0f, 11602.0f, 11603.0f, 11604.0f, 11605.0f, 11606.0f, 11607.0f, 11608.0f, 11609.0f, 11610.0f, 11611.0f, 11612.0f, 11613.0f, 11614.0f, 11615.0f, 11616.0f, 11617.0f, 11618.0f, 11619.0f, 11620.0f, 11621.0f, 11622.0f, 11623.0f, 11624.0f, 11625.0f, 11626.0f, 11627.0f, 11628.0f, 11629.0f, 11630.0f, 11631.0f, 11632.0f, 11633.0f, 11634.0f, 11635.0f, 11636.0f, 11637.0f, 11638.0f, 11639.0f, -5790.0f, -5791.0f, -5792.0f, -5793.0f, -5794.0f, -5795.0f, -5796.0f, -5797.0f, -5798.0f, -5799.0f, -5800.0f, -5801.0f, -5802.0f, -5803.0f, -5804.0f, -5805.0f, -5806.0f, -5807.0f, -5808.0f, -5809.0f, -5810.0f, -5811.0f, -5812.0f, -5813.0f, -5814.0f, -5815.0f, -5816.0f, -5817.0f, -5818.0f, -5819.0f, 11640.0f, 11641.0f, 11642.0f, 11643.0f, 11644.0f, 11645.0f, 11646.0f, 11647.0f, 11648.0f, 11649.0f, 11650.0f, 11651.0f, 11652.0f, 11653.0f, 11654.0f, 11655.0f, 11656.0f, 11657.0f, 11658.0f, 11659.0f, 11660.0f, 11661.0f, 11662.0f, 11663.0f, 11664.0f, 11665.0f, 11666.0f, 11667.0f, 11668.0f, 11669.0f, 11670.0f, 11671.0f, 11672.0f, 11673.0f, 11674.0f, 11675.0f, 11676.0f, 11677.0f, 11678.0f, 11679.0f, 11680.0f, 11681.0f, 11682.0f, 11683.0f, 11684.0f, 11685.0f, 11686.0f, 11687.0f, 11688.0f, 11689.0f, 11690.0f, 11691.0f, 11692.0f, 11693.0f, 11694.0f, 11695.0f, 11696.0f, 11697.0f, 11698.0f, 11699.0f, -5820.0f, -5821.0f, -5822.0f, -5823.0f, -5824.0f, -5825.0f, -5826.0f, -5827.0f, -5828.0f, -5829.0f, -5830.0f, -5831.0f, -5832.0f, -5833.0f, -5834.0f, -5835.0f, -5836.0f, -5837.0f, -5838.0f, -5839.0f, -5840.0f, -5841.0f, -5842.0f, -5843.0f, -5844.0f, -5845.0f, -5846.0f, -5847.0f, -5848.0f, -5849.0f, 11700.0f, 11701.0f, 11702.0f, 11703.0f, 11704.0f, 11705.0f, 11706.0f, 11707.0f, 11708.0f, 11709.0f, 11710.0f, 11711.0f, 11712.0f, 11713.0f, 11714.0f, 11715.0f, 11716.0f, 11717.0f, 11718.0f, 11719.0f, 11720.0f, 11721.0f, 11722.0f, 11723.0f, 11724.0f, 11725.0f, 11726.0f, 11727.0f, 11728.0f, 11729.0f, 11730.0f, 11731.0f, 11732.0f, 11733.0f, 11734.0f, 11735.0f, 11736.0f, 11737.0f, 11738.0f, 11739.0f, 
11740.0f, 11741.0f, 11742.0f, 11743.0f, 11744.0f, 11745.0f, 11746.0f, 11747.0f, 11748.0f, 11749.0f, 11750.0f, 11751.0f, 11752.0f, 11753.0f, 11754.0f, 11755.0f, 11756.0f, 11757.0f, 11758.0f, 11759.0f, -5850.0f, -5851.0f, -5852.0f, -5853.0f, -5854.0f, -5855.0f, -5856.0f, -5857.0f, -5858.0f, -5859.0f, -5860.0f, -5861.0f, -5862.0f, -5863.0f, -5864.0f, -5865.0f, -5866.0f, -5867.0f, -5868.0f, -5869.0f, -5870.0f, -5871.0f, -5872.0f, -5873.0f, -5874.0f, -5875.0f, -5876.0f, -5877.0f, -5878.0f, -5879.0f, 11760.0f, 11761.0f, 11762.0f, 11763.0f, 11764.0f, 11765.0f, 11766.0f, 11767.0f, 11768.0f, 11769.0f, 11770.0f, 11771.0f, 11772.0f, 11773.0f, 11774.0f, 11775.0f, 11776.0f, 11777.0f, 11778.0f, 11779.0f, 11780.0f, 11781.0f, 11782.0f, 11783.0f, 11784.0f, 11785.0f, 11786.0f, 11787.0f, 11788.0f, 11789.0f, 11790.0f, 11791.0f, 11792.0f, 11793.0f, 11794.0f, 11795.0f, 11796.0f, 11797.0f, 11798.0f, 11799.0f, 11800.0f, 11801.0f, 11802.0f, 11803.0f, 11804.0f, 11805.0f, 11806.0f, 11807.0f, 11808.0f, 11809.0f, 11810.0f, 11811.0f, 11812.0f, 11813.0f, 11814.0f, 11815.0f, 11816.0f, 11817.0f, 11818.0f, 11819.0f, -5880.0f, -5881.0f, -5882.0f, -5883.0f, -5884.0f, -5885.0f, -5886.0f, -5887.0f, -5888.0f, -5889.0f, -5890.0f, -5891.0f, -5892.0f, -5893.0f, -5894.0f, -5895.0f, -5896.0f, -5897.0f, -5898.0f, -5899.0f, -5900.0f, -5901.0f, -5902.0f, -5903.0f, -5904.0f, -5905.0f, -5906.0f, -5907.0f, -5908.0f, -5909.0f, 11820.0f, 11821.0f, 11822.0f, 11823.0f, 11824.0f, 11825.0f, 11826.0f, 11827.0f, 11828.0f, 11829.0f, 11830.0f, 11831.0f, 11832.0f, 11833.0f, 11834.0f, 11835.0f, 11836.0f, 11837.0f, 11838.0f, 11839.0f, 11840.0f, 11841.0f, 11842.0f, 11843.0f, 11844.0f, 11845.0f, 11846.0f, 11847.0f, 11848.0f, 11849.0f, 11850.0f, 11851.0f, 11852.0f, 11853.0f, 11854.0f, 11855.0f, 11856.0f, 11857.0f, 11858.0f, 11859.0f, 11860.0f, 11861.0f, 11862.0f, 11863.0f, 11864.0f, 11865.0f, 11866.0f, 11867.0f, 11868.0f, 11869.0f, 11870.0f, 11871.0f, 11872.0f, 11873.0f, 11874.0f, 11875.0f, 11876.0f, 11877.0f, 11878.0f, 11879.0f, 
-5910.0f, -5911.0f, -5912.0f, -5913.0f, -5914.0f, -5915.0f, -5916.0f, -5917.0f, -5918.0f, -5919.0f, -5920.0f, -5921.0f, -5922.0f, -5923.0f, -5924.0f, -5925.0f, -5926.0f, -5927.0f, -5928.0f, -5929.0f, -5930.0f, -5931.0f, -5932.0f, -5933.0f, -5934.0f, -5935.0f, -5936.0f, -5937.0f, -5938.0f, -5939.0f, 11880.0f, 11881.0f, 11882.0f, 11883.0f, 11884.0f, 11885.0f, 11886.0f, 11887.0f, 11888.0f, 11889.0f, 11890.0f, 11891.0f, 11892.0f, 11893.0f, 11894.0f, 11895.0f, 11896.0f, 11897.0f, 11898.0f, 11899.0f, 11900.0f, 11901.0f, 11902.0f, 11903.0f, 11904.0f, 11905.0f, 11906.0f, 11907.0f, 11908.0f, 11909.0f, 11910.0f, 11911.0f, 11912.0f, 11913.0f, 11914.0f, 11915.0f, 11916.0f, 11917.0f, 11918.0f, 11919.0f, 11920.0f, 11921.0f, 11922.0f, 11923.0f, 11924.0f, 11925.0f, 11926.0f, 11927.0f, 11928.0f, 11929.0f, 11930.0f, 11931.0f, 11932.0f, 11933.0f, 11934.0f, 11935.0f, 11936.0f, 11937.0f, 11938.0f, 11939.0f, -5940.0f, -5941.0f, -5942.0f, -5943.0f, -5944.0f, -5945.0f, -5946.0f, -5947.0f, -5948.0f, -5949.0f, -5950.0f, -5951.0f, -5952.0f, -5953.0f, -5954.0f, -5955.0f, -5956.0f, -5957.0f, -5958.0f, -5959.0f, -5960.0f, -5961.0f, -5962.0f, -5963.0f, -5964.0f, -5965.0f, -5966.0f, -5967.0f, -5968.0f, -5969.0f, 11940.0f, 11941.0f, 11942.0f, 11943.0f, 11944.0f, 11945.0f, 11946.0f, 11947.0f, 11948.0f, 11949.0f, 11950.0f, 11951.0f, 11952.0f, 11953.0f, 11954.0f, 11955.0f, 11956.0f, 11957.0f, 11958.0f, 11959.0f, 11960.0f, 11961.0f, 11962.0f, 11963.0f, 11964.0f, 11965.0f, 11966.0f, 11967.0f, 11968.0f, 11969.0f, 11970.0f, 11971.0f, 11972.0f, 11973.0f, 11974.0f, 11975.0f, 11976.0f, 11977.0f, 11978.0f, 11979.0f, 11980.0f, 11981.0f, 11982.0f, 11983.0f, 11984.0f, 11985.0f, 11986.0f, 11987.0f, 11988.0f, 11989.0f, 11990.0f, 11991.0f, 11992.0f, 11993.0f, 11994.0f, 11995.0f, 11996.0f, 11997.0f, 11998.0f, 11999.0f, -5970.0f, -5971.0f, -5972.0f, -5973.0f, -5974.0f, -5975.0f, -5976.0f, -5977.0f, -5978.0f, -5979.0f, -5980.0f, -5981.0f, -5982.0f, -5983.0f, -5984.0f, -5985.0f, -5986.0f, -5987.0f, -5988.0f, -5989.0f, 
-5990.0f, -5991.0f, -5992.0f, -5993.0f, -5994.0f, -5995.0f, -5996.0f, -5997.0f, -5998.0f, -5999.0f, 12000.0f, 12001.0f, 12002.0f, 12003.0f, 12004.0f, 12005.0f, 12006.0f, 12007.0f, 12008.0f, 12009.0f, 12010.0f, 12011.0f, 12012.0f, 12013.0f, 12014.0f, 12015.0f, 12016.0f, 12017.0f, 12018.0f, 12019.0f, 12020.0f, 12021.0f, 12022.0f, 12023.0f, 12024.0f, 12025.0f, 12026.0f, 12027.0f, 12028.0f, 12029.0f, 12030.0f, 12031.0f, 12032.0f, 12033.0f, 12034.0f, 12035.0f, 12036.0f, 12037.0f, 12038.0f, 12039.0f, 12040.0f, 12041.0f, 12042.0f, 12043.0f, 12044.0f, 12045.0f, 12046.0f, 12047.0f, 12048.0f, 12049.0f, 12050.0f, 12051.0f, 12052.0f, 12053.0f, 12054.0f, 12055.0f, 12056.0f, 12057.0f, 12058.0f, 12059.0f, -6000.0f, -6001.0f, -6002.0f, -6003.0f, -6004.0f, -6005.0f, -6006.0f, -6007.0f, -6008.0f, -6009.0f, -6010.0f, -6011.0f, -6012.0f, -6013.0f, -6014.0f, -6015.0f, -6016.0f, -6017.0f, -6018.0f, -6019.0f, -6020.0f, -6021.0f, -6022.0f, -6023.0f, -6024.0f, -6025.0f, -6026.0f, -6027.0f, -6028.0f, -6029.0f, 12060.0f, 12061.0f, 12062.0f, 12063.0f, 12064.0f, 12065.0f, 12066.0f, 12067.0f, 12068.0f, 12069.0f, 12070.0f, 12071.0f, 12072.0f, 12073.0f, 12074.0f, 12075.0f, 12076.0f, 12077.0f, 12078.0f, 12079.0f, 12080.0f, 12081.0f, 12082.0f, 12083.0f, 12084.0f, 12085.0f, 12086.0f, 12087.0f, 12088.0f, 12089.0f, 12090.0f, 12091.0f, 12092.0f, 12093.0f, 12094.0f, 12095.0f, 12096.0f, 12097.0f, 12098.0f, 12099.0f, 12100.0f, 12101.0f, 12102.0f, 12103.0f, 12104.0f, 12105.0f, 12106.0f, 12107.0f, 12108.0f, 12109.0f, 12110.0f, 12111.0f, 12112.0f, 12113.0f, 12114.0f, 12115.0f, 12116.0f, 12117.0f, 12118.0f, 12119.0f, -6030.0f, -6031.0f, -6032.0f, -6033.0f, -6034.0f, -6035.0f, -6036.0f, -6037.0f, -6038.0f, -6039.0f, -6040.0f, -6041.0f, -6042.0f, -6043.0f, -6044.0f, -6045.0f, -6046.0f, -6047.0f, -6048.0f, -6049.0f, -6050.0f, -6051.0f, -6052.0f, -6053.0f, -6054.0f, -6055.0f, -6056.0f, -6057.0f, -6058.0f, -6059.0f, 12120.0f, 12121.0f, 12122.0f, 12123.0f, 12124.0f, 12125.0f, 12126.0f, 12127.0f, 12128.0f, 12129.0f, 
12130.0f, 12131.0f, 12132.0f, 12133.0f, 12134.0f, 12135.0f, 12136.0f, 12137.0f, 12138.0f, 12139.0f, 12140.0f, 12141.0f, 12142.0f, 12143.0f, 12144.0f, 12145.0f, 12146.0f, 12147.0f, 12148.0f, 12149.0f, 12150.0f, 12151.0f, 12152.0f, 12153.0f, 12154.0f, 12155.0f, 12156.0f, 12157.0f, 12158.0f, 12159.0f, 12160.0f, 12161.0f, 12162.0f, 12163.0f, 12164.0f, 12165.0f, 12166.0f, 12167.0f, 12168.0f, 12169.0f, 12170.0f, 12171.0f, 12172.0f, 12173.0f, 12174.0f, 12175.0f, 12176.0f, 12177.0f, 12178.0f, 12179.0f, -6060.0f, -6061.0f, -6062.0f, -6063.0f, -6064.0f, -6065.0f, -6066.0f, -6067.0f, -6068.0f, -6069.0f, -6070.0f, -6071.0f, -6072.0f, -6073.0f, -6074.0f, -6075.0f, -6076.0f, -6077.0f, -6078.0f, -6079.0f, -6080.0f, -6081.0f, -6082.0f, -6083.0f, -6084.0f, -6085.0f, -6086.0f, -6087.0f, -6088.0f, -6089.0f, 12180.0f, 12181.0f, 12182.0f, 12183.0f, 12184.0f, 12185.0f, 12186.0f, 12187.0f, 12188.0f, 12189.0f, 12190.0f, 12191.0f, 12192.0f, 12193.0f, 12194.0f, 12195.0f, 12196.0f, 12197.0f, 12198.0f, 12199.0f, 12200.0f, 12201.0f, 12202.0f, 12203.0f, 12204.0f, 12205.0f, 12206.0f, 12207.0f, 12208.0f, 12209.0f, 12210.0f, 12211.0f, 12212.0f, 12213.0f, 12214.0f, 12215.0f, 12216.0f, 12217.0f, 12218.0f, 12219.0f, 12220.0f, 12221.0f, 12222.0f, 12223.0f, 12224.0f, 12225.0f, 12226.0f, 12227.0f, 12228.0f, 12229.0f, 12230.0f, 12231.0f, 12232.0f, 12233.0f, 12234.0f, 12235.0f, 12236.0f, 12237.0f, 12238.0f, 12239.0f, -6090.0f, -6091.0f, -6092.0f, -6093.0f, -6094.0f, -6095.0f, -6096.0f, -6097.0f, -6098.0f, -6099.0f, -6100.0f, -6101.0f, -6102.0f, -6103.0f, -6104.0f, -6105.0f, -6106.0f, -6107.0f, -6108.0f, -6109.0f, -6110.0f, -6111.0f, -6112.0f, -6113.0f, -6114.0f, -6115.0f, -6116.0f, -6117.0f, -6118.0f, -6119.0f, 12240.0f, 12241.0f, 12242.0f, 12243.0f, 12244.0f, 12245.0f, 12246.0f, 12247.0f, 12248.0f, 12249.0f, 12250.0f, 12251.0f, 12252.0f, 12253.0f, 12254.0f, 12255.0f, 12256.0f, 12257.0f, 12258.0f, 12259.0f, 12260.0f, 12261.0f, 12262.0f, 12263.0f, 12264.0f, 12265.0f, 12266.0f, 12267.0f, 12268.0f, 12269.0f, 
12270.0f, 12271.0f, 12272.0f, 12273.0f, 12274.0f, 12275.0f, 12276.0f, 12277.0f, 12278.0f, 12279.0f, 12280.0f, 12281.0f, 12282.0f, 12283.0f, 12284.0f, 12285.0f, 12286.0f, 12287.0f, 12288.0f, 12289.0f, 12290.0f, 12291.0f, 12292.0f, 12293.0f, 12294.0f, 12295.0f, 12296.0f, 12297.0f, 12298.0f, 12299.0f, -6120.0f, -6121.0f, -6122.0f, -6123.0f, -6124.0f, -6125.0f, -6126.0f, -6127.0f, -6128.0f, -6129.0f, -6130.0f, -6131.0f, -6132.0f, -6133.0f, -6134.0f, -6135.0f, -6136.0f, -6137.0f, -6138.0f, -6139.0f, -6140.0f, -6141.0f, -6142.0f, -6143.0f, -6144.0f, -6145.0f, -6146.0f, -6147.0f, -6148.0f, -6149.0f, 12300.0f, 12301.0f, 12302.0f, 12303.0f, 12304.0f, 12305.0f, 12306.0f, 12307.0f, 12308.0f, 12309.0f, 12310.0f, 12311.0f, 12312.0f, 12313.0f, 12314.0f, 12315.0f, 12316.0f, 12317.0f, 12318.0f, 12319.0f, 12320.0f, 12321.0f, 12322.0f, 12323.0f, 12324.0f, 12325.0f, 12326.0f, 12327.0f, 12328.0f, 12329.0f, 12330.0f, 12331.0f, 12332.0f, 12333.0f, 12334.0f, 12335.0f, 12336.0f, 12337.0f, 12338.0f, 12339.0f, 12340.0f, 12341.0f, 12342.0f, 12343.0f, 12344.0f, 12345.0f, 12346.0f, 12347.0f, 12348.0f, 12349.0f, 12350.0f, 12351.0f, 12352.0f, 12353.0f, 12354.0f, 12355.0f, 12356.0f, 12357.0f, 12358.0f, 12359.0f, -6150.0f, -6151.0f, -6152.0f, -6153.0f, -6154.0f, -6155.0f, -6156.0f, -6157.0f, -6158.0f, -6159.0f, -6160.0f, -6161.0f, -6162.0f, -6163.0f, -6164.0f, -6165.0f, -6166.0f, -6167.0f, -6168.0f, -6169.0f, -6170.0f, -6171.0f, -6172.0f, -6173.0f, -6174.0f, -6175.0f, -6176.0f, -6177.0f, -6178.0f, -6179.0f, 12360.0f, 12361.0f, 12362.0f, 12363.0f, 12364.0f, 12365.0f, 12366.0f, 12367.0f, 12368.0f, 12369.0f, 12370.0f, 12371.0f, 12372.0f, 12373.0f, 12374.0f, 12375.0f, 12376.0f, 12377.0f, 12378.0f, 12379.0f, 12380.0f, 12381.0f, 12382.0f, 12383.0f, 12384.0f, 12385.0f, 12386.0f, 12387.0f, 12388.0f, 12389.0f, 12390.0f, 12391.0f, 12392.0f, 12393.0f, 12394.0f, 12395.0f, 12396.0f, 12397.0f, 12398.0f, 12399.0f, 12400.0f, 12401.0f, 12402.0f, 12403.0f, 12404.0f, 12405.0f, 12406.0f, 12407.0f, 12408.0f, 12409.0f, 
12410.0f, 12411.0f, 12412.0f, 12413.0f, 12414.0f, 12415.0f, 12416.0f, 12417.0f, 12418.0f, 12419.0f, -6180.0f, -6181.0f, -6182.0f, -6183.0f, -6184.0f, -6185.0f, -6186.0f, -6187.0f, -6188.0f, -6189.0f, -6190.0f, -6191.0f, -6192.0f, -6193.0f, -6194.0f, -6195.0f, -6196.0f, -6197.0f, -6198.0f, -6199.0f, -6200.0f, -6201.0f, -6202.0f, -6203.0f, -6204.0f, -6205.0f, -6206.0f, -6207.0f, -6208.0f, -6209.0f, 12420.0f, 12421.0f, 12422.0f, 12423.0f, 12424.0f, 12425.0f, 12426.0f, 12427.0f, 12428.0f, 12429.0f, 12430.0f, 12431.0f, 12432.0f, 12433.0f, 12434.0f, 12435.0f, 12436.0f, 12437.0f, 12438.0f, 12439.0f, 12440.0f, 12441.0f, 12442.0f, 12443.0f, 12444.0f, 12445.0f, 12446.0f, 12447.0f, 12448.0f, 12449.0f, 12450.0f, 12451.0f, 12452.0f, 12453.0f, 12454.0f, 12455.0f, 12456.0f, 12457.0f, 12458.0f, 12459.0f, 12460.0f, 12461.0f, 12462.0f, 12463.0f, 12464.0f, 12465.0f, 12466.0f, 12467.0f, 12468.0f, 12469.0f, 12470.0f, 12471.0f, 12472.0f, 12473.0f, 12474.0f, 12475.0f, 12476.0f, 12477.0f, 12478.0f, 12479.0f, -6210.0f, -6211.0f, -6212.0f, -6213.0f, -6214.0f, -6215.0f, -6216.0f, -6217.0f, -6218.0f, -6219.0f, -6220.0f, -6221.0f, -6222.0f, -6223.0f, -6224.0f, -6225.0f, -6226.0f, -6227.0f, -6228.0f, -6229.0f, -6230.0f, -6231.0f, -6232.0f, -6233.0f, -6234.0f, -6235.0f, -6236.0f, -6237.0f, -6238.0f, -6239.0f, 12480.0f, 12481.0f, 12482.0f, 12483.0f, 12484.0f, 12485.0f, 12486.0f, 12487.0f, 12488.0f, 12489.0f, 12490.0f, 12491.0f, 12492.0f, 12493.0f, 12494.0f, 12495.0f, 12496.0f, 12497.0f, 12498.0f, 12499.0f, 12500.0f, 12501.0f, 12502.0f, 12503.0f, 12504.0f, 12505.0f, 12506.0f, 12507.0f, 12508.0f, 12509.0f, 12510.0f, 12511.0f, 12512.0f, 12513.0f, 12514.0f, 12515.0f, 12516.0f, 12517.0f, 12518.0f, 12519.0f, 12520.0f, 12521.0f, 12522.0f, 12523.0f, 12524.0f, 12525.0f, 12526.0f, 12527.0f, 12528.0f, 12529.0f, 12530.0f, 12531.0f, 12532.0f, 12533.0f, 12534.0f, 12535.0f, 12536.0f, 12537.0f, 12538.0f, 12539.0f, -6240.0f, -6241.0f, -6242.0f, -6243.0f, -6244.0f, -6245.0f, -6246.0f, -6247.0f, -6248.0f, -6249.0f, 
-6250.0f, -6251.0f, -6252.0f, -6253.0f, -6254.0f, -6255.0f, -6256.0f, -6257.0f, -6258.0f, -6259.0f, -6260.0f, -6261.0f, -6262.0f, -6263.0f, -6264.0f, -6265.0f, -6266.0f, -6267.0f, -6268.0f, -6269.0f, 12540.0f, 12541.0f, 12542.0f, 12543.0f, 12544.0f, 12545.0f, 12546.0f, 12547.0f, 12548.0f, 12549.0f, 12550.0f, 12551.0f, 12552.0f, 12553.0f, 12554.0f, 12555.0f, 12556.0f, 12557.0f, 12558.0f, 12559.0f, 12560.0f, 12561.0f, 12562.0f, 12563.0f, 12564.0f, 12565.0f, 12566.0f, 12567.0f, 12568.0f, 12569.0f, 12570.0f, 12571.0f, 12572.0f, 12573.0f, 12574.0f, 12575.0f, 12576.0f, 12577.0f, 12578.0f, 12579.0f, 12580.0f, 12581.0f, 12582.0f, 12583.0f, 12584.0f, 12585.0f, 12586.0f, 12587.0f, 12588.0f, 12589.0f, 12590.0f, 12591.0f, 12592.0f, 12593.0f, 12594.0f, 12595.0f, 12596.0f, 12597.0f, 12598.0f, 12599.0f, -6270.0f, -6271.0f, -6272.0f, -6273.0f, -6274.0f, -6275.0f, -6276.0f, -6277.0f, -6278.0f, -6279.0f, -6280.0f, -6281.0f, -6282.0f, -6283.0f, -6284.0f, -6285.0f, -6286.0f, -6287.0f, -6288.0f, -6289.0f, -6290.0f, -6291.0f, -6292.0f, -6293.0f, -6294.0f, -6295.0f, -6296.0f, -6297.0f, -6298.0f, -6299.0f, 12600.0f, 12601.0f, 12602.0f, 12603.0f, 12604.0f, 12605.0f, 12606.0f, 12607.0f, 12608.0f, 12609.0f, 12610.0f, 12611.0f, 12612.0f, 12613.0f, 12614.0f, 12615.0f, 12616.0f, 12617.0f, 12618.0f, 12619.0f, 12620.0f, 12621.0f, 12622.0f, 12623.0f, 12624.0f, 12625.0f, 12626.0f, 12627.0f, 12628.0f, 12629.0f, 12630.0f, 12631.0f, 12632.0f, 12633.0f, 12634.0f, 12635.0f, 12636.0f, 12637.0f, 12638.0f, 12639.0f, 12640.0f, 12641.0f, 12642.0f, 12643.0f, 12644.0f, 12645.0f, 12646.0f, 12647.0f, 12648.0f, 12649.0f, 12650.0f, 12651.0f, 12652.0f, 12653.0f, 12654.0f, 12655.0f, 12656.0f, 12657.0f, 12658.0f, 12659.0f, -6300.0f, -6301.0f, -6302.0f, -6303.0f, -6304.0f, -6305.0f, -6306.0f, -6307.0f, -6308.0f, -6309.0f, -6310.0f, -6311.0f, -6312.0f, -6313.0f, -6314.0f, -6315.0f, -6316.0f, -6317.0f, -6318.0f, -6319.0f, -6320.0f, -6321.0f, -6322.0f, -6323.0f, -6324.0f, -6325.0f, -6326.0f, -6327.0f, -6328.0f, -6329.0f, 
12660.0f, 12661.0f, 12662.0f, 12663.0f, 12664.0f, 12665.0f, 12666.0f, 12667.0f, 12668.0f, 12669.0f, 12670.0f, 12671.0f, 12672.0f, 12673.0f, 12674.0f, 12675.0f, 12676.0f, 12677.0f, 12678.0f, 12679.0f, 12680.0f, 12681.0f, 12682.0f, 12683.0f, 12684.0f, 12685.0f, 12686.0f, 12687.0f, 12688.0f, 12689.0f, 12690.0f, 12691.0f, 12692.0f, 12693.0f, 12694.0f, 12695.0f, 12696.0f, 12697.0f, 12698.0f, 12699.0f, 12700.0f, 12701.0f, 12702.0f, 12703.0f, 12704.0f, 12705.0f, 12706.0f, 12707.0f, 12708.0f, 12709.0f, 12710.0f, 12711.0f, 12712.0f, 12713.0f, 12714.0f, 12715.0f, 12716.0f, 12717.0f, 12718.0f, 12719.0f, -6330.0f, -6331.0f, -6332.0f, -6333.0f, -6334.0f, -6335.0f, -6336.0f, -6337.0f, -6338.0f, -6339.0f, -6340.0f, -6341.0f, -6342.0f, -6343.0f, -6344.0f, -6345.0f, -6346.0f, -6347.0f, -6348.0f, -6349.0f, -6350.0f, -6351.0f, -6352.0f, -6353.0f, -6354.0f, -6355.0f, -6356.0f, -6357.0f, -6358.0f, -6359.0f}}},
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/concat_float_1.example.cpp b/nn/runtime/test/generated/examples/concat_float_1.example.cpp
index 0f339fe..22feb3b 100644
--- a/nn/runtime/test/generated/examples/concat_float_1.example.cpp
+++ b/nn/runtime/test/generated/examples/concat_float_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: concat_float_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/concat_float_1_relaxed.example.cpp b/nn/runtime/test/generated/examples/concat_float_1_relaxed.example.cpp
index 81702e0..ffb90b3 100644
--- a/nn/runtime/test/generated/examples/concat_float_1_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/concat_float_1_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: concat_float_1_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/concat_float_2.example.cpp b/nn/runtime/test/generated/examples/concat_float_2.example.cpp
index e7c801c..9772951 100644
--- a/nn/runtime/test/generated/examples/concat_float_2.example.cpp
+++ b/nn/runtime/test/generated/examples/concat_float_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: concat_float_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/concat_float_2_relaxed.example.cpp b/nn/runtime/test/generated/examples/concat_float_2_relaxed.example.cpp
index f8bd8fd..aaa3139 100644
--- a/nn/runtime/test/generated/examples/concat_float_2_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/concat_float_2_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: concat_float_2_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/concat_float_3.example.cpp b/nn/runtime/test/generated/examples/concat_float_3.example.cpp
index 721ff3e..338ee05 100644
--- a/nn/runtime/test/generated/examples/concat_float_3.example.cpp
+++ b/nn/runtime/test/generated/examples/concat_float_3.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: concat_float_3.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/concat_float_3_relaxed.example.cpp b/nn/runtime/test/generated/examples/concat_float_3_relaxed.example.cpp
index f546615..c24d50c 100644
--- a/nn/runtime/test/generated/examples/concat_float_3_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/concat_float_3_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: concat_float_3_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/concat_mixed_quant.example.cpp b/nn/runtime/test/generated/examples/concat_mixed_quant.example.cpp
new file mode 100644
index 0000000..529828d
--- /dev/null
+++ b/nn/runtime/test/generated/examples/concat_mixed_quant.example.cpp
@@ -0,0 +1,76 @@
+// clang-format off
+// Generated file (from: concat_mixed_quant.mod.py). Do not edit
+std::vector<MixedTypedExample>& get_examples_quant8() {
+static std::vector<MixedTypedExample> examples_quant8 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {139, 91, 79, 44}}, {1, {22, 62, 82, 142}}, {2, {136, 87, 76, 204}}, {3, {45, 114, 148, 252}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {137, 97, 138, 158, 139, 95, 140, 160, 87, 57, 168, 198, 85, 199, 170, 200}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_quant8;
+};
+
+std::vector<MixedTypedExample>& get_examples_quant8_2() {
+static std::vector<MixedTypedExample> examples_quant8_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {139, 91, 79, 44}}, {1, {22, 62, 82, 142}}, {2, {136, 87, 76, 204}}, {3, {45, 114, 148, 252}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {255, 0, 255, 255, 255, 0, 255, 255, 0, 0, 255, 255, 0, 255, 255, 255}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_quant8_2;
+};
+
diff --git a/nn/runtime/test/generated/examples/concat_quant8_1.example.cpp b/nn/runtime/test/generated/examples/concat_quant8_1.example.cpp
index 145f6b1..a0a064d 100644
--- a/nn/runtime/test/generated/examples/concat_quant8_1.example.cpp
+++ b/nn/runtime/test/generated/examples/concat_quant8_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: concat_quant8_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/concat_quant8_2.example.cpp b/nn/runtime/test/generated/examples/concat_quant8_2.example.cpp
index fe8c84a..6fb37b3 100644
--- a/nn/runtime/test/generated/examples/concat_quant8_2.example.cpp
+++ b/nn/runtime/test/generated/examples/concat_quant8_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: concat_quant8_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/concat_quant8_3.example.cpp b/nn/runtime/test/generated/examples/concat_quant8_3.example.cpp
index c6c4c6c..317d204 100644
--- a/nn/runtime/test/generated/examples/concat_quant8_3.example.cpp
+++ b/nn/runtime/test/generated/examples/concat_quant8_3.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: concat_quant8_3.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/conv2d_v1_2.example.cpp b/nn/runtime/test/generated/examples/conv2d_v1_2.example.cpp
index c4fbc1e..d6d67ab 100644
--- a/nn/runtime/test/generated/examples/conv2d_v1_2.example.cpp
+++ b/nn/runtime/test/generated/examples/conv2d_v1_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: conv2d_v1_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples_nhwc = {
+std::vector<MixedTypedExample>& get_examples_nhwc() {
+static std::vector<MixedTypedExample> examples_nhwc = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_quant8() {
+static std::vector<MixedTypedExample> examples_nhwc_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -101,8 +108,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nhwc_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nhwc_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -135,8 +145,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_weight_as_input_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nhwc_weight_as_input_relaxed() {
+static std::vector<MixedTypedExample> examples_nhwc_weight_as_input_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -169,8 +182,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_weight_as_input_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_weight_as_input_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_weight_as_input_quant8() {
+static std::vector<MixedTypedExample> examples_nhwc_weight_as_input_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -203,8 +219,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_weight_as_input_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nchw = {
+std::vector<MixedTypedExample>& get_examples_nchw() {
+static std::vector<MixedTypedExample> examples_nchw = {
 // Begin of an example
 {
 .operands = {
@@ -237,8 +256,11 @@
 },
 }, // End of an example
 };
+return examples_nchw;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -271,8 +293,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nchw_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nchw_quant8() {
+static std::vector<MixedTypedExample> examples_nchw_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -305,8 +330,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nchw_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nchw_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nchw_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -339,8 +367,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nchw_weight_as_input_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nchw_weight_as_input_relaxed() {
+static std::vector<MixedTypedExample> examples_nchw_weight_as_input_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -373,8 +404,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_weight_as_input_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nchw_weight_as_input_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nchw_weight_as_input_quant8() {
+static std::vector<MixedTypedExample> examples_nchw_weight_as_input_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -407,8 +441,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_weight_as_input_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_2() {
+static std::vector<MixedTypedExample> examples_nhwc_2 = {
 // Begin of an example
 {
 .operands = {
@@ -441,8 +478,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed_2() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -475,8 +515,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_quant8_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_quant8_2() {
+static std::vector<MixedTypedExample> examples_nhwc_quant8_2 = {
 // Begin of an example
 {
 .operands = {
@@ -509,8 +552,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_quant8_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_weight_as_input_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_weight_as_input_2() {
+static std::vector<MixedTypedExample> examples_nhwc_weight_as_input_2 = {
 // Begin of an example
 {
 .operands = {
@@ -543,8 +589,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_weight_as_input_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_weight_as_input_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_weight_as_input_relaxed_2() {
+static std::vector<MixedTypedExample> examples_nhwc_weight_as_input_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -577,8 +626,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_weight_as_input_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_weight_as_input_quant8_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_weight_as_input_quant8_2() {
+static std::vector<MixedTypedExample> examples_nhwc_weight_as_input_quant8_2 = {
 // Begin of an example
 {
 .operands = {
@@ -611,8 +663,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_weight_as_input_quant8_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_2() {
+static std::vector<MixedTypedExample> examples_nchw_2 = {
 // Begin of an example
 {
 .operands = {
@@ -645,8 +700,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed_2() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -679,8 +737,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_quant8_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_quant8_2() {
+static std::vector<MixedTypedExample> examples_nchw_quant8_2 = {
 // Begin of an example
 {
 .operands = {
@@ -713,8 +774,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_quant8_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_weight_as_input_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_weight_as_input_2() {
+static std::vector<MixedTypedExample> examples_nchw_weight_as_input_2 = {
 // Begin of an example
 {
 .operands = {
@@ -747,8 +811,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_weight_as_input_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_weight_as_input_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_weight_as_input_relaxed_2() {
+static std::vector<MixedTypedExample> examples_nchw_weight_as_input_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -781,8 +848,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_weight_as_input_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_weight_as_input_quant8_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_weight_as_input_quant8_2() {
+static std::vector<MixedTypedExample> examples_nchw_weight_as_input_quant8_2 = {
 // Begin of an example
 {
 .operands = {
@@ -815,8 +885,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_weight_as_input_quant8_2;
+};
 
-std::vector<MixedTypedExample> examples_channel_nhwc = {
+std::vector<MixedTypedExample>& get_examples_channel_nhwc() {
+static std::vector<MixedTypedExample> examples_channel_nhwc = {
 // Begin of an example
 {
 .operands = {
@@ -849,8 +922,11 @@
 },
 }, // End of an example
 };
+return examples_channel_nhwc;
+};
 
-std::vector<MixedTypedExample> examples_channel_nhwc_relaxed = {
+std::vector<MixedTypedExample>& get_examples_channel_nhwc_relaxed() {
+static std::vector<MixedTypedExample> examples_channel_nhwc_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -883,8 +959,11 @@
 },
 }, // End of an example
 };
+return examples_channel_nhwc_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_channel_nhwc_quant8 = {
+std::vector<MixedTypedExample>& get_examples_channel_nhwc_quant8() {
+static std::vector<MixedTypedExample> examples_channel_nhwc_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -917,8 +996,11 @@
 },
 }, // End of an example
 };
+return examples_channel_nhwc_quant8;
+};
 
-std::vector<MixedTypedExample> examples_channel_nhwc_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_channel_nhwc_weight_as_input() {
+static std::vector<MixedTypedExample> examples_channel_nhwc_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -951,8 +1033,11 @@
 },
 }, // End of an example
 };
+return examples_channel_nhwc_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_channel_nhwc_weight_as_input_relaxed = {
+std::vector<MixedTypedExample>& get_examples_channel_nhwc_weight_as_input_relaxed() {
+static std::vector<MixedTypedExample> examples_channel_nhwc_weight_as_input_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -985,8 +1070,11 @@
 },
 }, // End of an example
 };
+return examples_channel_nhwc_weight_as_input_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_channel_nhwc_weight_as_input_quant8 = {
+std::vector<MixedTypedExample>& get_examples_channel_nhwc_weight_as_input_quant8() {
+static std::vector<MixedTypedExample> examples_channel_nhwc_weight_as_input_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -1019,8 +1107,11 @@
 },
 }, // End of an example
 };
+return examples_channel_nhwc_weight_as_input_quant8;
+};
 
-std::vector<MixedTypedExample> examples_channel_nchw = {
+std::vector<MixedTypedExample>& get_examples_channel_nchw() {
+static std::vector<MixedTypedExample> examples_channel_nchw = {
 // Begin of an example
 {
 .operands = {
@@ -1053,8 +1144,11 @@
 },
 }, // End of an example
 };
+return examples_channel_nchw;
+};
 
-std::vector<MixedTypedExample> examples_channel_nchw_relaxed = {
+std::vector<MixedTypedExample>& get_examples_channel_nchw_relaxed() {
+static std::vector<MixedTypedExample> examples_channel_nchw_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -1087,8 +1181,11 @@
 },
 }, // End of an example
 };
+return examples_channel_nchw_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_channel_nchw_quant8 = {
+std::vector<MixedTypedExample>& get_examples_channel_nchw_quant8() {
+static std::vector<MixedTypedExample> examples_channel_nchw_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -1121,8 +1218,11 @@
 },
 }, // End of an example
 };
+return examples_channel_nchw_quant8;
+};
 
-std::vector<MixedTypedExample> examples_channel_nchw_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_channel_nchw_weight_as_input() {
+static std::vector<MixedTypedExample> examples_channel_nchw_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -1155,8 +1255,11 @@
 },
 }, // End of an example
 };
+return examples_channel_nchw_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_channel_nchw_weight_as_input_relaxed = {
+std::vector<MixedTypedExample>& get_examples_channel_nchw_weight_as_input_relaxed() {
+static std::vector<MixedTypedExample> examples_channel_nchw_weight_as_input_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -1189,8 +1292,11 @@
 },
 }, // End of an example
 };
+return examples_channel_nchw_weight_as_input_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_channel_nchw_weight_as_input_quant8 = {
+std::vector<MixedTypedExample>& get_examples_channel_nchw_weight_as_input_quant8() {
+static std::vector<MixedTypedExample> examples_channel_nchw_weight_as_input_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -1223,8 +1329,11 @@
 },
 }, // End of an example
 };
+return examples_channel_nchw_weight_as_input_quant8;
+};
 
-std::vector<MixedTypedExample> examples_large_nhwc = {
+std::vector<MixedTypedExample>& get_examples_large_nhwc() {
+static std::vector<MixedTypedExample> examples_large_nhwc = {
 // Begin of an example
 {
 .operands = {
@@ -1257,8 +1366,11 @@
 },
 }, // End of an example
 };
+return examples_large_nhwc;
+};
 
-std::vector<MixedTypedExample> examples_large_nhwc_relaxed = {
+std::vector<MixedTypedExample>& get_examples_large_nhwc_relaxed() {
+static std::vector<MixedTypedExample> examples_large_nhwc_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -1291,8 +1403,11 @@
 },
 }, // End of an example
 };
+return examples_large_nhwc_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_large_nhwc_quant8 = {
+std::vector<MixedTypedExample>& get_examples_large_nhwc_quant8() {
+static std::vector<MixedTypedExample> examples_large_nhwc_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -1325,8 +1440,11 @@
 },
 }, // End of an example
 };
+return examples_large_nhwc_quant8;
+};
 
-std::vector<MixedTypedExample> examples_large_nhwc_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_large_nhwc_weight_as_input() {
+static std::vector<MixedTypedExample> examples_large_nhwc_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -1359,8 +1477,11 @@
 },
 }, // End of an example
 };
+return examples_large_nhwc_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_large_nhwc_weight_as_input_relaxed = {
+std::vector<MixedTypedExample>& get_examples_large_nhwc_weight_as_input_relaxed() {
+static std::vector<MixedTypedExample> examples_large_nhwc_weight_as_input_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -1393,8 +1514,11 @@
 },
 }, // End of an example
 };
+return examples_large_nhwc_weight_as_input_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_large_nhwc_weight_as_input_quant8 = {
+std::vector<MixedTypedExample>& get_examples_large_nhwc_weight_as_input_quant8() {
+static std::vector<MixedTypedExample> examples_large_nhwc_weight_as_input_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -1427,8 +1551,11 @@
 },
 }, // End of an example
 };
+return examples_large_nhwc_weight_as_input_quant8;
+};
 
-std::vector<MixedTypedExample> examples_large_nchw = {
+std::vector<MixedTypedExample>& get_examples_large_nchw() {
+static std::vector<MixedTypedExample> examples_large_nchw = {
 // Begin of an example
 {
 .operands = {
@@ -1461,8 +1588,11 @@
 },
 }, // End of an example
 };
+return examples_large_nchw;
+};
 
-std::vector<MixedTypedExample> examples_large_nchw_relaxed = {
+std::vector<MixedTypedExample>& get_examples_large_nchw_relaxed() {
+static std::vector<MixedTypedExample> examples_large_nchw_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -1495,8 +1625,11 @@
 },
 }, // End of an example
 };
+return examples_large_nchw_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_large_nchw_quant8 = {
+std::vector<MixedTypedExample>& get_examples_large_nchw_quant8() {
+static std::vector<MixedTypedExample> examples_large_nchw_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -1529,8 +1662,11 @@
 },
 }, // End of an example
 };
+return examples_large_nchw_quant8;
+};
 
-std::vector<MixedTypedExample> examples_large_nchw_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_large_nchw_weight_as_input() {
+static std::vector<MixedTypedExample> examples_large_nchw_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -1563,8 +1699,11 @@
 },
 }, // End of an example
 };
+return examples_large_nchw_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_large_nchw_weight_as_input_relaxed = {
+std::vector<MixedTypedExample>& get_examples_large_nchw_weight_as_input_relaxed() {
+static std::vector<MixedTypedExample> examples_large_nchw_weight_as_input_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -1597,8 +1736,11 @@
 },
 }, // End of an example
 };
+return examples_large_nchw_weight_as_input_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_large_nchw_weight_as_input_quant8 = {
+std::vector<MixedTypedExample>& get_examples_large_nchw_weight_as_input_quant8() {
+static std::vector<MixedTypedExample> examples_large_nchw_weight_as_input_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -1631,8 +1773,11 @@
 },
 }, // End of an example
 };
+return examples_large_nchw_weight_as_input_quant8;
+};
 
-std::vector<MixedTypedExample> examples_1_H3_W2_SAME_nhwc = {
+std::vector<MixedTypedExample>& get_examples_1_H3_W2_SAME_nhwc() {
+static std::vector<MixedTypedExample> examples_1_H3_W2_SAME_nhwc = {
 // Begin of an example
 {
 .operands = {
@@ -1696,8 +1841,11 @@
 },
 }, // End of an example
 };
+return examples_1_H3_W2_SAME_nhwc;
+};
 
-std::vector<MixedTypedExample> examples_1_H3_W2_SAME_nhwc_relaxed = {
+std::vector<MixedTypedExample>& get_examples_1_H3_W2_SAME_nhwc_relaxed() {
+static std::vector<MixedTypedExample> examples_1_H3_W2_SAME_nhwc_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -1761,8 +1909,11 @@
 },
 }, // End of an example
 };
+return examples_1_H3_W2_SAME_nhwc_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_1_H3_W2_SAME_nchw = {
+std::vector<MixedTypedExample>& get_examples_1_H3_W2_SAME_nchw() {
+static std::vector<MixedTypedExample> examples_1_H3_W2_SAME_nchw = {
 // Begin of an example
 {
 .operands = {
@@ -1826,8 +1977,11 @@
 },
 }, // End of an example
 };
+return examples_1_H3_W2_SAME_nchw;
+};
 
-std::vector<MixedTypedExample> examples_1_H3_W2_SAME_nchw_relaxed = {
+std::vector<MixedTypedExample>& get_examples_1_H3_W2_SAME_nchw_relaxed() {
+static std::vector<MixedTypedExample> examples_1_H3_W2_SAME_nchw_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -1891,8 +2045,11 @@
 },
 }, // End of an example
 };
+return examples_1_H3_W2_SAME_nchw_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_1_H3_W2_VALID_nhwc = {
+std::vector<MixedTypedExample>& get_examples_1_H3_W2_VALID_nhwc() {
+static std::vector<MixedTypedExample> examples_1_H3_W2_VALID_nhwc = {
 // Begin of an example
 {
 .operands = {
@@ -1956,8 +2113,11 @@
 },
 }, // End of an example
 };
+return examples_1_H3_W2_VALID_nhwc;
+};
 
-std::vector<MixedTypedExample> examples_1_H3_W2_VALID_nhwc_relaxed = {
+std::vector<MixedTypedExample>& get_examples_1_H3_W2_VALID_nhwc_relaxed() {
+static std::vector<MixedTypedExample> examples_1_H3_W2_VALID_nhwc_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -2021,8 +2181,11 @@
 },
 }, // End of an example
 };
+return examples_1_H3_W2_VALID_nhwc_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_1_H3_W2_VALID_nchw = {
+std::vector<MixedTypedExample>& get_examples_1_H3_W2_VALID_nchw() {
+static std::vector<MixedTypedExample> examples_1_H3_W2_VALID_nchw = {
 // Begin of an example
 {
 .operands = {
@@ -2086,8 +2249,11 @@
 },
 }, // End of an example
 };
+return examples_1_H3_W2_VALID_nchw;
+};
 
-std::vector<MixedTypedExample> examples_1_H3_W2_VALID_nchw_relaxed = {
+std::vector<MixedTypedExample>& get_examples_1_H3_W2_VALID_nchw_relaxed() {
+static std::vector<MixedTypedExample> examples_1_H3_W2_VALID_nchw_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -2151,8 +2317,11 @@
 },
 }, // End of an example
 };
+return examples_1_H3_W2_VALID_nchw_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_3_H3_W2_SAME_nhwc = {
+std::vector<MixedTypedExample>& get_examples_3_H3_W2_SAME_nhwc() {
+static std::vector<MixedTypedExample> examples_3_H3_W2_SAME_nhwc = {
 // Begin of an example
 {
 .operands = {
@@ -2216,8 +2385,11 @@
 },
 }, // End of an example
 };
+return examples_3_H3_W2_SAME_nhwc;
+};
 
-std::vector<MixedTypedExample> examples_3_H3_W2_SAME_nhwc_relaxed = {
+std::vector<MixedTypedExample>& get_examples_3_H3_W2_SAME_nhwc_relaxed() {
+static std::vector<MixedTypedExample> examples_3_H3_W2_SAME_nhwc_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -2281,8 +2453,11 @@
 },
 }, // End of an example
 };
+return examples_3_H3_W2_SAME_nhwc_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_3_H3_W2_SAME_nchw = {
+std::vector<MixedTypedExample>& get_examples_3_H3_W2_SAME_nchw() {
+static std::vector<MixedTypedExample> examples_3_H3_W2_SAME_nchw = {
 // Begin of an example
 {
 .operands = {
@@ -2346,8 +2521,11 @@
 },
 }, // End of an example
 };
+return examples_3_H3_W2_SAME_nchw;
+};
 
-std::vector<MixedTypedExample> examples_3_H3_W2_SAME_nchw_relaxed = {
+std::vector<MixedTypedExample>& get_examples_3_H3_W2_SAME_nchw_relaxed() {
+static std::vector<MixedTypedExample> examples_3_H3_W2_SAME_nchw_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -2411,8 +2589,11 @@
 },
 }, // End of an example
 };
+return examples_3_H3_W2_SAME_nchw_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_3_H3_W2_VALID_nhwc = {
+std::vector<MixedTypedExample>& get_examples_3_H3_W2_VALID_nhwc() {
+static std::vector<MixedTypedExample> examples_3_H3_W2_VALID_nhwc = {
 // Begin of an example
 {
 .operands = {
@@ -2476,8 +2657,11 @@
 },
 }, // End of an example
 };
+return examples_3_H3_W2_VALID_nhwc;
+};
 
-std::vector<MixedTypedExample> examples_3_H3_W2_VALID_nhwc_relaxed = {
+std::vector<MixedTypedExample>& get_examples_3_H3_W2_VALID_nhwc_relaxed() {
+static std::vector<MixedTypedExample> examples_3_H3_W2_VALID_nhwc_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -2541,8 +2725,11 @@
 },
 }, // End of an example
 };
+return examples_3_H3_W2_VALID_nhwc_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_3_H3_W2_VALID_nchw = {
+std::vector<MixedTypedExample>& get_examples_3_H3_W2_VALID_nchw() {
+static std::vector<MixedTypedExample> examples_3_H3_W2_VALID_nchw = {
 // Begin of an example
 {
 .operands = {
@@ -2606,8 +2793,11 @@
 },
 }, // End of an example
 };
+return examples_3_H3_W2_VALID_nchw;
+};
 
-std::vector<MixedTypedExample> examples_3_H3_W2_VALID_nchw_relaxed = {
+std::vector<MixedTypedExample>& get_examples_3_H3_W2_VALID_nchw_relaxed() {
+static std::vector<MixedTypedExample> examples_3_H3_W2_VALID_nchw_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -2671,4 +2861,6 @@
 },
 }, // End of an example
 };
+return examples_3_H3_W2_VALID_nchw_relaxed;
+};
 
diff --git a/nn/runtime/test/generated/examples/conv_1_h3_w2_SAME.example.cpp b/nn/runtime/test/generated/examples/conv_1_h3_w2_SAME.example.cpp
index 276874a..b31f825 100644
--- a/nn/runtime/test/generated/examples/conv_1_h3_w2_SAME.example.cpp
+++ b/nn/runtime/test/generated/examples/conv_1_h3_w2_SAME.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: conv_1_h3_w2_SAME.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -64,4 +65,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/conv_1_h3_w2_SAME_relaxed.example.cpp b/nn/runtime/test/generated/examples/conv_1_h3_w2_SAME_relaxed.example.cpp
index 9526d36..4ee9bed 100644
--- a/nn/runtime/test/generated/examples/conv_1_h3_w2_SAME_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/conv_1_h3_w2_SAME_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: conv_1_h3_w2_SAME_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -64,4 +65,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/conv_1_h3_w2_VALID.example.cpp b/nn/runtime/test/generated/examples/conv_1_h3_w2_VALID.example.cpp
index db5f6d1..7b48db8 100644
--- a/nn/runtime/test/generated/examples/conv_1_h3_w2_VALID.example.cpp
+++ b/nn/runtime/test/generated/examples/conv_1_h3_w2_VALID.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: conv_1_h3_w2_VALID.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -64,4 +65,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/conv_1_h3_w2_VALID_relaxed.example.cpp b/nn/runtime/test/generated/examples/conv_1_h3_w2_VALID_relaxed.example.cpp
index 502f101..11d3df3 100644
--- a/nn/runtime/test/generated/examples/conv_1_h3_w2_VALID_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/conv_1_h3_w2_VALID_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: conv_1_h3_w2_VALID_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -64,4 +65,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/conv_3_h3_w2_SAME.example.cpp b/nn/runtime/test/generated/examples/conv_3_h3_w2_SAME.example.cpp
index cbac90e..8586856 100644
--- a/nn/runtime/test/generated/examples/conv_3_h3_w2_SAME.example.cpp
+++ b/nn/runtime/test/generated/examples/conv_3_h3_w2_SAME.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: conv_3_h3_w2_SAME.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -64,4 +65,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/conv_3_h3_w2_SAME_relaxed.example.cpp b/nn/runtime/test/generated/examples/conv_3_h3_w2_SAME_relaxed.example.cpp
index d103091..b3c091f 100644
--- a/nn/runtime/test/generated/examples/conv_3_h3_w2_SAME_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/conv_3_h3_w2_SAME_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: conv_3_h3_w2_SAME_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -64,4 +65,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/conv_3_h3_w2_VALID.example.cpp b/nn/runtime/test/generated/examples/conv_3_h3_w2_VALID.example.cpp
index afac024..09a3ea5 100644
--- a/nn/runtime/test/generated/examples/conv_3_h3_w2_VALID.example.cpp
+++ b/nn/runtime/test/generated/examples/conv_3_h3_w2_VALID.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: conv_3_h3_w2_VALID.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -64,4 +65,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/conv_3_h3_w2_VALID_relaxed.example.cpp b/nn/runtime/test/generated/examples/conv_3_h3_w2_VALID_relaxed.example.cpp
index ca85ffe..3bc73ab 100644
--- a/nn/runtime/test/generated/examples/conv_3_h3_w2_VALID_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/conv_3_h3_w2_VALID_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: conv_3_h3_w2_VALID_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -64,4 +65,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/conv_float.example.cpp b/nn/runtime/test/generated/examples/conv_float.example.cpp
index 1818e0d..4d8a66f 100644
--- a/nn/runtime/test/generated/examples/conv_float.example.cpp
+++ b/nn/runtime/test/generated/examples/conv_float.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: conv_float.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/conv_float_2.example.cpp b/nn/runtime/test/generated/examples/conv_float_2.example.cpp
index b26a094..61f6d8a 100644
--- a/nn/runtime/test/generated/examples/conv_float_2.example.cpp
+++ b/nn/runtime/test/generated/examples/conv_float_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: conv_float_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/conv_float_2_relaxed.example.cpp b/nn/runtime/test/generated/examples/conv_float_2_relaxed.example.cpp
index b10cf5f..5557cb8 100644
--- a/nn/runtime/test/generated/examples/conv_float_2_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/conv_float_2_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: conv_float_2_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/conv_float_channels.example.cpp b/nn/runtime/test/generated/examples/conv_float_channels.example.cpp
index 5ae0b3c..04633c7 100644
--- a/nn/runtime/test/generated/examples/conv_float_channels.example.cpp
+++ b/nn/runtime/test/generated/examples/conv_float_channels.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: conv_float_channels.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/conv_float_channels_relaxed.example.cpp b/nn/runtime/test/generated/examples/conv_float_channels_relaxed.example.cpp
index af93fbc..497ed92 100644
--- a/nn/runtime/test/generated/examples/conv_float_channels_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/conv_float_channels_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: conv_float_channels_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/conv_float_channels_weights_as_inputs.example.cpp b/nn/runtime/test/generated/examples/conv_float_channels_weights_as_inputs.example.cpp
index 97a8160..e9b158d 100644
--- a/nn/runtime/test/generated/examples/conv_float_channels_weights_as_inputs.example.cpp
+++ b/nn/runtime/test/generated/examples/conv_float_channels_weights_as_inputs.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: conv_float_channels_weights_as_inputs.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/conv_float_channels_weights_as_inputs_relaxed.example.cpp b/nn/runtime/test/generated/examples/conv_float_channels_weights_as_inputs_relaxed.example.cpp
index a8b993b..eb6581d 100644
--- a/nn/runtime/test/generated/examples/conv_float_channels_weights_as_inputs_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/conv_float_channels_weights_as_inputs_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: conv_float_channels_weights_as_inputs_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/conv_float_large.example.cpp b/nn/runtime/test/generated/examples/conv_float_large.example.cpp
index f2ecb28..c126aee 100644
--- a/nn/runtime/test/generated/examples/conv_float_large.example.cpp
+++ b/nn/runtime/test/generated/examples/conv_float_large.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: conv_float_large.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/conv_float_large_relaxed.example.cpp b/nn/runtime/test/generated/examples/conv_float_large_relaxed.example.cpp
index e6575ce..1588633 100644
--- a/nn/runtime/test/generated/examples/conv_float_large_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/conv_float_large_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: conv_float_large_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/conv_float_large_weights_as_inputs.example.cpp b/nn/runtime/test/generated/examples/conv_float_large_weights_as_inputs.example.cpp
index 878e8ec..2e1e42f 100644
--- a/nn/runtime/test/generated/examples/conv_float_large_weights_as_inputs.example.cpp
+++ b/nn/runtime/test/generated/examples/conv_float_large_weights_as_inputs.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: conv_float_large_weights_as_inputs.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/conv_float_large_weights_as_inputs_relaxed.example.cpp b/nn/runtime/test/generated/examples/conv_float_large_weights_as_inputs_relaxed.example.cpp
index 29c692a..23626f0 100644
--- a/nn/runtime/test/generated/examples/conv_float_large_weights_as_inputs_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/conv_float_large_weights_as_inputs_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: conv_float_large_weights_as_inputs_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/conv_float_relaxed.example.cpp b/nn/runtime/test/generated/examples/conv_float_relaxed.example.cpp
index 26633d1..b4cc604 100644
--- a/nn/runtime/test/generated/examples/conv_float_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/conv_float_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: conv_float_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/conv_float_weights_as_inputs.example.cpp b/nn/runtime/test/generated/examples/conv_float_weights_as_inputs.example.cpp
index 8168d7d..38a57c0 100644
--- a/nn/runtime/test/generated/examples/conv_float_weights_as_inputs.example.cpp
+++ b/nn/runtime/test/generated/examples/conv_float_weights_as_inputs.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: conv_float_weights_as_inputs.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/conv_float_weights_as_inputs_relaxed.example.cpp b/nn/runtime/test/generated/examples/conv_float_weights_as_inputs_relaxed.example.cpp
index 368643a..8e94886 100644
--- a/nn/runtime/test/generated/examples/conv_float_weights_as_inputs_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/conv_float_weights_as_inputs_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: conv_float_weights_as_inputs_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/conv_quant8.example.cpp b/nn/runtime/test/generated/examples/conv_quant8.example.cpp
index 110e87e..c420c9e 100644
--- a/nn/runtime/test/generated/examples/conv_quant8.example.cpp
+++ b/nn/runtime/test/generated/examples/conv_quant8.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: conv_quant8.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/conv_quant8_2.example.cpp b/nn/runtime/test/generated/examples/conv_quant8_2.example.cpp
index 8573fee..f0511b3 100644
--- a/nn/runtime/test/generated/examples/conv_quant8_2.example.cpp
+++ b/nn/runtime/test/generated/examples/conv_quant8_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: conv_quant8_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/conv_quant8_channels.example.cpp b/nn/runtime/test/generated/examples/conv_quant8_channels.example.cpp
index 945e999..6aad156 100644
--- a/nn/runtime/test/generated/examples/conv_quant8_channels.example.cpp
+++ b/nn/runtime/test/generated/examples/conv_quant8_channels.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: conv_quant8_channels.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/conv_quant8_channels_weights_as_inputs.example.cpp b/nn/runtime/test/generated/examples/conv_quant8_channels_weights_as_inputs.example.cpp
index 940c43a..aaf68d3 100644
--- a/nn/runtime/test/generated/examples/conv_quant8_channels_weights_as_inputs.example.cpp
+++ b/nn/runtime/test/generated/examples/conv_quant8_channels_weights_as_inputs.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: conv_quant8_channels_weights_as_inputs.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/conv_quant8_large.example.cpp b/nn/runtime/test/generated/examples/conv_quant8_large.example.cpp
index 450c227..b84f851 100644
--- a/nn/runtime/test/generated/examples/conv_quant8_large.example.cpp
+++ b/nn/runtime/test/generated/examples/conv_quant8_large.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: conv_quant8_large.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/conv_quant8_large_weights_as_inputs.example.cpp b/nn/runtime/test/generated/examples/conv_quant8_large_weights_as_inputs.example.cpp
index 1e7788d..c70c07c 100644
--- a/nn/runtime/test/generated/examples/conv_quant8_large_weights_as_inputs.example.cpp
+++ b/nn/runtime/test/generated/examples/conv_quant8_large_weights_as_inputs.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: conv_quant8_large_weights_as_inputs.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/conv_quant8_overflow.example.cpp b/nn/runtime/test/generated/examples/conv_quant8_overflow.example.cpp
index bbfe27a..afad37a 100644
--- a/nn/runtime/test/generated/examples/conv_quant8_overflow.example.cpp
+++ b/nn/runtime/test/generated/examples/conv_quant8_overflow.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: conv_quant8_overflow.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/conv_quant8_overflow_weights_as_inputs.example.cpp b/nn/runtime/test/generated/examples/conv_quant8_overflow_weights_as_inputs.example.cpp
index 2740913..646a96a 100644
--- a/nn/runtime/test/generated/examples/conv_quant8_overflow_weights_as_inputs.example.cpp
+++ b/nn/runtime/test/generated/examples/conv_quant8_overflow_weights_as_inputs.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: conv_quant8_overflow_weights_as_inputs.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/conv_quant8_weights_as_inputs.example.cpp b/nn/runtime/test/generated/examples/conv_quant8_weights_as_inputs.example.cpp
index a09c2aa..f337667 100644
--- a/nn/runtime/test/generated/examples/conv_quant8_weights_as_inputs.example.cpp
+++ b/nn/runtime/test/generated/examples/conv_quant8_weights_as_inputs.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: conv_quant8_weights_as_inputs.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/depth_to_space_float_1.example.cpp b/nn/runtime/test/generated/examples/depth_to_space_float_1.example.cpp
index fed858a..995d38d 100644
--- a/nn/runtime/test/generated/examples/depth_to_space_float_1.example.cpp
+++ b/nn/runtime/test/generated/examples/depth_to_space_float_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: depth_to_space_float_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/depth_to_space_float_1_relaxed.example.cpp b/nn/runtime/test/generated/examples/depth_to_space_float_1_relaxed.example.cpp
index 8e84175..7b2a233 100644
--- a/nn/runtime/test/generated/examples/depth_to_space_float_1_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/depth_to_space_float_1_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: depth_to_space_float_1_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/depth_to_space_float_2.example.cpp b/nn/runtime/test/generated/examples/depth_to_space_float_2.example.cpp
index 5878dfb..91e6f42 100644
--- a/nn/runtime/test/generated/examples/depth_to_space_float_2.example.cpp
+++ b/nn/runtime/test/generated/examples/depth_to_space_float_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: depth_to_space_float_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/depth_to_space_float_2_relaxed.example.cpp b/nn/runtime/test/generated/examples/depth_to_space_float_2_relaxed.example.cpp
index 1fa95b2..e02a421 100644
--- a/nn/runtime/test/generated/examples/depth_to_space_float_2_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/depth_to_space_float_2_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: depth_to_space_float_2_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/depth_to_space_float_3.example.cpp b/nn/runtime/test/generated/examples/depth_to_space_float_3.example.cpp
index 72f54bc..1417ea6 100644
--- a/nn/runtime/test/generated/examples/depth_to_space_float_3.example.cpp
+++ b/nn/runtime/test/generated/examples/depth_to_space_float_3.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: depth_to_space_float_3.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/depth_to_space_float_3_relaxed.example.cpp b/nn/runtime/test/generated/examples/depth_to_space_float_3_relaxed.example.cpp
index b77c4d7..6953241 100644
--- a/nn/runtime/test/generated/examples/depth_to_space_float_3_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/depth_to_space_float_3_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: depth_to_space_float_3_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/depth_to_space_quant8_1.example.cpp b/nn/runtime/test/generated/examples/depth_to_space_quant8_1.example.cpp
index caec79f..28402b9 100644
--- a/nn/runtime/test/generated/examples/depth_to_space_quant8_1.example.cpp
+++ b/nn/runtime/test/generated/examples/depth_to_space_quant8_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: depth_to_space_quant8_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/depth_to_space_quant8_2.example.cpp b/nn/runtime/test/generated/examples/depth_to_space_quant8_2.example.cpp
index 31e0d75..56d661d 100644
--- a/nn/runtime/test/generated/examples/depth_to_space_quant8_2.example.cpp
+++ b/nn/runtime/test/generated/examples/depth_to_space_quant8_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: depth_to_space_quant8_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/depth_to_space_v1_2.example.cpp b/nn/runtime/test/generated/examples/depth_to_space_v1_2.example.cpp
index a864858..4de5f70 100644
--- a/nn/runtime/test/generated/examples/depth_to_space_v1_2.example.cpp
+++ b/nn/runtime/test/generated/examples/depth_to_space_v1_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: depth_to_space_v1_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples_nhwc = {
+std::vector<MixedTypedExample>& get_examples_nhwc() {
+static std::vector<MixedTypedExample> examples_nhwc = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,48 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_float16() {
+static std::vector<MixedTypedExample> examples_nhwc_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.399999976158142f, 2.299999952316284f, 3.200000047683716f, 4.099999904632568f, 5.400000095367432f, 6.300000190734863f, 7.199999809265137f, 8.100000381469727f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.399999976158142f, 2.299999952316284f, 3.200000047683716f, 4.099999904632568f, 5.400000095367432f, 6.300000190734863f, 7.199999809265137f, 8.100000381469727f}}},
+}
+},
+}, // End of an example
+};
+return examples_nhwc_float16;
+};
+
+std::vector<MixedTypedExample>& get_examples_nhwc_quant8() {
+static std::vector<MixedTypedExample> examples_nhwc_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -101,8 +145,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nchw = {
+std::vector<MixedTypedExample>& get_examples_nchw() {
+static std::vector<MixedTypedExample> examples_nchw = {
 // Begin of an example
 {
 .operands = {
@@ -135,8 +182,11 @@
 },
 }, // End of an example
 };
+return examples_nchw;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -169,8 +219,48 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nchw_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nchw_float16() {
+static std::vector<MixedTypedExample> examples_nchw_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.399999976158142f, 2.299999952316284f, 3.200000047683716f, 4.099999904632568f, 5.400000095367432f, 6.300000190734863f, 7.199999809265137f, 8.100000381469727f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.399999976158142f, 3.200000047683716f, 5.400000095367432f, 7.199999809265137f, 2.299999952316284f, 4.099999904632568f, 6.300000190734863f, 8.100000381469727f}}},
+}
+},
+}, // End of an example
+};
+return examples_nchw_float16;
+};
+
+std::vector<MixedTypedExample>& get_examples_nchw_quant8() {
+static std::vector<MixedTypedExample> examples_nchw_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -203,8 +293,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_2() {
+static std::vector<MixedTypedExample> examples_nhwc_2 = {
 // Begin of an example
 {
 .operands = {
@@ -237,8 +330,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed_2() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -271,8 +367,48 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_quant8_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_float16_2() {
+static std::vector<MixedTypedExample> examples_nhwc_float16_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 2.0f, 5.0f, 6.0f, 3.0f, 4.0f, 7.0f, 8.0f, 9.0f, 10.0f, 13.0f, 14.0f, 11.0f, 12.0f, 15.0f, 16.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f}}},
+}
+},
+}, // End of an example
+};
+return examples_nhwc_float16_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_nhwc_quant8_2() {
+static std::vector<MixedTypedExample> examples_nhwc_quant8_2 = {
 // Begin of an example
 {
 .operands = {
@@ -305,8 +441,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_quant8_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_2() {
+static std::vector<MixedTypedExample> examples_nchw_2 = {
 // Begin of an example
 {
 .operands = {
@@ -339,8 +478,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed_2() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -373,8 +515,48 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_quant8_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_float16_2() {
+static std::vector<MixedTypedExample> examples_nchw_float16_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 3.0f, 9.0f, 11.0f, 2.0f, 4.0f, 10.0f, 12.0f, 5.0f, 7.0f, 13.0f, 15.0f, 6.0f, 8.0f, 14.0f, 16.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f}}},
+}
+},
+}, // End of an example
+};
+return examples_nchw_float16_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_nchw_quant8_2() {
+static std::vector<MixedTypedExample> examples_nchw_quant8_2 = {
 // Begin of an example
 {
 .operands = {
@@ -407,8 +589,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_quant8_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_3 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_3() {
+static std::vector<MixedTypedExample> examples_nhwc_3 = {
 // Begin of an example
 {
 .operands = {
@@ -441,8 +626,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_3;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed_3 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed_3() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed_3 = {
 // Begin of an example
 {
 .operands = {
@@ -475,8 +663,48 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed_3;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_quant8_3 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_float16_3() {
+static std::vector<MixedTypedExample> examples_nhwc_float16_3 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {10.0f, 20.0f, 11.0f, 21.0f, 14.0f, 24.0f, 15.0f, 25.0f, 12.0f, 22.0f, 13.0f, 23.0f, 16.0f, 26.0f, 17.0f, 27.0f, 18.0f, 28.0f, 19.0f, 29.0f, 112.0f, 212.0f, 113.0f, 213.0f, 110.0f, 210.0f, 111.0f, 211.0f, 114.0f, 214.0f, 115.0f, 215.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {10.0f, 20.0f, 11.0f, 21.0f, 12.0f, 22.0f, 13.0f, 23.0f, 14.0f, 24.0f, 15.0f, 25.0f, 16.0f, 26.0f, 17.0f, 27.0f, 18.0f, 28.0f, 19.0f, 29.0f, 110.0f, 210.0f, 111.0f, 211.0f, 112.0f, 212.0f, 113.0f, 213.0f, 114.0f, 214.0f, 115.0f, 215.0f}}},
+}
+},
+}, // End of an example
+};
+return examples_nhwc_float16_3;
+};
+
+std::vector<MixedTypedExample>& get_examples_nhwc_quant8_3() {
+static std::vector<MixedTypedExample> examples_nhwc_quant8_3 = {
 // Begin of an example
 {
 .operands = {
@@ -509,8 +737,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_quant8_3;
+};
 
-std::vector<MixedTypedExample> examples_nchw_3 = {
+std::vector<MixedTypedExample>& get_examples_nchw_3() {
+static std::vector<MixedTypedExample> examples_nchw_3 = {
 // Begin of an example
 {
 .operands = {
@@ -543,8 +774,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_3;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed_3 = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed_3() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed_3 = {
 // Begin of an example
 {
 .operands = {
@@ -577,8 +811,48 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed_3;
+};
 
-std::vector<MixedTypedExample> examples_nchw_quant8_3 = {
+std::vector<MixedTypedExample>& get_examples_nchw_float16_3() {
+static std::vector<MixedTypedExample> examples_nchw_float16_3 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {10.0f, 12.0f, 18.0f, 110.0f, 20.0f, 22.0f, 28.0f, 210.0f, 11.0f, 13.0f, 19.0f, 111.0f, 21.0f, 23.0f, 29.0f, 211.0f, 14.0f, 16.0f, 112.0f, 114.0f, 24.0f, 26.0f, 212.0f, 214.0f, 15.0f, 17.0f, 113.0f, 115.0f, 25.0f, 27.0f, 213.0f, 215.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 110.0f, 111.0f, 112.0f, 113.0f, 114.0f, 115.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f, 27.0f, 28.0f, 29.0f, 210.0f, 211.0f, 212.0f, 213.0f, 214.0f, 215.0f}}},
+}
+},
+}, // End of an example
+};
+return examples_nchw_float16_3;
+};
+
+std::vector<MixedTypedExample>& get_examples_nchw_quant8_3() {
+static std::vector<MixedTypedExample> examples_nchw_quant8_3 = {
 // Begin of an example
 {
 .operands = {
@@ -611,4 +885,6 @@
 },
 }, // End of an example
 };
+return examples_nchw_quant8_3;
+};
 
diff --git a/nn/runtime/test/generated/examples/depthwise_conv.example.cpp b/nn/runtime/test/generated/examples/depthwise_conv.example.cpp
index 4fffaf2..09144cc 100644
--- a/nn/runtime/test/generated/examples/depthwise_conv.example.cpp
+++ b/nn/runtime/test/generated/examples/depthwise_conv.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: depthwise_conv.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -64,4 +65,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/depthwise_conv2d_float.example.cpp b/nn/runtime/test/generated/examples/depthwise_conv2d_float.example.cpp
index d007adf..ce640c2 100644
--- a/nn/runtime/test/generated/examples/depthwise_conv2d_float.example.cpp
+++ b/nn/runtime/test/generated/examples/depthwise_conv2d_float.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: depthwise_conv2d_float.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/depthwise_conv2d_float_2.example.cpp b/nn/runtime/test/generated/examples/depthwise_conv2d_float_2.example.cpp
index a401626..39d6d68 100644
--- a/nn/runtime/test/generated/examples/depthwise_conv2d_float_2.example.cpp
+++ b/nn/runtime/test/generated/examples/depthwise_conv2d_float_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: depthwise_conv2d_float_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/depthwise_conv2d_float_2_relaxed.example.cpp b/nn/runtime/test/generated/examples/depthwise_conv2d_float_2_relaxed.example.cpp
index b724cd3..47bd55a 100644
--- a/nn/runtime/test/generated/examples/depthwise_conv2d_float_2_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/depthwise_conv2d_float_2_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: depthwise_conv2d_float_2_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/depthwise_conv2d_float_large.example.cpp b/nn/runtime/test/generated/examples/depthwise_conv2d_float_large.example.cpp
index aefc862..b6bd386 100644
--- a/nn/runtime/test/generated/examples/depthwise_conv2d_float_large.example.cpp
+++ b/nn/runtime/test/generated/examples/depthwise_conv2d_float_large.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: depthwise_conv2d_float_large.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_2.example.cpp b/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_2.example.cpp
index 6304457..da26d9e 100644
--- a/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_2.example.cpp
+++ b/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: depthwise_conv2d_float_large_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_2_relaxed.example.cpp b/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_2_relaxed.example.cpp
index fe13eab..32548bd 100644
--- a/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_2_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_2_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: depthwise_conv2d_float_large_2_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_2_weights_as_inputs.example.cpp b/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_2_weights_as_inputs.example.cpp
index 956fe10..d062ab9 100644
--- a/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_2_weights_as_inputs.example.cpp
+++ b/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_2_weights_as_inputs.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: depthwise_conv2d_float_large_2_weights_as_inputs.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_2_weights_as_inputs_relaxed.example.cpp b/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_2_weights_as_inputs_relaxed.example.cpp
index fd94e9e..0ebe701 100644
--- a/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_2_weights_as_inputs_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_2_weights_as_inputs_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: depthwise_conv2d_float_large_2_weights_as_inputs_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_relaxed.example.cpp b/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_relaxed.example.cpp
index 7181648..6a6eb15 100644
--- a/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: depthwise_conv2d_float_large_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_weights_as_inputs.example.cpp b/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_weights_as_inputs.example.cpp
index ed76720..44fbeb6 100644
--- a/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_weights_as_inputs.example.cpp
+++ b/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_weights_as_inputs.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: depthwise_conv2d_float_large_weights_as_inputs.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_weights_as_inputs_relaxed.example.cpp b/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_weights_as_inputs_relaxed.example.cpp
index db712c9..374add6 100644
--- a/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_weights_as_inputs_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_weights_as_inputs_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: depthwise_conv2d_float_large_weights_as_inputs_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/depthwise_conv2d_float_relaxed.example.cpp b/nn/runtime/test/generated/examples/depthwise_conv2d_float_relaxed.example.cpp
index a35e022..788a461 100644
--- a/nn/runtime/test/generated/examples/depthwise_conv2d_float_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/depthwise_conv2d_float_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: depthwise_conv2d_float_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/depthwise_conv2d_float_weights_as_inputs.example.cpp b/nn/runtime/test/generated/examples/depthwise_conv2d_float_weights_as_inputs.example.cpp
index 843d394..7be027a 100644
--- a/nn/runtime/test/generated/examples/depthwise_conv2d_float_weights_as_inputs.example.cpp
+++ b/nn/runtime/test/generated/examples/depthwise_conv2d_float_weights_as_inputs.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: depthwise_conv2d_float_weights_as_inputs.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/depthwise_conv2d_float_weights_as_inputs_relaxed.example.cpp b/nn/runtime/test/generated/examples/depthwise_conv2d_float_weights_as_inputs_relaxed.example.cpp
index 202d1a1..0056051 100644
--- a/nn/runtime/test/generated/examples/depthwise_conv2d_float_weights_as_inputs_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/depthwise_conv2d_float_weights_as_inputs_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: depthwise_conv2d_float_weights_as_inputs_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/depthwise_conv2d_quant8.example.cpp b/nn/runtime/test/generated/examples/depthwise_conv2d_quant8.example.cpp
index bd4bea0..83edaa5 100644
--- a/nn/runtime/test/generated/examples/depthwise_conv2d_quant8.example.cpp
+++ b/nn/runtime/test/generated/examples/depthwise_conv2d_quant8.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: depthwise_conv2d_quant8.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/depthwise_conv2d_quant8_2.example.cpp b/nn/runtime/test/generated/examples/depthwise_conv2d_quant8_2.example.cpp
index 1a5b071..75e0a3d 100644
--- a/nn/runtime/test/generated/examples/depthwise_conv2d_quant8_2.example.cpp
+++ b/nn/runtime/test/generated/examples/depthwise_conv2d_quant8_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: depthwise_conv2d_quant8_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/depthwise_conv2d_quant8_large.example.cpp b/nn/runtime/test/generated/examples/depthwise_conv2d_quant8_large.example.cpp
index 1acd026..a21dd16 100644
--- a/nn/runtime/test/generated/examples/depthwise_conv2d_quant8_large.example.cpp
+++ b/nn/runtime/test/generated/examples/depthwise_conv2d_quant8_large.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: depthwise_conv2d_quant8_large.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/depthwise_conv2d_quant8_large_weights_as_inputs.example.cpp b/nn/runtime/test/generated/examples/depthwise_conv2d_quant8_large_weights_as_inputs.example.cpp
index 085d414..53a885b 100644
--- a/nn/runtime/test/generated/examples/depthwise_conv2d_quant8_large_weights_as_inputs.example.cpp
+++ b/nn/runtime/test/generated/examples/depthwise_conv2d_quant8_large_weights_as_inputs.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: depthwise_conv2d_quant8_large_weights_as_inputs.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/depthwise_conv2d_quant8_weights_as_inputs.example.cpp b/nn/runtime/test/generated/examples/depthwise_conv2d_quant8_weights_as_inputs.example.cpp
index 6ac634e..2f200ed 100644
--- a/nn/runtime/test/generated/examples/depthwise_conv2d_quant8_weights_as_inputs.example.cpp
+++ b/nn/runtime/test/generated/examples/depthwise_conv2d_quant8_weights_as_inputs.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: depthwise_conv2d_quant8_weights_as_inputs.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/depthwise_conv2d_v1_2.example.cpp b/nn/runtime/test/generated/examples/depthwise_conv2d_v1_2.example.cpp
index af9249d..ec696e9 100644
--- a/nn/runtime/test/generated/examples/depthwise_conv2d_v1_2.example.cpp
+++ b/nn/runtime/test/generated/examples/depthwise_conv2d_v1_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: depthwise_conv2d_v1_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples_nhwc = {
+std::vector<MixedTypedExample>& get_examples_nhwc() {
+static std::vector<MixedTypedExample> examples_nhwc = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,48 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_float16() {
+static std::vector<MixedTypedExample> examples_nhwc_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {10.0f, 21.0f, 10.0f, 22.0f, 10.0f, 23.0f, 10.0f, 24.0f, 10.0f, 25.0f, 10.0f, 26.0f, 10.0f, 27.0f, 10.0f, 28.0f, 10.0f, 29.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {11.0f, 3.0f, 7.199999809265137f, 10.600000381469727f, 11.0f, 3.0f, 7.400000095367432f, 10.899999618530273f, 11.0f, 3.0f, 7.800000190734863f, 11.5f, 11.0f, 3.0f, 8.0f, 11.800000190734863f}}},
+}
+},
+}, // End of an example
+};
+return examples_nhwc_float16;
+};
+
+std::vector<MixedTypedExample>& get_examples_nhwc_quant8() {
+static std::vector<MixedTypedExample> examples_nhwc_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -101,8 +145,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nhwc_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nhwc_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -135,8 +182,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_weight_as_input_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nhwc_weight_as_input_relaxed() {
+static std::vector<MixedTypedExample> examples_nhwc_weight_as_input_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -169,8 +219,48 @@
 },
 }, // End of an example
 };
+return examples_nhwc_weight_as_input_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_weight_as_input_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_weight_as_input_float16() {
+static std::vector<MixedTypedExample> examples_nhwc_weight_as_input_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {10.0f, 21.0f, 10.0f, 22.0f, 10.0f, 23.0f, 10.0f, 24.0f, 10.0f, 25.0f, 10.0f, 26.0f, 10.0f, 27.0f, 10.0f, 28.0f, 10.0f, 29.0f}}, {1, {0.25f, 0.0f, 0.20000000298023224f, 0.0f, 0.25f, 0.0f, 0.0f, 0.30000001192092896f, 0.25f, 0.0f, 0.0f, 0.0f, 0.25f, 0.10000000149011612f, 0.0f, 0.0f}}, {2, {1.0f, 2.0f, 3.0f, 4.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {11.0f, 3.0f, 7.199999809265137f, 10.600000381469727f, 11.0f, 3.0f, 7.400000095367432f, 10.899999618530273f, 11.0f, 3.0f, 7.800000190734863f, 11.5f, 11.0f, 3.0f, 8.0f, 11.800000190734863f}}},
+}
+},
+}, // End of an example
+};
+return examples_nhwc_weight_as_input_float16;
+};
+
+std::vector<MixedTypedExample>& get_examples_nhwc_weight_as_input_quant8() {
+static std::vector<MixedTypedExample> examples_nhwc_weight_as_input_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -203,8 +293,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_weight_as_input_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nchw = {
+std::vector<MixedTypedExample>& get_examples_nchw() {
+static std::vector<MixedTypedExample> examples_nchw = {
 // Begin of an example
 {
 .operands = {
@@ -237,8 +330,11 @@
 },
 }, // End of an example
 };
+return examples_nchw;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -271,8 +367,48 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nchw_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nchw_float16() {
+static std::vector<MixedTypedExample> examples_nchw_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {10.0f, 10.0f, 10.0f, 10.0f, 10.0f, 10.0f, 10.0f, 10.0f, 10.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f, 27.0f, 28.0f, 29.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {11.0f, 11.0f, 11.0f, 11.0f, 3.0f, 3.0f, 3.0f, 3.0f, 7.199999809265137f, 7.400000095367432f, 7.800000190734863f, 8.0f, 10.600000381469727f, 10.899999618530273f, 11.5f, 11.800000190734863f}}},
+}
+},
+}, // End of an example
+};
+return examples_nchw_float16;
+};
+
+std::vector<MixedTypedExample>& get_examples_nchw_quant8() {
+static std::vector<MixedTypedExample> examples_nchw_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -305,8 +441,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nchw_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nchw_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nchw_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -339,8 +478,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nchw_weight_as_input_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nchw_weight_as_input_relaxed() {
+static std::vector<MixedTypedExample> examples_nchw_weight_as_input_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -373,8 +515,48 @@
 },
 }, // End of an example
 };
+return examples_nchw_weight_as_input_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nchw_weight_as_input_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nchw_weight_as_input_float16() {
+static std::vector<MixedTypedExample> examples_nchw_weight_as_input_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {10.0f, 10.0f, 10.0f, 10.0f, 10.0f, 10.0f, 10.0f, 10.0f, 10.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f, 27.0f, 28.0f, 29.0f}}, {1, {0.25f, 0.0f, 0.20000000298023224f, 0.0f, 0.25f, 0.0f, 0.0f, 0.30000001192092896f, 0.25f, 0.0f, 0.0f, 0.0f, 0.25f, 0.10000000149011612f, 0.0f, 0.0f}}, {2, {1.0f, 2.0f, 3.0f, 4.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {11.0f, 11.0f, 11.0f, 11.0f, 3.0f, 3.0f, 3.0f, 3.0f, 7.199999809265137f, 7.400000095367432f, 7.800000190734863f, 8.0f, 10.600000381469727f, 10.899999618530273f, 11.5f, 11.800000190734863f}}},
+}
+},
+}, // End of an example
+};
+return examples_nchw_weight_as_input_float16;
+};
+
+std::vector<MixedTypedExample>& get_examples_nchw_weight_as_input_quant8() {
+static std::vector<MixedTypedExample> examples_nchw_weight_as_input_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -407,8 +589,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_weight_as_input_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_2() {
+static std::vector<MixedTypedExample> examples_nhwc_2 = {
 // Begin of an example
 {
 .operands = {
@@ -441,8 +626,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed_2() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -475,8 +663,48 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_quant8_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_float16_2() {
+static std::vector<MixedTypedExample> examples_nhwc_float16_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 2.0f, 7.0f, 8.0f, 3.0f, 4.0f, 9.0f, 10.0f, 5.0f, 6.0f, 11.0f, 12.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {71.0f, -34.0f, 99.0f, -20.0f, 91.0f, -26.0f, 127.0f, -4.0f}}},
+}
+},
+}, // End of an example
+};
+return examples_nhwc_float16_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_nhwc_quant8_2() {
+static std::vector<MixedTypedExample> examples_nhwc_quant8_2 = {
 // Begin of an example
 {
 .operands = {
@@ -509,8 +737,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_quant8_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_weight_as_input_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_weight_as_input_2() {
+static std::vector<MixedTypedExample> examples_nhwc_weight_as_input_2 = {
 // Begin of an example
 {
 .operands = {
@@ -543,8 +774,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_weight_as_input_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_weight_as_input_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_weight_as_input_relaxed_2() {
+static std::vector<MixedTypedExample> examples_nhwc_weight_as_input_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -577,8 +811,48 @@
 },
 }, // End of an example
 };
+return examples_nhwc_weight_as_input_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_weight_as_input_quant8_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_weight_as_input_float16_2() {
+static std::vector<MixedTypedExample> examples_nhwc_weight_as_input_float16_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 2.0f, 7.0f, 8.0f, 3.0f, 4.0f, 9.0f, 10.0f, 5.0f, 6.0f, 11.0f, 12.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f, -9.0f, 10.0f, -11.0f, 12.0f, 5.0f, 6.0f, 7.0f, 8.0f, 13.0f, -14.0f, 15.0f, -16.0f}}, {2, {1.0f, 2.0f, 3.0f, 4.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {71.0f, -34.0f, 99.0f, -20.0f, 91.0f, -26.0f, 127.0f, -4.0f}}},
+}
+},
+}, // End of an example
+};
+return examples_nhwc_weight_as_input_float16_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_nhwc_weight_as_input_quant8_2() {
+static std::vector<MixedTypedExample> examples_nhwc_weight_as_input_quant8_2 = {
 // Begin of an example
 {
 .operands = {
@@ -611,8 +885,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_weight_as_input_quant8_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_2() {
+static std::vector<MixedTypedExample> examples_nchw_2 = {
 // Begin of an example
 {
 .operands = {
@@ -645,8 +922,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed_2() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -679,8 +959,48 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_quant8_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_float16_2() {
+static std::vector<MixedTypedExample> examples_nchw_float16_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 7.0f, 3.0f, 9.0f, 5.0f, 11.0f, 2.0f, 8.0f, 4.0f, 10.0f, 6.0f, 12.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {71.0f, 91.0f, -34.0f, -26.0f, 99.0f, 127.0f, -20.0f, -4.0f}}},
+}
+},
+}, // End of an example
+};
+return examples_nchw_float16_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_nchw_quant8_2() {
+static std::vector<MixedTypedExample> examples_nchw_quant8_2 = {
 // Begin of an example
 {
 .operands = {
@@ -713,8 +1033,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_quant8_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_weight_as_input_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_weight_as_input_2() {
+static std::vector<MixedTypedExample> examples_nchw_weight_as_input_2 = {
 // Begin of an example
 {
 .operands = {
@@ -747,8 +1070,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_weight_as_input_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_weight_as_input_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_weight_as_input_relaxed_2() {
+static std::vector<MixedTypedExample> examples_nchw_weight_as_input_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -781,8 +1107,48 @@
 },
 }, // End of an example
 };
+return examples_nchw_weight_as_input_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_weight_as_input_quant8_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_weight_as_input_float16_2() {
+static std::vector<MixedTypedExample> examples_nchw_weight_as_input_float16_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 7.0f, 3.0f, 9.0f, 5.0f, 11.0f, 2.0f, 8.0f, 4.0f, 10.0f, 6.0f, 12.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f, -9.0f, 10.0f, -11.0f, 12.0f, 5.0f, 6.0f, 7.0f, 8.0f, 13.0f, -14.0f, 15.0f, -16.0f}}, {2, {1.0f, 2.0f, 3.0f, 4.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {71.0f, 91.0f, -34.0f, -26.0f, 99.0f, 127.0f, -20.0f, -4.0f}}},
+}
+},
+}, // End of an example
+};
+return examples_nchw_weight_as_input_float16_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_nchw_weight_as_input_quant8_2() {
+static std::vector<MixedTypedExample> examples_nchw_weight_as_input_quant8_2 = {
 // Begin of an example
 {
 .operands = {
@@ -815,8 +1181,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_weight_as_input_quant8_2;
+};
 
-std::vector<MixedTypedExample> examples_large_nhwc = {
+std::vector<MixedTypedExample>& get_examples_large_nhwc() {
+static std::vector<MixedTypedExample> examples_large_nhwc = {
 // Begin of an example
 {
 .operands = {
@@ -849,8 +1218,11 @@
 },
 }, // End of an example
 };
+return examples_large_nhwc;
+};
 
-std::vector<MixedTypedExample> examples_large_nhwc_relaxed = {
+std::vector<MixedTypedExample>& get_examples_large_nhwc_relaxed() {
+static std::vector<MixedTypedExample> examples_large_nhwc_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -883,8 +1255,48 @@
 },
 }, // End of an example
 };
+return examples_large_nhwc_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_large_nhwc_quant8 = {
+std::vector<MixedTypedExample>& get_examples_large_nhwc_float16() {
+static std::vector<MixedTypedExample> examples_large_nhwc_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {10.0f, 21.0f, 10.0f, 22.0f, 10.0f, 23.0f, 10.0f, 24.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {110.0f, 246.0f}}},
+}
+},
+}, // End of an example
+};
+return examples_large_nhwc_float16;
+};
+
+std::vector<MixedTypedExample>& get_examples_large_nhwc_quant8() {
+static std::vector<MixedTypedExample> examples_large_nhwc_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -917,8 +1329,11 @@
 },
 }, // End of an example
 };
+return examples_large_nhwc_quant8;
+};
 
-std::vector<MixedTypedExample> examples_large_nhwc_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_large_nhwc_weight_as_input() {
+static std::vector<MixedTypedExample> examples_large_nhwc_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -951,8 +1366,11 @@
 },
 }, // End of an example
 };
+return examples_large_nhwc_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_large_nhwc_weight_as_input_relaxed = {
+std::vector<MixedTypedExample>& get_examples_large_nhwc_weight_as_input_relaxed() {
+static std::vector<MixedTypedExample> examples_large_nhwc_weight_as_input_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -985,8 +1403,48 @@
 },
 }, // End of an example
 };
+return examples_large_nhwc_weight_as_input_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_large_nhwc_weight_as_input_quant8 = {
+std::vector<MixedTypedExample>& get_examples_large_nhwc_weight_as_input_float16() {
+static std::vector<MixedTypedExample> examples_large_nhwc_weight_as_input_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {10.0f, 21.0f, 10.0f, 22.0f, 10.0f, 23.0f, 10.0f, 24.0f}}, {1, {0.25f, 0.0f, 0.25f, 1.0f, 0.25f, 0.0f, 0.25f, 1.0f}}, {2, {100.0f, 200.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {110.0f, 246.0f}}},
+}
+},
+}, // End of an example
+};
+return examples_large_nhwc_weight_as_input_float16;
+};
+
+std::vector<MixedTypedExample>& get_examples_large_nhwc_weight_as_input_quant8() {
+static std::vector<MixedTypedExample> examples_large_nhwc_weight_as_input_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -1019,8 +1477,11 @@
 },
 }, // End of an example
 };
+return examples_large_nhwc_weight_as_input_quant8;
+};
 
-std::vector<MixedTypedExample> examples_large_nchw = {
+std::vector<MixedTypedExample>& get_examples_large_nchw() {
+static std::vector<MixedTypedExample> examples_large_nchw = {
 // Begin of an example
 {
 .operands = {
@@ -1053,8 +1514,11 @@
 },
 }, // End of an example
 };
+return examples_large_nchw;
+};
 
-std::vector<MixedTypedExample> examples_large_nchw_relaxed = {
+std::vector<MixedTypedExample>& get_examples_large_nchw_relaxed() {
+static std::vector<MixedTypedExample> examples_large_nchw_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -1087,8 +1551,48 @@
 },
 }, // End of an example
 };
+return examples_large_nchw_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_large_nchw_quant8 = {
+std::vector<MixedTypedExample>& get_examples_large_nchw_float16() {
+static std::vector<MixedTypedExample> examples_large_nchw_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {10.0f, 10.0f, 10.0f, 10.0f, 21.0f, 22.0f, 23.0f, 24.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {110.0f, 246.0f}}},
+}
+},
+}, // End of an example
+};
+return examples_large_nchw_float16;
+};
+
+std::vector<MixedTypedExample>& get_examples_large_nchw_quant8() {
+static std::vector<MixedTypedExample> examples_large_nchw_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -1121,8 +1625,11 @@
 },
 }, // End of an example
 };
+return examples_large_nchw_quant8;
+};
 
-std::vector<MixedTypedExample> examples_large_nchw_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_large_nchw_weight_as_input() {
+static std::vector<MixedTypedExample> examples_large_nchw_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -1155,8 +1662,11 @@
 },
 }, // End of an example
 };
+return examples_large_nchw_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_large_nchw_weight_as_input_relaxed = {
+std::vector<MixedTypedExample>& get_examples_large_nchw_weight_as_input_relaxed() {
+static std::vector<MixedTypedExample> examples_large_nchw_weight_as_input_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -1189,8 +1699,48 @@
 },
 }, // End of an example
 };
+return examples_large_nchw_weight_as_input_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_large_nchw_weight_as_input_quant8 = {
+std::vector<MixedTypedExample>& get_examples_large_nchw_weight_as_input_float16() {
+static std::vector<MixedTypedExample> examples_large_nchw_weight_as_input_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {10.0f, 10.0f, 10.0f, 10.0f, 21.0f, 22.0f, 23.0f, 24.0f}}, {1, {0.25f, 0.0f, 0.25f, 1.0f, 0.25f, 0.0f, 0.25f, 1.0f}}, {2, {100.0f, 200.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {110.0f, 246.0f}}},
+}
+},
+}, // End of an example
+};
+return examples_large_nchw_weight_as_input_float16;
+};
+
+std::vector<MixedTypedExample>& get_examples_large_nchw_weight_as_input_quant8() {
+static std::vector<MixedTypedExample> examples_large_nchw_weight_as_input_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -1223,8 +1773,11 @@
 },
 }, // End of an example
 };
+return examples_large_nchw_weight_as_input_quant8;
+};
 
-std::vector<MixedTypedExample> examples_large_nhwc_2 = {
+std::vector<MixedTypedExample>& get_examples_large_nhwc_2() {
+static std::vector<MixedTypedExample> examples_large_nhwc_2 = {
 // Begin of an example
 {
 .operands = {
@@ -1257,8 +1810,11 @@
 },
 }, // End of an example
 };
+return examples_large_nhwc_2;
+};
 
-std::vector<MixedTypedExample> examples_large_nhwc_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_large_nhwc_relaxed_2() {
+static std::vector<MixedTypedExample> examples_large_nhwc_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -1291,8 +1847,48 @@
 },
 }, // End of an example
 };
+return examples_large_nhwc_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_large_nhwc_quant8_2 = {
+std::vector<MixedTypedExample>& get_examples_large_nhwc_float16_2() {
+static std::vector<MixedTypedExample> examples_large_nhwc_float16_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {10.0f, 21.0f, 10.0f, 0.0f, 10.0f, 22.0f, 20.0f, 0.0f, 10.0f, 23.0f, 30.0f, 0.0f, 10.0f, 24.0f, 40.0f, 0.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {6010.0f, 7046.0f, 11000.0f, 9000.0f}}},
+}
+},
+}, // End of an example
+};
+return examples_large_nhwc_float16_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_large_nhwc_quant8_2() {
+static std::vector<MixedTypedExample> examples_large_nhwc_quant8_2 = {
 // Begin of an example
 {
 .operands = {
@@ -1325,8 +1921,11 @@
 },
 }, // End of an example
 };
+return examples_large_nhwc_quant8_2;
+};
 
-std::vector<MixedTypedExample> examples_large_nhwc_weight_as_input_2 = {
+std::vector<MixedTypedExample>& get_examples_large_nhwc_weight_as_input_2() {
+static std::vector<MixedTypedExample> examples_large_nhwc_weight_as_input_2 = {
 // Begin of an example
 {
 .operands = {
@@ -1359,8 +1958,11 @@
 },
 }, // End of an example
 };
+return examples_large_nhwc_weight_as_input_2;
+};
 
-std::vector<MixedTypedExample> examples_large_nhwc_weight_as_input_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_large_nhwc_weight_as_input_relaxed_2() {
+static std::vector<MixedTypedExample> examples_large_nhwc_weight_as_input_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -1393,8 +1995,48 @@
 },
 }, // End of an example
 };
+return examples_large_nhwc_weight_as_input_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_large_nhwc_weight_as_input_quant8_2 = {
+std::vector<MixedTypedExample>& get_examples_large_nhwc_weight_as_input_float16_2() {
+static std::vector<MixedTypedExample> examples_large_nhwc_weight_as_input_float16_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {10.0f, 21.0f, 10.0f, 0.0f, 10.0f, 22.0f, 20.0f, 0.0f, 10.0f, 23.0f, 30.0f, 0.0f, 10.0f, 24.0f, 40.0f, 0.0f}}, {1, {0.25f, 0.0f, 10.0f, 50.0f, 0.25f, 1.0f, 20.0f, 50.0f, 0.25f, 0.0f, 30.0f, 50.0f, 0.25f, 1.0f, 40.0f, 50.0f}}, {2, {6000.0f, 7000.0f, 8000.0f, 9000.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {6010.0f, 7046.0f, 11000.0f, 9000.0f}}},
+}
+},
+}, // End of an example
+};
+return examples_large_nhwc_weight_as_input_float16_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_large_nhwc_weight_as_input_quant8_2() {
+static std::vector<MixedTypedExample> examples_large_nhwc_weight_as_input_quant8_2 = {
 // Begin of an example
 {
 .operands = {
@@ -1427,8 +2069,11 @@
 },
 }, // End of an example
 };
+return examples_large_nhwc_weight_as_input_quant8_2;
+};
 
-std::vector<MixedTypedExample> examples_large_nchw_2 = {
+std::vector<MixedTypedExample>& get_examples_large_nchw_2() {
+static std::vector<MixedTypedExample> examples_large_nchw_2 = {
 // Begin of an example
 {
 .operands = {
@@ -1461,8 +2106,11 @@
 },
 }, // End of an example
 };
+return examples_large_nchw_2;
+};
 
-std::vector<MixedTypedExample> examples_large_nchw_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_large_nchw_relaxed_2() {
+static std::vector<MixedTypedExample> examples_large_nchw_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -1495,8 +2143,48 @@
 },
 }, // End of an example
 };
+return examples_large_nchw_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_large_nchw_quant8_2 = {
+std::vector<MixedTypedExample>& get_examples_large_nchw_float16_2() {
+static std::vector<MixedTypedExample> examples_large_nchw_float16_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {10.0f, 10.0f, 10.0f, 10.0f, 21.0f, 22.0f, 23.0f, 24.0f, 10.0f, 20.0f, 30.0f, 40.0f, 0.0f, 0.0f, 0.0f, 0.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {6010.0f, 7046.0f, 11000.0f, 9000.0f}}},
+}
+},
+}, // End of an example
+};
+return examples_large_nchw_float16_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_large_nchw_quant8_2() {
+static std::vector<MixedTypedExample> examples_large_nchw_quant8_2 = {
 // Begin of an example
 {
 .operands = {
@@ -1529,8 +2217,11 @@
 },
 }, // End of an example
 };
+return examples_large_nchw_quant8_2;
+};
 
-std::vector<MixedTypedExample> examples_large_nchw_weight_as_input_2 = {
+std::vector<MixedTypedExample>& get_examples_large_nchw_weight_as_input_2() {
+static std::vector<MixedTypedExample> examples_large_nchw_weight_as_input_2 = {
 // Begin of an example
 {
 .operands = {
@@ -1563,8 +2254,11 @@
 },
 }, // End of an example
 };
+return examples_large_nchw_weight_as_input_2;
+};
 
-std::vector<MixedTypedExample> examples_large_nchw_weight_as_input_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_large_nchw_weight_as_input_relaxed_2() {
+static std::vector<MixedTypedExample> examples_large_nchw_weight_as_input_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -1597,8 +2291,48 @@
 },
 }, // End of an example
 };
+return examples_large_nchw_weight_as_input_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_large_nchw_weight_as_input_quant8_2 = {
+std::vector<MixedTypedExample>& get_examples_large_nchw_weight_as_input_float16_2() {
+static std::vector<MixedTypedExample> examples_large_nchw_weight_as_input_float16_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {10.0f, 10.0f, 10.0f, 10.0f, 21.0f, 22.0f, 23.0f, 24.0f, 10.0f, 20.0f, 30.0f, 40.0f, 0.0f, 0.0f, 0.0f, 0.0f}}, {1, {0.25f, 0.0f, 10.0f, 50.0f, 0.25f, 1.0f, 20.0f, 50.0f, 0.25f, 0.0f, 30.0f, 50.0f, 0.25f, 1.0f, 40.0f, 50.0f}}, {2, {6000.0f, 7000.0f, 8000.0f, 9000.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {6010.0f, 7046.0f, 11000.0f, 9000.0f}}},
+}
+},
+}, // End of an example
+};
+return examples_large_nchw_weight_as_input_float16_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_large_nchw_weight_as_input_quant8_2() {
+static std::vector<MixedTypedExample> examples_large_nchw_weight_as_input_quant8_2 = {
 // Begin of an example
 {
 .operands = {
@@ -1631,4 +2365,6 @@
 },
 }, // End of an example
 };
+return examples_large_nchw_weight_as_input_quant8_2;
+};
 
diff --git a/nn/runtime/test/generated/examples/depthwise_conv_relaxed.example.cpp b/nn/runtime/test/generated/examples/depthwise_conv_relaxed.example.cpp
index d1ed12b..d3367ca 100644
--- a/nn/runtime/test/generated/examples/depthwise_conv_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/depthwise_conv_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: depthwise_conv_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -64,4 +65,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/dequantize.example.cpp b/nn/runtime/test/generated/examples/dequantize.example.cpp
index 881e10a..82ab925 100644
--- a/nn/runtime/test/generated/examples/dequantize.example.cpp
+++ b/nn/runtime/test/generated/examples/dequantize.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: dequantize.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/dequantize_relaxed.example.cpp b/nn/runtime/test/generated/examples/dequantize_relaxed.example.cpp
index 673afcc..335a3e2 100644
--- a/nn/runtime/test/generated/examples/dequantize_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/dequantize_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: dequantize_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/div.example.cpp b/nn/runtime/test/generated/examples/div.example.cpp
index cde411f..30af7c4 100644
--- a/nn/runtime/test/generated/examples/div.example.cpp
+++ b/nn/runtime/test/generated/examples/div.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: div.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/div_broadcast_float.example.cpp b/nn/runtime/test/generated/examples/div_broadcast_float.example.cpp
index 9053168..e51d32b 100644
--- a/nn/runtime/test/generated/examples/div_broadcast_float.example.cpp
+++ b/nn/runtime/test/generated/examples/div_broadcast_float.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: div_broadcast_float.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/div_broadcast_float16.example.cpp b/nn/runtime/test/generated/examples/div_broadcast_float16.example.cpp
index 0706cae..6969aee 100644
--- a/nn/runtime/test/generated/examples/div_broadcast_float16.example.cpp
+++ b/nn/runtime/test/generated/examples/div_broadcast_float16.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: div_broadcast_float16.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/div_broadcast_float_relaxed.example.cpp b/nn/runtime/test/generated/examples/div_broadcast_float_relaxed.example.cpp
index 0ecc218..529e07e 100644
--- a/nn/runtime/test/generated/examples/div_broadcast_float_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/div_broadcast_float_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: div_broadcast_float_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/div_float16.example.cpp b/nn/runtime/test/generated/examples/div_float16.example.cpp
index 12a8fe2..c42534c 100644
--- a/nn/runtime/test/generated/examples/div_float16.example.cpp
+++ b/nn/runtime/test/generated/examples/div_float16.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: div_float16.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/div_relaxed.example.cpp b/nn/runtime/test/generated/examples/div_relaxed.example.cpp
index 3b72abc..8d7d242 100644
--- a/nn/runtime/test/generated/examples/div_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/div_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: div_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/embedding_lookup.example.cpp b/nn/runtime/test/generated/examples/embedding_lookup.example.cpp
index 120462a..61b3be2 100644
--- a/nn/runtime/test/generated/examples/embedding_lookup.example.cpp
+++ b/nn/runtime/test/generated/examples/embedding_lookup.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: embedding_lookup.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/embedding_lookup_relaxed.example.cpp b/nn/runtime/test/generated/examples/embedding_lookup_relaxed.example.cpp
index 38e58f2..98f2a5e 100644
--- a/nn/runtime/test/generated/examples/embedding_lookup_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/embedding_lookup_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: embedding_lookup_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/expand_dims.example.cpp b/nn/runtime/test/generated/examples/expand_dims.example.cpp
index d65d03a..15e4f23 100644
--- a/nn/runtime/test/generated/examples/expand_dims.example.cpp
+++ b/nn/runtime/test/generated/examples/expand_dims.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: expand_dims.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
-std::vector<MixedTypedExample> examples_relaxed = {
+std::vector<MixedTypedExample>& get_examples_relaxed() {
+static std::vector<MixedTypedExample> examples_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_quant8 = {
+std::vector<MixedTypedExample>& get_examples_quant8() {
+static std::vector<MixedTypedExample> examples_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -101,8 +108,11 @@
 },
 }, // End of an example
 };
+return examples_quant8;
+};
 
-std::vector<MixedTypedExample> examples_int32 = {
+std::vector<MixedTypedExample>& get_examples_int32() {
+static std::vector<MixedTypedExample> examples_int32 = {
 // Begin of an example
 {
 .operands = {
@@ -135,8 +145,11 @@
 },
 }, // End of an example
 };
+return examples_int32;
+};
 
-std::vector<MixedTypedExample> examples_2 = {
+std::vector<MixedTypedExample>& get_examples_2() {
+static std::vector<MixedTypedExample> examples_2 = {
 // Begin of an example
 {
 .operands = {
@@ -169,8 +182,11 @@
 },
 }, // End of an example
 };
+return examples_2;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_2() {
+static std::vector<MixedTypedExample> examples_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -203,8 +219,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_quant8_2 = {
+std::vector<MixedTypedExample>& get_examples_quant8_2() {
+static std::vector<MixedTypedExample> examples_quant8_2 = {
 // Begin of an example
 {
 .operands = {
@@ -237,8 +256,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_2;
+};
 
-std::vector<MixedTypedExample> examples_int32_2 = {
+std::vector<MixedTypedExample>& get_examples_int32_2() {
+static std::vector<MixedTypedExample> examples_int32_2 = {
 // Begin of an example
 {
 .operands = {
@@ -271,8 +293,11 @@
 },
 }, // End of an example
 };
+return examples_int32_2;
+};
 
-std::vector<MixedTypedExample> examples_3 = {
+std::vector<MixedTypedExample>& get_examples_3() {
+static std::vector<MixedTypedExample> examples_3 = {
 // Begin of an example
 {
 .operands = {
@@ -305,8 +330,11 @@
 },
 }, // End of an example
 };
+return examples_3;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_3 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_3() {
+static std::vector<MixedTypedExample> examples_relaxed_3 = {
 // Begin of an example
 {
 .operands = {
@@ -339,8 +367,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_3;
+};
 
-std::vector<MixedTypedExample> examples_quant8_3 = {
+std::vector<MixedTypedExample>& get_examples_quant8_3() {
+static std::vector<MixedTypedExample> examples_quant8_3 = {
 // Begin of an example
 {
 .operands = {
@@ -373,8 +404,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_3;
+};
 
-std::vector<MixedTypedExample> examples_int32_3 = {
+std::vector<MixedTypedExample>& get_examples_int32_3() {
+static std::vector<MixedTypedExample> examples_int32_3 = {
 // Begin of an example
 {
 .operands = {
@@ -407,8 +441,11 @@
 },
 }, // End of an example
 };
+return examples_int32_3;
+};
 
-std::vector<MixedTypedExample> examples_4 = {
+std::vector<MixedTypedExample>& get_examples_4() {
+static std::vector<MixedTypedExample> examples_4 = {
 // Begin of an example
 {
 .operands = {
@@ -441,8 +478,11 @@
 },
 }, // End of an example
 };
+return examples_4;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_4 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_4() {
+static std::vector<MixedTypedExample> examples_relaxed_4 = {
 // Begin of an example
 {
 .operands = {
@@ -475,8 +515,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_4;
+};
 
-std::vector<MixedTypedExample> examples_quant8_4 = {
+std::vector<MixedTypedExample>& get_examples_quant8_4() {
+static std::vector<MixedTypedExample> examples_quant8_4 = {
 // Begin of an example
 {
 .operands = {
@@ -509,8 +552,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_4;
+};
 
-std::vector<MixedTypedExample> examples_int32_4 = {
+std::vector<MixedTypedExample>& get_examples_int32_4() {
+static std::vector<MixedTypedExample> examples_int32_4 = {
 // Begin of an example
 {
 .operands = {
@@ -543,4 +589,6 @@
 },
 }, // End of an example
 };
+return examples_int32_4;
+};
 
diff --git a/nn/runtime/test/generated/examples/floor.example.cpp b/nn/runtime/test/generated/examples/floor.example.cpp
index f5d6534..06c3c24 100644
--- a/nn/runtime/test/generated/examples/floor.example.cpp
+++ b/nn/runtime/test/generated/examples/floor.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: floor.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/floor_relaxed.example.cpp b/nn/runtime/test/generated/examples/floor_relaxed.example.cpp
index 0b604da..3b6083e 100644
--- a/nn/runtime/test/generated/examples/floor_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/floor_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: floor_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/fully_connected_float.example.cpp b/nn/runtime/test/generated/examples/fully_connected_float.example.cpp
index f29bb26..32f12fe 100644
--- a/nn/runtime/test/generated/examples/fully_connected_float.example.cpp
+++ b/nn/runtime/test/generated/examples/fully_connected_float.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: fully_connected_float.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/fully_connected_float_2.example.cpp b/nn/runtime/test/generated/examples/fully_connected_float_2.example.cpp
index a66638a..44b7043 100644
--- a/nn/runtime/test/generated/examples/fully_connected_float_2.example.cpp
+++ b/nn/runtime/test/generated/examples/fully_connected_float_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: fully_connected_float_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/fully_connected_float_2_relaxed.example.cpp b/nn/runtime/test/generated/examples/fully_connected_float_2_relaxed.example.cpp
index 2e5a067..3b121f6 100644
--- a/nn/runtime/test/generated/examples/fully_connected_float_2_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/fully_connected_float_2_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: fully_connected_float_2_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/fully_connected_float_3.example.cpp b/nn/runtime/test/generated/examples/fully_connected_float_3.example.cpp
index d9288d3..49d09ab 100644
--- a/nn/runtime/test/generated/examples/fully_connected_float_3.example.cpp
+++ b/nn/runtime/test/generated/examples/fully_connected_float_3.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: fully_connected_float_3.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/fully_connected_float_4d_simple.example.cpp b/nn/runtime/test/generated/examples/fully_connected_float_4d_simple.example.cpp
index 828c7ee..1b6d494 100644
--- a/nn/runtime/test/generated/examples/fully_connected_float_4d_simple.example.cpp
+++ b/nn/runtime/test/generated/examples/fully_connected_float_4d_simple.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: fully_connected_float_4d_simple.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/fully_connected_float_4d_simple_relaxed.example.cpp b/nn/runtime/test/generated/examples/fully_connected_float_4d_simple_relaxed.example.cpp
index 5c3e201..6e1f840 100644
--- a/nn/runtime/test/generated/examples/fully_connected_float_4d_simple_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/fully_connected_float_4d_simple_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: fully_connected_float_4d_simple_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/fully_connected_float_large.example.cpp b/nn/runtime/test/generated/examples/fully_connected_float_large.example.cpp
index e27c23d..05fa62f 100644
--- a/nn/runtime/test/generated/examples/fully_connected_float_large.example.cpp
+++ b/nn/runtime/test/generated/examples/fully_connected_float_large.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: fully_connected_float_large.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/fully_connected_float_large_relaxed.example.cpp b/nn/runtime/test/generated/examples/fully_connected_float_large_relaxed.example.cpp
index fc4bc62..162dde2 100644
--- a/nn/runtime/test/generated/examples/fully_connected_float_large_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/fully_connected_float_large_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: fully_connected_float_large_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/fully_connected_float_large_weights_as_inputs.example.cpp b/nn/runtime/test/generated/examples/fully_connected_float_large_weights_as_inputs.example.cpp
index 5194e62..9f3f7d9 100644
--- a/nn/runtime/test/generated/examples/fully_connected_float_large_weights_as_inputs.example.cpp
+++ b/nn/runtime/test/generated/examples/fully_connected_float_large_weights_as_inputs.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: fully_connected_float_large_weights_as_inputs.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/fully_connected_float_large_weights_as_inputs_relaxed.example.cpp b/nn/runtime/test/generated/examples/fully_connected_float_large_weights_as_inputs_relaxed.example.cpp
index 4dcc76f..c2efb4a 100644
--- a/nn/runtime/test/generated/examples/fully_connected_float_large_weights_as_inputs_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/fully_connected_float_large_weights_as_inputs_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: fully_connected_float_large_weights_as_inputs_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/fully_connected_float_relaxed.example.cpp b/nn/runtime/test/generated/examples/fully_connected_float_relaxed.example.cpp
index 332a42b..1041c62 100644
--- a/nn/runtime/test/generated/examples/fully_connected_float_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/fully_connected_float_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: fully_connected_float_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/fully_connected_float_weights_as_inputs.example.cpp b/nn/runtime/test/generated/examples/fully_connected_float_weights_as_inputs.example.cpp
index 5462023..7b15833 100644
--- a/nn/runtime/test/generated/examples/fully_connected_float_weights_as_inputs.example.cpp
+++ b/nn/runtime/test/generated/examples/fully_connected_float_weights_as_inputs.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: fully_connected_float_weights_as_inputs.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/fully_connected_float_weights_as_inputs_relaxed.example.cpp b/nn/runtime/test/generated/examples/fully_connected_float_weights_as_inputs_relaxed.example.cpp
index 1bcbee2..8741317 100644
--- a/nn/runtime/test/generated/examples/fully_connected_float_weights_as_inputs_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/fully_connected_float_weights_as_inputs_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: fully_connected_float_weights_as_inputs_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/fully_connected_quant8.example.cpp b/nn/runtime/test/generated/examples/fully_connected_quant8.example.cpp
index bcdd63f..f5b5082 100644
--- a/nn/runtime/test/generated/examples/fully_connected_quant8.example.cpp
+++ b/nn/runtime/test/generated/examples/fully_connected_quant8.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: fully_connected_quant8.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/fully_connected_quant8_2.example.cpp b/nn/runtime/test/generated/examples/fully_connected_quant8_2.example.cpp
index 494894b..4d19354 100644
--- a/nn/runtime/test/generated/examples/fully_connected_quant8_2.example.cpp
+++ b/nn/runtime/test/generated/examples/fully_connected_quant8_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: fully_connected_quant8_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/fully_connected_quant8_large.example.cpp b/nn/runtime/test/generated/examples/fully_connected_quant8_large.example.cpp
index eff59aa..3b0f196 100644
--- a/nn/runtime/test/generated/examples/fully_connected_quant8_large.example.cpp
+++ b/nn/runtime/test/generated/examples/fully_connected_quant8_large.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: fully_connected_quant8_large.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/fully_connected_quant8_large_weights_as_inputs.example.cpp b/nn/runtime/test/generated/examples/fully_connected_quant8_large_weights_as_inputs.example.cpp
index 1f3bf2a..769df6f 100644
--- a/nn/runtime/test/generated/examples/fully_connected_quant8_large_weights_as_inputs.example.cpp
+++ b/nn/runtime/test/generated/examples/fully_connected_quant8_large_weights_as_inputs.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: fully_connected_quant8_large_weights_as_inputs.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/fully_connected_quant8_weights_as_inputs.example.cpp b/nn/runtime/test/generated/examples/fully_connected_quant8_weights_as_inputs.example.cpp
index 0951819..590918b 100644
--- a/nn/runtime/test/generated/examples/fully_connected_quant8_weights_as_inputs.example.cpp
+++ b/nn/runtime/test/generated/examples/fully_connected_quant8_weights_as_inputs.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: fully_connected_quant8_weights_as_inputs.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/gather.example.cpp b/nn/runtime/test/generated/examples/gather.example.cpp
index 92fa865..6ea30b3 100644
--- a/nn/runtime/test/generated/examples/gather.example.cpp
+++ b/nn/runtime/test/generated/examples/gather.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: gather.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
-std::vector<MixedTypedExample> examples_relaxed = {
+std::vector<MixedTypedExample>& get_examples_relaxed() {
+static std::vector<MixedTypedExample> examples_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_quant8 = {
+std::vector<MixedTypedExample>& get_examples_quant8() {
+static std::vector<MixedTypedExample> examples_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -101,8 +108,11 @@
 },
 }, // End of an example
 };
+return examples_quant8;
+};
 
-std::vector<MixedTypedExample> examples_int32 = {
+std::vector<MixedTypedExample>& get_examples_int32() {
+static std::vector<MixedTypedExample> examples_int32 = {
 // Begin of an example
 {
 .operands = {
@@ -135,8 +145,11 @@
 },
 }, // End of an example
 };
+return examples_int32;
+};
 
-std::vector<MixedTypedExample> examples_float16 = {
+std::vector<MixedTypedExample>& get_examples_float16() {
+static std::vector<MixedTypedExample> examples_float16 = {
 // Begin of an example
 {
 .operands = {
@@ -169,8 +182,11 @@
 },
 }, // End of an example
 };
+return examples_float16;
+};
 
-std::vector<MixedTypedExample> examples_2 = {
+std::vector<MixedTypedExample>& get_examples_2() {
+static std::vector<MixedTypedExample> examples_2 = {
 // Begin of an example
 {
 .operands = {
@@ -203,8 +219,11 @@
 },
 }, // End of an example
 };
+return examples_2;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_2() {
+static std::vector<MixedTypedExample> examples_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -237,8 +256,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_quant8_2 = {
+std::vector<MixedTypedExample>& get_examples_quant8_2() {
+static std::vector<MixedTypedExample> examples_quant8_2 = {
 // Begin of an example
 {
 .operands = {
@@ -271,8 +293,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_2;
+};
 
-std::vector<MixedTypedExample> examples_int32_2 = {
+std::vector<MixedTypedExample>& get_examples_int32_2() {
+static std::vector<MixedTypedExample> examples_int32_2 = {
 // Begin of an example
 {
 .operands = {
@@ -305,8 +330,11 @@
 },
 }, // End of an example
 };
+return examples_int32_2;
+};
 
-std::vector<MixedTypedExample> examples_float16_2 = {
+std::vector<MixedTypedExample>& get_examples_float16_2() {
+static std::vector<MixedTypedExample> examples_float16_2 = {
 // Begin of an example
 {
 .operands = {
@@ -339,8 +367,11 @@
 },
 }, // End of an example
 };
+return examples_float16_2;
+};
 
-std::vector<MixedTypedExample> examples_3 = {
+std::vector<MixedTypedExample>& get_examples_3() {
+static std::vector<MixedTypedExample> examples_3 = {
 // Begin of an example
 {
 .operands = {
@@ -373,8 +404,11 @@
 },
 }, // End of an example
 };
+return examples_3;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_3 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_3() {
+static std::vector<MixedTypedExample> examples_relaxed_3 = {
 // Begin of an example
 {
 .operands = {
@@ -407,8 +441,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_3;
+};
 
-std::vector<MixedTypedExample> examples_quant8_3 = {
+std::vector<MixedTypedExample>& get_examples_quant8_3() {
+static std::vector<MixedTypedExample> examples_quant8_3 = {
 // Begin of an example
 {
 .operands = {
@@ -441,8 +478,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_3;
+};
 
-std::vector<MixedTypedExample> examples_int32_3 = {
+std::vector<MixedTypedExample>& get_examples_int32_3() {
+static std::vector<MixedTypedExample> examples_int32_3 = {
 // Begin of an example
 {
 .operands = {
@@ -475,8 +515,11 @@
 },
 }, // End of an example
 };
+return examples_int32_3;
+};
 
-std::vector<MixedTypedExample> examples_float16_3 = {
+std::vector<MixedTypedExample>& get_examples_float16_3() {
+static std::vector<MixedTypedExample> examples_float16_3 = {
 // Begin of an example
 {
 .operands = {
@@ -509,8 +552,11 @@
 },
 }, // End of an example
 };
+return examples_float16_3;
+};
 
-std::vector<MixedTypedExample> examples_4 = {
+std::vector<MixedTypedExample>& get_examples_4() {
+static std::vector<MixedTypedExample> examples_4 = {
 // Begin of an example
 {
 .operands = {
@@ -543,8 +589,11 @@
 },
 }, // End of an example
 };
+return examples_4;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_4 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_4() {
+static std::vector<MixedTypedExample> examples_relaxed_4 = {
 // Begin of an example
 {
 .operands = {
@@ -577,8 +626,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_4;
+};
 
-std::vector<MixedTypedExample> examples_quant8_4 = {
+std::vector<MixedTypedExample>& get_examples_quant8_4() {
+static std::vector<MixedTypedExample> examples_quant8_4 = {
 // Begin of an example
 {
 .operands = {
@@ -611,8 +663,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_4;
+};
 
-std::vector<MixedTypedExample> examples_int32_4 = {
+std::vector<MixedTypedExample>& get_examples_int32_4() {
+static std::vector<MixedTypedExample> examples_int32_4 = {
 // Begin of an example
 {
 .operands = {
@@ -645,8 +700,11 @@
 },
 }, // End of an example
 };
+return examples_int32_4;
+};
 
-std::vector<MixedTypedExample> examples_float16_4 = {
+std::vector<MixedTypedExample>& get_examples_float16_4() {
+static std::vector<MixedTypedExample> examples_float16_4 = {
 // Begin of an example
 {
 .operands = {
@@ -679,8 +737,11 @@
 },
 }, // End of an example
 };
+return examples_float16_4;
+};
 
-std::vector<MixedTypedExample> examples_5 = {
+std::vector<MixedTypedExample>& get_examples_5() {
+static std::vector<MixedTypedExample> examples_5 = {
 // Begin of an example
 {
 .operands = {
@@ -713,8 +774,11 @@
 },
 }, // End of an example
 };
+return examples_5;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_5 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_5() {
+static std::vector<MixedTypedExample> examples_relaxed_5 = {
 // Begin of an example
 {
 .operands = {
@@ -747,8 +811,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_5;
+};
 
-std::vector<MixedTypedExample> examples_quant8_5 = {
+std::vector<MixedTypedExample>& get_examples_quant8_5() {
+static std::vector<MixedTypedExample> examples_quant8_5 = {
 // Begin of an example
 {
 .operands = {
@@ -781,8 +848,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_5;
+};
 
-std::vector<MixedTypedExample> examples_int32_5 = {
+std::vector<MixedTypedExample>& get_examples_int32_5() {
+static std::vector<MixedTypedExample> examples_int32_5 = {
 // Begin of an example
 {
 .operands = {
@@ -815,8 +885,11 @@
 },
 }, // End of an example
 };
+return examples_int32_5;
+};
 
-std::vector<MixedTypedExample> examples_float16_5 = {
+std::vector<MixedTypedExample>& get_examples_float16_5() {
+static std::vector<MixedTypedExample> examples_float16_5 = {
 // Begin of an example
 {
 .operands = {
@@ -849,8 +922,11 @@
 },
 }, // End of an example
 };
+return examples_float16_5;
+};
 
-std::vector<MixedTypedExample> examples_6 = {
+std::vector<MixedTypedExample>& get_examples_6() {
+static std::vector<MixedTypedExample> examples_6 = {
 // Begin of an example
 {
 .operands = {
@@ -883,8 +959,11 @@
 },
 }, // End of an example
 };
+return examples_6;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_6 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_6() {
+static std::vector<MixedTypedExample> examples_relaxed_6 = {
 // Begin of an example
 {
 .operands = {
@@ -917,8 +996,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_6;
+};
 
-std::vector<MixedTypedExample> examples_quant8_6 = {
+std::vector<MixedTypedExample>& get_examples_quant8_6() {
+static std::vector<MixedTypedExample> examples_quant8_6 = {
 // Begin of an example
 {
 .operands = {
@@ -951,8 +1033,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_6;
+};
 
-std::vector<MixedTypedExample> examples_int32_6 = {
+std::vector<MixedTypedExample>& get_examples_int32_6() {
+static std::vector<MixedTypedExample> examples_int32_6 = {
 // Begin of an example
 {
 .operands = {
@@ -985,8 +1070,11 @@
 },
 }, // End of an example
 };
+return examples_int32_6;
+};
 
-std::vector<MixedTypedExample> examples_float16_6 = {
+std::vector<MixedTypedExample>& get_examples_float16_6() {
+static std::vector<MixedTypedExample> examples_float16_6 = {
 // Begin of an example
 {
 .operands = {
@@ -1019,8 +1107,11 @@
 },
 }, // End of an example
 };
+return examples_float16_6;
+};
 
-std::vector<MixedTypedExample> examples_7 = {
+std::vector<MixedTypedExample>& get_examples_7() {
+static std::vector<MixedTypedExample> examples_7 = {
 // Begin of an example
 {
 .operands = {
@@ -1053,8 +1144,11 @@
 },
 }, // End of an example
 };
+return examples_7;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_7 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_7() {
+static std::vector<MixedTypedExample> examples_relaxed_7 = {
 // Begin of an example
 {
 .operands = {
@@ -1087,8 +1181,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_7;
+};
 
-std::vector<MixedTypedExample> examples_quant8_7 = {
+std::vector<MixedTypedExample>& get_examples_quant8_7() {
+static std::vector<MixedTypedExample> examples_quant8_7 = {
 // Begin of an example
 {
 .operands = {
@@ -1121,8 +1218,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_7;
+};
 
-std::vector<MixedTypedExample> examples_int32_7 = {
+std::vector<MixedTypedExample>& get_examples_int32_7() {
+static std::vector<MixedTypedExample> examples_int32_7 = {
 // Begin of an example
 {
 .operands = {
@@ -1155,8 +1255,11 @@
 },
 }, // End of an example
 };
+return examples_int32_7;
+};
 
-std::vector<MixedTypedExample> examples_float16_7 = {
+std::vector<MixedTypedExample>& get_examples_float16_7() {
+static std::vector<MixedTypedExample> examples_float16_7 = {
 // Begin of an example
 {
 .operands = {
@@ -1189,8 +1292,11 @@
 },
 }, // End of an example
 };
+return examples_float16_7;
+};
 
-std::vector<MixedTypedExample> examples_8 = {
+std::vector<MixedTypedExample>& get_examples_8() {
+static std::vector<MixedTypedExample> examples_8 = {
 // Begin of an example
 {
 .operands = {
@@ -1223,8 +1329,11 @@
 },
 }, // End of an example
 };
+return examples_8;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_8 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_8() {
+static std::vector<MixedTypedExample> examples_relaxed_8 = {
 // Begin of an example
 {
 .operands = {
@@ -1257,8 +1366,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_8;
+};
 
-std::vector<MixedTypedExample> examples_quant8_8 = {
+std::vector<MixedTypedExample>& get_examples_quant8_8() {
+static std::vector<MixedTypedExample> examples_quant8_8 = {
 // Begin of an example
 {
 .operands = {
@@ -1291,8 +1403,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_8;
+};
 
-std::vector<MixedTypedExample> examples_int32_8 = {
+std::vector<MixedTypedExample>& get_examples_int32_8() {
+static std::vector<MixedTypedExample> examples_int32_8 = {
 // Begin of an example
 {
 .operands = {
@@ -1325,8 +1440,11 @@
 },
 }, // End of an example
 };
+return examples_int32_8;
+};
 
-std::vector<MixedTypedExample> examples_float16_8 = {
+std::vector<MixedTypedExample>& get_examples_float16_8() {
+static std::vector<MixedTypedExample> examples_float16_8 = {
 // Begin of an example
 {
 .operands = {
@@ -1359,4 +1477,6 @@
 },
 }, // End of an example
 };
+return examples_float16_8;
+};
 
diff --git a/nn/runtime/test/generated/examples/gather_higher_rank.example.cpp b/nn/runtime/test/generated/examples/gather_higher_rank.example.cpp
index 3f40182..7e3e09c 100644
--- a/nn/runtime/test/generated/examples/gather_higher_rank.example.cpp
+++ b/nn/runtime/test/generated/examples/gather_higher_rank.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: gather_higher_rank.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
-std::vector<MixedTypedExample> examples_relaxed = {
+std::vector<MixedTypedExample>& get_examples_relaxed() {
+static std::vector<MixedTypedExample> examples_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_quant8 = {
+std::vector<MixedTypedExample>& get_examples_quant8() {
+static std::vector<MixedTypedExample> examples_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -101,8 +108,11 @@
 },
 }, // End of an example
 };
+return examples_quant8;
+};
 
-std::vector<MixedTypedExample> examples_int32 = {
+std::vector<MixedTypedExample>& get_examples_int32() {
+static std::vector<MixedTypedExample> examples_int32 = {
 // Begin of an example
 {
 .operands = {
@@ -135,4 +145,6 @@
 },
 }, // End of an example
 };
+return examples_int32;
+};
 
diff --git a/nn/runtime/test/generated/examples/grouped_conv2d.example.cpp b/nn/runtime/test/generated/examples/grouped_conv2d.example.cpp
index 48ba2fe..050fb5d 100644
--- a/nn/runtime/test/generated/examples/grouped_conv2d.example.cpp
+++ b/nn/runtime/test/generated/examples/grouped_conv2d.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: grouped_conv2d.mod.py). Do not edit
-std::vector<MixedTypedExample> examples_nhwc_none = {
+std::vector<MixedTypedExample>& get_examples_nhwc_none() {
+static std::vector<MixedTypedExample> examples_nhwc_none = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_none;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_none_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nhwc_none_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nhwc_none_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_none_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_none_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nhwc_none_relaxed() {
+static std::vector<MixedTypedExample> examples_nhwc_none_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -101,8 +108,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_none_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_none_relaxed_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nhwc_none_relaxed_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nhwc_none_relaxed_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -135,8 +145,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_none_relaxed_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_none_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_none_quant8() {
+static std::vector<MixedTypedExample> examples_nhwc_none_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -169,8 +182,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_none_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_none_quant8_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nhwc_none_quant8_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nhwc_none_quant8_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -203,8 +219,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_none_quant8_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relu = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relu() {
+static std::vector<MixedTypedExample> examples_nhwc_relu = {
 // Begin of an example
 {
 .operands = {
@@ -237,8 +256,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relu;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relu_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relu_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nhwc_relu_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -271,8 +293,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relu_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relu_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relu_relaxed() {
+static std::vector<MixedTypedExample> examples_nhwc_relu_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -305,8 +330,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relu_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relu_relaxed_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relu_relaxed_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nhwc_relu_relaxed_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -339,8 +367,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relu_relaxed_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relu_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relu_quant8() {
+static std::vector<MixedTypedExample> examples_nhwc_relu_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -373,8 +404,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relu_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relu_quant8_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relu_quant8_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nhwc_relu_quant8_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -407,8 +441,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relu_quant8_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relu1 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relu1() {
+static std::vector<MixedTypedExample> examples_nhwc_relu1 = {
 // Begin of an example
 {
 .operands = {
@@ -441,8 +478,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relu1;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relu1_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relu1_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nhwc_relu1_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -475,8 +515,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relu1_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relu1_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relu1_relaxed() {
+static std::vector<MixedTypedExample> examples_nhwc_relu1_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -509,8 +552,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relu1_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relu1_relaxed_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relu1_relaxed_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nhwc_relu1_relaxed_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -543,8 +589,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relu1_relaxed_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relu1_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relu1_quant8() {
+static std::vector<MixedTypedExample> examples_nhwc_relu1_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -577,8 +626,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relu1_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relu1_quant8_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relu1_quant8_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nhwc_relu1_quant8_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -611,8 +663,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relu1_quant8_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relu6 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relu6() {
+static std::vector<MixedTypedExample> examples_nhwc_relu6 = {
 // Begin of an example
 {
 .operands = {
@@ -645,8 +700,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relu6;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relu6_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relu6_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nhwc_relu6_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -679,8 +737,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relu6_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relu6_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relu6_relaxed() {
+static std::vector<MixedTypedExample> examples_nhwc_relu6_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -713,8 +774,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relu6_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relu6_relaxed_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relu6_relaxed_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nhwc_relu6_relaxed_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -747,8 +811,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relu6_relaxed_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relu6_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relu6_quant8() {
+static std::vector<MixedTypedExample> examples_nhwc_relu6_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -781,8 +848,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relu6_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relu6_quant8_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relu6_quant8_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nhwc_relu6_quant8_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -815,8 +885,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relu6_quant8_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nchw_none = {
+std::vector<MixedTypedExample>& get_examples_nchw_none() {
+static std::vector<MixedTypedExample> examples_nchw_none = {
 // Begin of an example
 {
 .operands = {
@@ -849,8 +922,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_none;
+};
 
-std::vector<MixedTypedExample> examples_nchw_none_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nchw_none_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nchw_none_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -883,8 +959,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_none_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nchw_none_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nchw_none_relaxed() {
+static std::vector<MixedTypedExample> examples_nchw_none_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -917,8 +996,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_none_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nchw_none_relaxed_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nchw_none_relaxed_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nchw_none_relaxed_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -951,8 +1033,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_none_relaxed_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nchw_none_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nchw_none_quant8() {
+static std::vector<MixedTypedExample> examples_nchw_none_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -985,8 +1070,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_none_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nchw_none_quant8_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nchw_none_quant8_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nchw_none_quant8_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -1019,8 +1107,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_none_quant8_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relu = {
+std::vector<MixedTypedExample>& get_examples_nchw_relu() {
+static std::vector<MixedTypedExample> examples_nchw_relu = {
 // Begin of an example
 {
 .operands = {
@@ -1053,8 +1144,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relu;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relu_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nchw_relu_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nchw_relu_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -1087,8 +1181,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relu_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relu_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nchw_relu_relaxed() {
+static std::vector<MixedTypedExample> examples_nchw_relu_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -1121,8 +1218,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relu_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relu_relaxed_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nchw_relu_relaxed_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nchw_relu_relaxed_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -1155,8 +1255,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relu_relaxed_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relu_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nchw_relu_quant8() {
+static std::vector<MixedTypedExample> examples_nchw_relu_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -1189,8 +1292,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relu_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relu_quant8_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nchw_relu_quant8_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nchw_relu_quant8_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -1223,8 +1329,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relu_quant8_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relu1 = {
+std::vector<MixedTypedExample>& get_examples_nchw_relu1() {
+static std::vector<MixedTypedExample> examples_nchw_relu1 = {
 // Begin of an example
 {
 .operands = {
@@ -1257,8 +1366,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relu1;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relu1_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nchw_relu1_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nchw_relu1_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -1291,8 +1403,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relu1_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relu1_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nchw_relu1_relaxed() {
+static std::vector<MixedTypedExample> examples_nchw_relu1_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -1325,8 +1440,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relu1_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relu1_relaxed_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nchw_relu1_relaxed_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nchw_relu1_relaxed_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -1359,8 +1477,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relu1_relaxed_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relu1_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nchw_relu1_quant8() {
+static std::vector<MixedTypedExample> examples_nchw_relu1_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -1393,8 +1514,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relu1_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relu1_quant8_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nchw_relu1_quant8_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nchw_relu1_quant8_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -1427,8 +1551,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relu1_quant8_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relu6 = {
+std::vector<MixedTypedExample>& get_examples_nchw_relu6() {
+static std::vector<MixedTypedExample> examples_nchw_relu6 = {
 // Begin of an example
 {
 .operands = {
@@ -1461,8 +1588,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relu6;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relu6_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nchw_relu6_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nchw_relu6_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -1495,8 +1625,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relu6_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relu6_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nchw_relu6_relaxed() {
+static std::vector<MixedTypedExample> examples_nchw_relu6_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -1529,8 +1662,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relu6_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relu6_relaxed_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nchw_relu6_relaxed_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nchw_relu6_relaxed_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -1563,8 +1699,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relu6_relaxed_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relu6_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nchw_relu6_quant8() {
+static std::vector<MixedTypedExample> examples_nchw_relu6_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -1597,8 +1736,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relu6_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relu6_quant8_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nchw_relu6_quant8_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nchw_relu6_quant8_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -1631,8 +1773,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relu6_quant8_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_large_nhwc = {
+std::vector<MixedTypedExample>& get_examples_large_nhwc() {
+static std::vector<MixedTypedExample> examples_large_nhwc = {
 // Begin of an example
 {
 .operands = {
@@ -1665,8 +1810,11 @@
 },
 }, // End of an example
 };
+return examples_large_nhwc;
+};
 
-std::vector<MixedTypedExample> examples_large_nhwc_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_large_nhwc_weight_as_input() {
+static std::vector<MixedTypedExample> examples_large_nhwc_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -1699,8 +1847,11 @@
 },
 }, // End of an example
 };
+return examples_large_nhwc_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_large_nhwc_relaxed = {
+std::vector<MixedTypedExample>& get_examples_large_nhwc_relaxed() {
+static std::vector<MixedTypedExample> examples_large_nhwc_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -1733,8 +1884,11 @@
 },
 }, // End of an example
 };
+return examples_large_nhwc_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_large_nhwc_relaxed_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_large_nhwc_relaxed_weight_as_input() {
+static std::vector<MixedTypedExample> examples_large_nhwc_relaxed_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -1767,8 +1921,11 @@
 },
 }, // End of an example
 };
+return examples_large_nhwc_relaxed_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_large_nhwc_quant8 = {
+std::vector<MixedTypedExample>& get_examples_large_nhwc_quant8() {
+static std::vector<MixedTypedExample> examples_large_nhwc_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -1801,8 +1958,11 @@
 },
 }, // End of an example
 };
+return examples_large_nhwc_quant8;
+};
 
-std::vector<MixedTypedExample> examples_large_nhwc_quant8_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_large_nhwc_quant8_weight_as_input() {
+static std::vector<MixedTypedExample> examples_large_nhwc_quant8_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -1835,8 +1995,11 @@
 },
 }, // End of an example
 };
+return examples_large_nhwc_quant8_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_large_nchw = {
+std::vector<MixedTypedExample>& get_examples_large_nchw() {
+static std::vector<MixedTypedExample> examples_large_nchw = {
 // Begin of an example
 {
 .operands = {
@@ -1869,8 +2032,11 @@
 },
 }, // End of an example
 };
+return examples_large_nchw;
+};
 
-std::vector<MixedTypedExample> examples_large_nchw_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_large_nchw_weight_as_input() {
+static std::vector<MixedTypedExample> examples_large_nchw_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -1903,8 +2069,11 @@
 },
 }, // End of an example
 };
+return examples_large_nchw_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_large_nchw_relaxed = {
+std::vector<MixedTypedExample>& get_examples_large_nchw_relaxed() {
+static std::vector<MixedTypedExample> examples_large_nchw_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -1937,8 +2106,11 @@
 },
 }, // End of an example
 };
+return examples_large_nchw_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_large_nchw_relaxed_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_large_nchw_relaxed_weight_as_input() {
+static std::vector<MixedTypedExample> examples_large_nchw_relaxed_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -1971,8 +2143,11 @@
 },
 }, // End of an example
 };
+return examples_large_nchw_relaxed_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_large_nchw_quant8 = {
+std::vector<MixedTypedExample>& get_examples_large_nchw_quant8() {
+static std::vector<MixedTypedExample> examples_large_nchw_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -2005,8 +2180,11 @@
 },
 }, // End of an example
 };
+return examples_large_nchw_quant8;
+};
 
-std::vector<MixedTypedExample> examples_large_nchw_quant8_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_large_nchw_quant8_weight_as_input() {
+static std::vector<MixedTypedExample> examples_large_nchw_quant8_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -2039,8 +2217,11 @@
 },
 }, // End of an example
 };
+return examples_large_nchw_quant8_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_channel_nhwc = {
+std::vector<MixedTypedExample>& get_examples_channel_nhwc() {
+static std::vector<MixedTypedExample> examples_channel_nhwc = {
 // Begin of an example
 {
 .operands = {
@@ -2073,8 +2254,11 @@
 },
 }, // End of an example
 };
+return examples_channel_nhwc;
+};
 
-std::vector<MixedTypedExample> examples_channel_nhwc_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_channel_nhwc_weight_as_input() {
+static std::vector<MixedTypedExample> examples_channel_nhwc_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -2107,8 +2291,11 @@
 },
 }, // End of an example
 };
+return examples_channel_nhwc_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_channel_nhwc_relaxed = {
+std::vector<MixedTypedExample>& get_examples_channel_nhwc_relaxed() {
+static std::vector<MixedTypedExample> examples_channel_nhwc_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -2141,8 +2328,11 @@
 },
 }, // End of an example
 };
+return examples_channel_nhwc_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_channel_nhwc_relaxed_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_channel_nhwc_relaxed_weight_as_input() {
+static std::vector<MixedTypedExample> examples_channel_nhwc_relaxed_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -2175,8 +2365,11 @@
 },
 }, // End of an example
 };
+return examples_channel_nhwc_relaxed_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_channel_nhwc_quant8 = {
+std::vector<MixedTypedExample>& get_examples_channel_nhwc_quant8() {
+static std::vector<MixedTypedExample> examples_channel_nhwc_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -2209,8 +2402,11 @@
 },
 }, // End of an example
 };
+return examples_channel_nhwc_quant8;
+};
 
-std::vector<MixedTypedExample> examples_channel_nhwc_quant8_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_channel_nhwc_quant8_weight_as_input() {
+static std::vector<MixedTypedExample> examples_channel_nhwc_quant8_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -2243,8 +2439,11 @@
 },
 }, // End of an example
 };
+return examples_channel_nhwc_quant8_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_channel_nchw = {
+std::vector<MixedTypedExample>& get_examples_channel_nchw() {
+static std::vector<MixedTypedExample> examples_channel_nchw = {
 // Begin of an example
 {
 .operands = {
@@ -2277,8 +2476,11 @@
 },
 }, // End of an example
 };
+return examples_channel_nchw;
+};
 
-std::vector<MixedTypedExample> examples_channel_nchw_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_channel_nchw_weight_as_input() {
+static std::vector<MixedTypedExample> examples_channel_nchw_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -2311,8 +2513,11 @@
 },
 }, // End of an example
 };
+return examples_channel_nchw_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_channel_nchw_relaxed = {
+std::vector<MixedTypedExample>& get_examples_channel_nchw_relaxed() {
+static std::vector<MixedTypedExample> examples_channel_nchw_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -2345,8 +2550,11 @@
 },
 }, // End of an example
 };
+return examples_channel_nchw_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_channel_nchw_relaxed_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_channel_nchw_relaxed_weight_as_input() {
+static std::vector<MixedTypedExample> examples_channel_nchw_relaxed_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -2379,8 +2587,11 @@
 },
 }, // End of an example
 };
+return examples_channel_nchw_relaxed_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_channel_nchw_quant8 = {
+std::vector<MixedTypedExample>& get_examples_channel_nchw_quant8() {
+static std::vector<MixedTypedExample> examples_channel_nchw_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -2413,8 +2624,11 @@
 },
 }, // End of an example
 };
+return examples_channel_nchw_quant8;
+};
 
-std::vector<MixedTypedExample> examples_channel_nchw_quant8_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_channel_nchw_quant8_weight_as_input() {
+static std::vector<MixedTypedExample> examples_channel_nchw_quant8_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -2447,4 +2661,6 @@
 },
 }, // End of an example
 };
+return examples_channel_nchw_quant8_weight_as_input;
+};
 
diff --git a/nn/runtime/test/generated/examples/hashtable_lookup_float.example.cpp b/nn/runtime/test/generated/examples/hashtable_lookup_float.example.cpp
index 2176d28..9a5466f 100644
--- a/nn/runtime/test/generated/examples/hashtable_lookup_float.example.cpp
+++ b/nn/runtime/test/generated/examples/hashtable_lookup_float.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: hashtable_lookup_float.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/hashtable_lookup_float_relaxed.example.cpp b/nn/runtime/test/generated/examples/hashtable_lookup_float_relaxed.example.cpp
index 02291bd..37f1c4d 100644
--- a/nn/runtime/test/generated/examples/hashtable_lookup_float_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/hashtable_lookup_float_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: hashtable_lookup_float_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/hashtable_lookup_quant8.example.cpp b/nn/runtime/test/generated/examples/hashtable_lookup_quant8.example.cpp
index 208d72e..8b352a0 100644
--- a/nn/runtime/test/generated/examples/hashtable_lookup_quant8.example.cpp
+++ b/nn/runtime/test/generated/examples/hashtable_lookup_quant8.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: hashtable_lookup_quant8.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/heatmap_max_keypoint.example.cpp b/nn/runtime/test/generated/examples/heatmap_max_keypoint.example.cpp
index 6584d05..a32c2fe 100644
--- a/nn/runtime/test/generated/examples/heatmap_max_keypoint.example.cpp
+++ b/nn/runtime/test/generated/examples/heatmap_max_keypoint.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: heatmap_max_keypoint.mod.py). Do not edit
-std::vector<MixedTypedExample> examples_nhwc = {
+std::vector<MixedTypedExample>& get_examples_nhwc() {
+static std::vector<MixedTypedExample> examples_nhwc = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nchw = {
+std::vector<MixedTypedExample>& get_examples_nchw() {
+static std::vector<MixedTypedExample> examples_nchw = {
 // Begin of an example
 {
 .operands = {
@@ -101,8 +108,11 @@
 },
 }, // End of an example
 };
+return examples_nchw;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -135,8 +145,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_2() {
+static std::vector<MixedTypedExample> examples_nhwc_2 = {
 // Begin of an example
 {
 .operands = {
@@ -169,8 +182,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed_2() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -203,8 +219,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_2() {
+static std::vector<MixedTypedExample> examples_nchw_2 = {
 // Begin of an example
 {
 .operands = {
@@ -237,8 +256,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed_2() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -271,4 +293,6 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed_2;
+};
 
diff --git a/nn/runtime/test/generated/examples/l2_normalization.example.cpp b/nn/runtime/test/generated/examples/l2_normalization.example.cpp
index fa2ffe3..0ad0c1c 100644
--- a/nn/runtime/test/generated/examples/l2_normalization.example.cpp
+++ b/nn/runtime/test/generated/examples/l2_normalization.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: l2_normalization.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/l2_normalization_2.example.cpp b/nn/runtime/test/generated/examples/l2_normalization_2.example.cpp
index 868bb7a..6a39ed9 100644
--- a/nn/runtime/test/generated/examples/l2_normalization_2.example.cpp
+++ b/nn/runtime/test/generated/examples/l2_normalization_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: l2_normalization_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/l2_normalization_2_relaxed.example.cpp b/nn/runtime/test/generated/examples/l2_normalization_2_relaxed.example.cpp
index 25064a5..978c626 100644
--- a/nn/runtime/test/generated/examples/l2_normalization_2_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/l2_normalization_2_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: l2_normalization_2_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/l2_normalization_large.example.cpp b/nn/runtime/test/generated/examples/l2_normalization_large.example.cpp
index 5280373..e00b11b 100644
--- a/nn/runtime/test/generated/examples/l2_normalization_large.example.cpp
+++ b/nn/runtime/test/generated/examples/l2_normalization_large.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: l2_normalization_large.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/l2_normalization_large_relaxed.example.cpp b/nn/runtime/test/generated/examples/l2_normalization_large_relaxed.example.cpp
index 182a911..10a3d5f 100644
--- a/nn/runtime/test/generated/examples/l2_normalization_large_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/l2_normalization_large_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: l2_normalization_large_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/l2_normalization_relaxed.example.cpp b/nn/runtime/test/generated/examples/l2_normalization_relaxed.example.cpp
index 30cefeb..f07f5b0 100644
--- a/nn/runtime/test/generated/examples/l2_normalization_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/l2_normalization_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: l2_normalization_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/l2_normalization_v1_2.example.cpp b/nn/runtime/test/generated/examples/l2_normalization_v1_2.example.cpp
index 9394d14..0db3cd7 100644
--- a/nn/runtime/test/generated/examples/l2_normalization_v1_2.example.cpp
+++ b/nn/runtime/test/generated/examples/l2_normalization_v1_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: l2_normalization_v1_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples_dim1_axis0 = {
+std::vector<MixedTypedExample>& get_examples_dim1_axis0() {
+static std::vector<MixedTypedExample> examples_dim1_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples_dim1_axis0;
+};
 
-std::vector<MixedTypedExample> examples_dim2_axis1 = {
+std::vector<MixedTypedExample>& get_examples_dim2_axis1() {
+static std::vector<MixedTypedExample> examples_dim2_axis1 = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,11 @@
 },
 }, // End of an example
 };
+return examples_dim2_axis1;
+};
 
-std::vector<MixedTypedExample> examples_dim3_axis2 = {
+std::vector<MixedTypedExample>& get_examples_dim3_axis2() {
+static std::vector<MixedTypedExample> examples_dim3_axis2 = {
 // Begin of an example
 {
 .operands = {
@@ -101,8 +108,11 @@
 },
 }, // End of an example
 };
+return examples_dim3_axis2;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_dim1_axis0 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_dim1_axis0() {
+static std::vector<MixedTypedExample> examples_relaxed_dim1_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -135,8 +145,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_dim1_axis0;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_dim2_axis1 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_dim2_axis1() {
+static std::vector<MixedTypedExample> examples_relaxed_dim2_axis1 = {
 // Begin of an example
 {
 .operands = {
@@ -169,8 +182,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_dim2_axis1;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_dim3_axis2 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_dim3_axis2() {
+static std::vector<MixedTypedExample> examples_relaxed_dim3_axis2 = {
 // Begin of an example
 {
 .operands = {
@@ -203,8 +219,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_dim3_axis2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis0 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis0() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -237,8 +256,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis0;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis0_neg() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -271,8 +293,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis1 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis1() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis1 = {
 // Begin of an example
 {
 .operands = {
@@ -305,8 +330,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis1;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis1_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis1_neg() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis1_neg = {
 // Begin of an example
 {
 .operands = {
@@ -339,8 +367,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis1_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis2() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis2 = {
 // Begin of an example
 {
 .operands = {
@@ -373,8 +404,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis2_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis2_neg() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis2_neg = {
 // Begin of an example
 {
 .operands = {
@@ -407,8 +441,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis2_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis3 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis3() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis3 = {
 // Begin of an example
 {
 .operands = {
@@ -441,8 +478,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis3;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis3_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis3_neg() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis3_neg = {
 // Begin of an example
 {
 .operands = {
@@ -475,8 +515,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis3_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim3_axis0 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim3_axis0() {
+static std::vector<MixedTypedExample> examples_axis_dim3_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -509,8 +552,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim3_axis0;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim3_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_dim3_axis0_neg() {
+static std::vector<MixedTypedExample> examples_axis_dim3_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -543,8 +589,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim3_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim3_axis1 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim3_axis1() {
+static std::vector<MixedTypedExample> examples_axis_dim3_axis1 = {
 // Begin of an example
 {
 .operands = {
@@ -577,8 +626,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim3_axis1;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim3_axis1_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_dim3_axis1_neg() {
+static std::vector<MixedTypedExample> examples_axis_dim3_axis1_neg = {
 // Begin of an example
 {
 .operands = {
@@ -611,8 +663,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim3_axis1_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim3_axis2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim3_axis2() {
+static std::vector<MixedTypedExample> examples_axis_dim3_axis2 = {
 // Begin of an example
 {
 .operands = {
@@ -645,8 +700,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim3_axis2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim3_axis2_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_dim3_axis2_neg() {
+static std::vector<MixedTypedExample> examples_axis_dim3_axis2_neg = {
 // Begin of an example
 {
 .operands = {
@@ -679,8 +737,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim3_axis2_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim2_axis0 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim2_axis0() {
+static std::vector<MixedTypedExample> examples_axis_dim2_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -713,8 +774,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim2_axis0;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim2_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_dim2_axis0_neg() {
+static std::vector<MixedTypedExample> examples_axis_dim2_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -747,8 +811,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim2_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim2_axis1 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim2_axis1() {
+static std::vector<MixedTypedExample> examples_axis_dim2_axis1 = {
 // Begin of an example
 {
 .operands = {
@@ -781,8 +848,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim2_axis1;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim2_axis1_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_dim2_axis1_neg() {
+static std::vector<MixedTypedExample> examples_axis_dim2_axis1_neg = {
 // Begin of an example
 {
 .operands = {
@@ -815,8 +885,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim2_axis1_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim1_axis0 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim1_axis0() {
+static std::vector<MixedTypedExample> examples_axis_dim1_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -849,8 +922,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim1_axis0;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim1_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_dim1_axis0_neg() {
+static std::vector<MixedTypedExample> examples_axis_dim1_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -883,8 +959,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim1_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis0 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis0() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -917,8 +996,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis0;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis0_neg() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -951,8 +1033,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis1 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis1() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis1 = {
 // Begin of an example
 {
 .operands = {
@@ -985,8 +1070,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis1;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis1_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis1_neg() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis1_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1019,8 +1107,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis1_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis2 = {
 // Begin of an example
 {
 .operands = {
@@ -1053,8 +1144,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis2_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis2_neg() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis2_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1087,8 +1181,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis2_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis3 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis3() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis3 = {
 // Begin of an example
 {
 .operands = {
@@ -1121,8 +1218,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis3;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis3_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis3_neg() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis3_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1155,8 +1255,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis3_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis0 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim3_axis0() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -1189,8 +1292,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim3_axis0;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim3_axis0_neg() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1223,8 +1329,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim3_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis1 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim3_axis1() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis1 = {
 // Begin of an example
 {
 .operands = {
@@ -1257,8 +1366,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim3_axis1;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis1_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim3_axis1_neg() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis1_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1291,8 +1403,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim3_axis1_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim3_axis2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis2 = {
 // Begin of an example
 {
 .operands = {
@@ -1325,8 +1440,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim3_axis2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis2_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim3_axis2_neg() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis2_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1359,8 +1477,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim3_axis2_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis0 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim2_axis0() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -1393,8 +1514,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim2_axis0;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim2_axis0_neg() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1427,8 +1551,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim2_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis1 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim2_axis1() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis1 = {
 // Begin of an example
 {
 .operands = {
@@ -1461,8 +1588,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim2_axis1;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis1_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim2_axis1_neg() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis1_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1495,8 +1625,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim2_axis1_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim1_axis0 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim1_axis0() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim1_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -1529,8 +1662,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim1_axis0;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim1_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim1_axis0_neg() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim1_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1563,4 +1699,6 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim1_axis0_neg;
+};
 
diff --git a/nn/runtime/test/generated/examples/l2_pool_float.example.cpp b/nn/runtime/test/generated/examples/l2_pool_float.example.cpp
index 0b62d83..c08e934 100644
--- a/nn/runtime/test/generated/examples/l2_pool_float.example.cpp
+++ b/nn/runtime/test/generated/examples/l2_pool_float.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: l2_pool_float.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/l2_pool_float_2.example.cpp b/nn/runtime/test/generated/examples/l2_pool_float_2.example.cpp
index b647766..3e1f53c 100644
--- a/nn/runtime/test/generated/examples/l2_pool_float_2.example.cpp
+++ b/nn/runtime/test/generated/examples/l2_pool_float_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: l2_pool_float_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/l2_pool_float_2_relaxed.example.cpp b/nn/runtime/test/generated/examples/l2_pool_float_2_relaxed.example.cpp
index 9cdeaee..fd5a70a 100644
--- a/nn/runtime/test/generated/examples/l2_pool_float_2_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/l2_pool_float_2_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: l2_pool_float_2_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/l2_pool_float_large.example.cpp b/nn/runtime/test/generated/examples/l2_pool_float_large.example.cpp
index f816a32..1654526 100644
--- a/nn/runtime/test/generated/examples/l2_pool_float_large.example.cpp
+++ b/nn/runtime/test/generated/examples/l2_pool_float_large.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: l2_pool_float_large.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/l2_pool_float_large_relaxed.example.cpp b/nn/runtime/test/generated/examples/l2_pool_float_large_relaxed.example.cpp
index c71f640..478dbc3 100644
--- a/nn/runtime/test/generated/examples/l2_pool_float_large_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/l2_pool_float_large_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: l2_pool_float_large_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/l2_pool_float_relaxed.example.cpp b/nn/runtime/test/generated/examples/l2_pool_float_relaxed.example.cpp
index e5f04e8..d840bce 100644
--- a/nn/runtime/test/generated/examples/l2_pool_float_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/l2_pool_float_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: l2_pool_float_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/l2_pool_v1_2.example.cpp b/nn/runtime/test/generated/examples/l2_pool_v1_2.example.cpp
index 8acf266..320e6a9 100644
--- a/nn/runtime/test/generated/examples/l2_pool_v1_2.example.cpp
+++ b/nn/runtime/test/generated/examples/l2_pool_v1_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: l2_pool_v1_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples_nhwc = {
+std::vector<MixedTypedExample>& get_examples_nhwc() {
+static std::vector<MixedTypedExample> examples_nhwc = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nchw = {
+std::vector<MixedTypedExample>& get_examples_nchw() {
+static std::vector<MixedTypedExample> examples_nchw = {
 // Begin of an example
 {
 .operands = {
@@ -101,8 +108,11 @@
 },
 }, // End of an example
 };
+return examples_nchw;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -135,8 +145,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_2() {
+static std::vector<MixedTypedExample> examples_nhwc_2 = {
 // Begin of an example
 {
 .operands = {
@@ -169,8 +182,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed_2() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -203,8 +219,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_2() {
+static std::vector<MixedTypedExample> examples_nchw_2 = {
 // Begin of an example
 {
 .operands = {
@@ -237,8 +256,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed_2() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -271,8 +293,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_large_nhwc = {
+std::vector<MixedTypedExample>& get_examples_large_nhwc() {
+static std::vector<MixedTypedExample> examples_large_nhwc = {
 // Begin of an example
 {
 .operands = {
@@ -305,8 +330,11 @@
 },
 }, // End of an example
 };
+return examples_large_nhwc;
+};
 
-std::vector<MixedTypedExample> examples_large_nhwc_relaxed = {
+std::vector<MixedTypedExample>& get_examples_large_nhwc_relaxed() {
+static std::vector<MixedTypedExample> examples_large_nhwc_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -339,8 +367,11 @@
 },
 }, // End of an example
 };
+return examples_large_nhwc_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_large_nchw = {
+std::vector<MixedTypedExample>& get_examples_large_nchw() {
+static std::vector<MixedTypedExample> examples_large_nchw = {
 // Begin of an example
 {
 .operands = {
@@ -373,8 +404,11 @@
 },
 }, // End of an example
 };
+return examples_large_nchw;
+};
 
-std::vector<MixedTypedExample> examples_large_nchw_relaxed = {
+std::vector<MixedTypedExample>& get_examples_large_nchw_relaxed() {
+static std::vector<MixedTypedExample> examples_large_nchw_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -407,4 +441,6 @@
 },
 }, // End of an example
 };
+return examples_large_nchw_relaxed;
+};
 
diff --git a/nn/runtime/test/generated/examples/layer_norm_lstm.example.cpp b/nn/runtime/test/generated/examples/layer_norm_lstm.example.cpp
index 422dcfe..d077091 100644
--- a/nn/runtime/test/generated/examples/layer_norm_lstm.example.cpp
+++ b/nn/runtime/test/generated/examples/layer_norm_lstm.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: layer_norm_lstm.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -95,4 +96,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/local_response_norm_float_1.example.cpp b/nn/runtime/test/generated/examples/local_response_norm_float_1.example.cpp
index 3722a73..fb1157c 100644
--- a/nn/runtime/test/generated/examples/local_response_norm_float_1.example.cpp
+++ b/nn/runtime/test/generated/examples/local_response_norm_float_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: local_response_norm_float_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/local_response_norm_float_1_relaxed.example.cpp b/nn/runtime/test/generated/examples/local_response_norm_float_1_relaxed.example.cpp
index 9362bb6..856267e 100644
--- a/nn/runtime/test/generated/examples/local_response_norm_float_1_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/local_response_norm_float_1_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: local_response_norm_float_1_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/local_response_norm_float_2.example.cpp b/nn/runtime/test/generated/examples/local_response_norm_float_2.example.cpp
index 1b9b0fa..7cf9bc3 100644
--- a/nn/runtime/test/generated/examples/local_response_norm_float_2.example.cpp
+++ b/nn/runtime/test/generated/examples/local_response_norm_float_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: local_response_norm_float_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/local_response_norm_float_2_relaxed.example.cpp b/nn/runtime/test/generated/examples/local_response_norm_float_2_relaxed.example.cpp
index 2b09c42..32e27cf 100644
--- a/nn/runtime/test/generated/examples/local_response_norm_float_2_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/local_response_norm_float_2_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: local_response_norm_float_2_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/local_response_norm_float_3.example.cpp b/nn/runtime/test/generated/examples/local_response_norm_float_3.example.cpp
index 1519a5b..cc30984 100644
--- a/nn/runtime/test/generated/examples/local_response_norm_float_3.example.cpp
+++ b/nn/runtime/test/generated/examples/local_response_norm_float_3.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: local_response_norm_float_3.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/local_response_norm_float_3_relaxed.example.cpp b/nn/runtime/test/generated/examples/local_response_norm_float_3_relaxed.example.cpp
index 4be8055..18e2208 100644
--- a/nn/runtime/test/generated/examples/local_response_norm_float_3_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/local_response_norm_float_3_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: local_response_norm_float_3_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/local_response_norm_float_4.example.cpp b/nn/runtime/test/generated/examples/local_response_norm_float_4.example.cpp
index 2ff72b9..6069747 100644
--- a/nn/runtime/test/generated/examples/local_response_norm_float_4.example.cpp
+++ b/nn/runtime/test/generated/examples/local_response_norm_float_4.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: local_response_norm_float_4.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/local_response_norm_float_4_relaxed.example.cpp b/nn/runtime/test/generated/examples/local_response_norm_float_4_relaxed.example.cpp
index 8c90386..90ff53b 100644
--- a/nn/runtime/test/generated/examples/local_response_norm_float_4_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/local_response_norm_float_4_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: local_response_norm_float_4_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/local_response_normalization_v1_2.example.cpp b/nn/runtime/test/generated/examples/local_response_normalization_v1_2.example.cpp
index c715237..890deb3 100644
--- a/nn/runtime/test/generated/examples/local_response_normalization_v1_2.example.cpp
+++ b/nn/runtime/test/generated/examples/local_response_normalization_v1_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: local_response_normalization_v1_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples_axis_dim4_axis0 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis0() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis0;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis0_neg() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis1 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis1() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis1 = {
 // Begin of an example
 {
 .operands = {
@@ -101,8 +108,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis1;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis1_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis1_neg() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis1_neg = {
 // Begin of an example
 {
 .operands = {
@@ -135,8 +145,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis1_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis2() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis2 = {
 // Begin of an example
 {
 .operands = {
@@ -169,8 +182,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis2_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis2_neg() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis2_neg = {
 // Begin of an example
 {
 .operands = {
@@ -203,8 +219,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis2_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis3 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis3() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis3 = {
 // Begin of an example
 {
 .operands = {
@@ -237,8 +256,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis3;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis3_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis3_neg() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis3_neg = {
 // Begin of an example
 {
 .operands = {
@@ -271,8 +293,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis3_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim3_axis0 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim3_axis0() {
+static std::vector<MixedTypedExample> examples_axis_dim3_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -305,8 +330,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim3_axis0;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim3_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_dim3_axis0_neg() {
+static std::vector<MixedTypedExample> examples_axis_dim3_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -339,8 +367,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim3_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim3_axis1 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim3_axis1() {
+static std::vector<MixedTypedExample> examples_axis_dim3_axis1 = {
 // Begin of an example
 {
 .operands = {
@@ -373,8 +404,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim3_axis1;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim3_axis1_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_dim3_axis1_neg() {
+static std::vector<MixedTypedExample> examples_axis_dim3_axis1_neg = {
 // Begin of an example
 {
 .operands = {
@@ -407,8 +441,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim3_axis1_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim3_axis2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim3_axis2() {
+static std::vector<MixedTypedExample> examples_axis_dim3_axis2 = {
 // Begin of an example
 {
 .operands = {
@@ -441,8 +478,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim3_axis2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim3_axis2_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_dim3_axis2_neg() {
+static std::vector<MixedTypedExample> examples_axis_dim3_axis2_neg = {
 // Begin of an example
 {
 .operands = {
@@ -475,8 +515,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim3_axis2_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim2_axis0 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim2_axis0() {
+static std::vector<MixedTypedExample> examples_axis_dim2_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -509,8 +552,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim2_axis0;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim2_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_dim2_axis0_neg() {
+static std::vector<MixedTypedExample> examples_axis_dim2_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -543,8 +589,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim2_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim2_axis1 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim2_axis1() {
+static std::vector<MixedTypedExample> examples_axis_dim2_axis1 = {
 // Begin of an example
 {
 .operands = {
@@ -577,8 +626,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim2_axis1;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim2_axis1_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_dim2_axis1_neg() {
+static std::vector<MixedTypedExample> examples_axis_dim2_axis1_neg = {
 // Begin of an example
 {
 .operands = {
@@ -611,8 +663,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim2_axis1_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim1_axis0 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim1_axis0() {
+static std::vector<MixedTypedExample> examples_axis_dim1_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -645,8 +700,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim1_axis0;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim1_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_dim1_axis0_neg() {
+static std::vector<MixedTypedExample> examples_axis_dim1_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -679,8 +737,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim1_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis0 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis0() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -713,8 +774,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis0;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis0_neg() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -747,8 +811,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis1 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis1() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis1 = {
 // Begin of an example
 {
 .operands = {
@@ -781,8 +848,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis1;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis1_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis1_neg() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis1_neg = {
 // Begin of an example
 {
 .operands = {
@@ -815,8 +885,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis1_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis2 = {
 // Begin of an example
 {
 .operands = {
@@ -849,8 +922,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis2_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis2_neg() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis2_neg = {
 // Begin of an example
 {
 .operands = {
@@ -883,8 +959,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis2_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis3 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis3() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis3 = {
 // Begin of an example
 {
 .operands = {
@@ -917,8 +996,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis3;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis3_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis3_neg() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis3_neg = {
 // Begin of an example
 {
 .operands = {
@@ -951,8 +1033,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis3_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis0 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim3_axis0() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -985,8 +1070,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim3_axis0;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim3_axis0_neg() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1019,8 +1107,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim3_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis1 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim3_axis1() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis1 = {
 // Begin of an example
 {
 .operands = {
@@ -1053,8 +1144,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim3_axis1;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis1_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim3_axis1_neg() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis1_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1087,8 +1181,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim3_axis1_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim3_axis2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis2 = {
 // Begin of an example
 {
 .operands = {
@@ -1121,8 +1218,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim3_axis2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis2_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim3_axis2_neg() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis2_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1155,8 +1255,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim3_axis2_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis0 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim2_axis0() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -1189,8 +1292,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim2_axis0;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim2_axis0_neg() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1223,8 +1329,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim2_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis1 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim2_axis1() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis1 = {
 // Begin of an example
 {
 .operands = {
@@ -1257,8 +1366,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim2_axis1;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis1_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim2_axis1_neg() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis1_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1291,8 +1403,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim2_axis1_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim1_axis0 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim1_axis0() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim1_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -1325,8 +1440,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim1_axis0;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim1_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim1_axis0_neg() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim1_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1359,8 +1477,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim1_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis0_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis0_2() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis0_2 = {
 // Begin of an example
 {
 .operands = {
@@ -1393,8 +1514,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis0_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis0_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis0_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis0_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -1427,8 +1551,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis0_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis1_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis1_2() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis1_2 = {
 // Begin of an example
 {
 .operands = {
@@ -1461,8 +1588,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis1_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis1_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis1_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis1_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -1495,8 +1625,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis1_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis2_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis2_2() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis2_2 = {
 // Begin of an example
 {
 .operands = {
@@ -1529,8 +1662,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis2_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis2_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis2_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis2_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -1563,8 +1699,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis2_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis3_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis3_2() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis3_2 = {
 // Begin of an example
 {
 .operands = {
@@ -1597,8 +1736,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis3_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis3_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis3_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis3_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -1631,8 +1773,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis3_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim3_axis0_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim3_axis0_2() {
+static std::vector<MixedTypedExample> examples_axis_dim3_axis0_2 = {
 // Begin of an example
 {
 .operands = {
@@ -1665,8 +1810,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim3_axis0_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim3_axis0_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim3_axis0_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_dim3_axis0_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -1699,8 +1847,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim3_axis0_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim3_axis1_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim3_axis1_2() {
+static std::vector<MixedTypedExample> examples_axis_dim3_axis1_2 = {
 // Begin of an example
 {
 .operands = {
@@ -1733,8 +1884,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim3_axis1_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim3_axis1_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim3_axis1_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_dim3_axis1_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -1767,8 +1921,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim3_axis1_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim3_axis2_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim3_axis2_2() {
+static std::vector<MixedTypedExample> examples_axis_dim3_axis2_2 = {
 // Begin of an example
 {
 .operands = {
@@ -1801,8 +1958,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim3_axis2_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim3_axis2_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim3_axis2_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_dim3_axis2_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -1835,8 +1995,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim3_axis2_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim2_axis0_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim2_axis0_2() {
+static std::vector<MixedTypedExample> examples_axis_dim2_axis0_2 = {
 // Begin of an example
 {
 .operands = {
@@ -1869,8 +2032,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim2_axis0_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim2_axis0_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim2_axis0_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_dim2_axis0_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -1903,8 +2069,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim2_axis0_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim2_axis1_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim2_axis1_2() {
+static std::vector<MixedTypedExample> examples_axis_dim2_axis1_2 = {
 // Begin of an example
 {
 .operands = {
@@ -1937,8 +2106,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim2_axis1_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim2_axis1_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim2_axis1_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_dim2_axis1_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -1971,8 +2143,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim2_axis1_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim1_axis0_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim1_axis0_2() {
+static std::vector<MixedTypedExample> examples_axis_dim1_axis0_2 = {
 // Begin of an example
 {
 .operands = {
@@ -2005,8 +2180,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim1_axis0_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim1_axis0_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim1_axis0_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_dim1_axis0_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -2039,8 +2217,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim1_axis0_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis0_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis0_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis0_2 = {
 // Begin of an example
 {
 .operands = {
@@ -2073,8 +2254,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis0_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis0_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis0_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis0_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -2107,8 +2291,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis0_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis1_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis1_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis1_2 = {
 // Begin of an example
 {
 .operands = {
@@ -2141,8 +2328,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis1_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis1_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis1_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis1_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -2175,8 +2365,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis1_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis2_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis2_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis2_2 = {
 // Begin of an example
 {
 .operands = {
@@ -2209,8 +2402,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis2_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis2_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis2_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis2_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -2243,8 +2439,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis2_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis3_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis3_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis3_2 = {
 // Begin of an example
 {
 .operands = {
@@ -2277,8 +2476,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis3_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis3_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis3_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis3_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -2311,8 +2513,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis3_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis0_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim3_axis0_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis0_2 = {
 // Begin of an example
 {
 .operands = {
@@ -2345,8 +2550,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim3_axis0_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis0_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim3_axis0_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis0_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -2379,8 +2587,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim3_axis0_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis1_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim3_axis1_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis1_2 = {
 // Begin of an example
 {
 .operands = {
@@ -2413,8 +2624,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim3_axis1_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis1_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim3_axis1_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis1_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -2447,8 +2661,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim3_axis1_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis2_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim3_axis2_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis2_2 = {
 // Begin of an example
 {
 .operands = {
@@ -2481,8 +2698,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim3_axis2_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis2_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim3_axis2_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis2_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -2515,8 +2735,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim3_axis2_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis0_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim2_axis0_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis0_2 = {
 // Begin of an example
 {
 .operands = {
@@ -2549,8 +2772,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim2_axis0_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis0_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim2_axis0_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis0_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -2583,8 +2809,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim2_axis0_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis1_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim2_axis1_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis1_2 = {
 // Begin of an example
 {
 .operands = {
@@ -2617,8 +2846,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim2_axis1_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis1_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim2_axis1_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis1_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -2651,8 +2883,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim2_axis1_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim1_axis0_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim1_axis0_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim1_axis0_2 = {
 // Begin of an example
 {
 .operands = {
@@ -2685,8 +2920,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim1_axis0_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim1_axis0_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim1_axis0_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim1_axis0_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -2719,8 +2957,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim1_axis0_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis0_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis0_3() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis0_3 = {
 // Begin of an example
 {
 .operands = {
@@ -2753,8 +2994,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis0_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis0_neg_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis0_neg_3() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis0_neg_3 = {
 // Begin of an example
 {
 .operands = {
@@ -2787,8 +3031,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis0_neg_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis1_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis1_3() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis1_3 = {
 // Begin of an example
 {
 .operands = {
@@ -2821,8 +3068,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis1_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis1_neg_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis1_neg_3() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis1_neg_3 = {
 // Begin of an example
 {
 .operands = {
@@ -2855,8 +3105,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis1_neg_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis2_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis2_3() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis2_3 = {
 // Begin of an example
 {
 .operands = {
@@ -2889,8 +3142,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis2_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis2_neg_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis2_neg_3() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis2_neg_3 = {
 // Begin of an example
 {
 .operands = {
@@ -2923,8 +3179,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis2_neg_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis3_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis3_3() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis3_3 = {
 // Begin of an example
 {
 .operands = {
@@ -2957,8 +3216,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis3_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis3_neg_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis3_neg_3() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis3_neg_3 = {
 // Begin of an example
 {
 .operands = {
@@ -2991,8 +3253,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis3_neg_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim3_axis0_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim3_axis0_3() {
+static std::vector<MixedTypedExample> examples_axis_dim3_axis0_3 = {
 // Begin of an example
 {
 .operands = {
@@ -3025,8 +3290,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim3_axis0_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim3_axis0_neg_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim3_axis0_neg_3() {
+static std::vector<MixedTypedExample> examples_axis_dim3_axis0_neg_3 = {
 // Begin of an example
 {
 .operands = {
@@ -3059,8 +3327,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim3_axis0_neg_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim3_axis1_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim3_axis1_3() {
+static std::vector<MixedTypedExample> examples_axis_dim3_axis1_3 = {
 // Begin of an example
 {
 .operands = {
@@ -3093,8 +3364,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim3_axis1_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim3_axis1_neg_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim3_axis1_neg_3() {
+static std::vector<MixedTypedExample> examples_axis_dim3_axis1_neg_3 = {
 // Begin of an example
 {
 .operands = {
@@ -3127,8 +3401,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim3_axis1_neg_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim3_axis2_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim3_axis2_3() {
+static std::vector<MixedTypedExample> examples_axis_dim3_axis2_3 = {
 // Begin of an example
 {
 .operands = {
@@ -3161,8 +3438,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim3_axis2_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim3_axis2_neg_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim3_axis2_neg_3() {
+static std::vector<MixedTypedExample> examples_axis_dim3_axis2_neg_3 = {
 // Begin of an example
 {
 .operands = {
@@ -3195,8 +3475,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim3_axis2_neg_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim2_axis0_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim2_axis0_3() {
+static std::vector<MixedTypedExample> examples_axis_dim2_axis0_3 = {
 // Begin of an example
 {
 .operands = {
@@ -3229,8 +3512,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim2_axis0_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim2_axis0_neg_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim2_axis0_neg_3() {
+static std::vector<MixedTypedExample> examples_axis_dim2_axis0_neg_3 = {
 // Begin of an example
 {
 .operands = {
@@ -3263,8 +3549,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim2_axis0_neg_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim2_axis1_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim2_axis1_3() {
+static std::vector<MixedTypedExample> examples_axis_dim2_axis1_3 = {
 // Begin of an example
 {
 .operands = {
@@ -3297,8 +3586,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim2_axis1_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim2_axis1_neg_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim2_axis1_neg_3() {
+static std::vector<MixedTypedExample> examples_axis_dim2_axis1_neg_3 = {
 // Begin of an example
 {
 .operands = {
@@ -3331,8 +3623,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim2_axis1_neg_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim1_axis0_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim1_axis0_3() {
+static std::vector<MixedTypedExample> examples_axis_dim1_axis0_3 = {
 // Begin of an example
 {
 .operands = {
@@ -3365,8 +3660,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim1_axis0_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim1_axis0_neg_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim1_axis0_neg_3() {
+static std::vector<MixedTypedExample> examples_axis_dim1_axis0_neg_3 = {
 // Begin of an example
 {
 .operands = {
@@ -3399,8 +3697,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim1_axis0_neg_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis0_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis0_3() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis0_3 = {
 // Begin of an example
 {
 .operands = {
@@ -3433,8 +3734,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis0_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis0_neg_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis0_neg_3() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis0_neg_3 = {
 // Begin of an example
 {
 .operands = {
@@ -3467,8 +3771,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis0_neg_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis1_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis1_3() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis1_3 = {
 // Begin of an example
 {
 .operands = {
@@ -3501,8 +3808,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis1_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis1_neg_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis1_neg_3() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis1_neg_3 = {
 // Begin of an example
 {
 .operands = {
@@ -3535,8 +3845,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis1_neg_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis2_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis2_3() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis2_3 = {
 // Begin of an example
 {
 .operands = {
@@ -3569,8 +3882,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis2_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis2_neg_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis2_neg_3() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis2_neg_3 = {
 // Begin of an example
 {
 .operands = {
@@ -3603,8 +3919,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis2_neg_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis3_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis3_3() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis3_3 = {
 // Begin of an example
 {
 .operands = {
@@ -3637,8 +3956,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis3_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis3_neg_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis3_neg_3() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis3_neg_3 = {
 // Begin of an example
 {
 .operands = {
@@ -3671,8 +3993,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis3_neg_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis0_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim3_axis0_3() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis0_3 = {
 // Begin of an example
 {
 .operands = {
@@ -3705,8 +4030,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim3_axis0_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis0_neg_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim3_axis0_neg_3() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis0_neg_3 = {
 // Begin of an example
 {
 .operands = {
@@ -3739,8 +4067,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim3_axis0_neg_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis1_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim3_axis1_3() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis1_3 = {
 // Begin of an example
 {
 .operands = {
@@ -3773,8 +4104,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim3_axis1_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis1_neg_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim3_axis1_neg_3() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis1_neg_3 = {
 // Begin of an example
 {
 .operands = {
@@ -3807,8 +4141,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim3_axis1_neg_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis2_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim3_axis2_3() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis2_3 = {
 // Begin of an example
 {
 .operands = {
@@ -3841,8 +4178,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim3_axis2_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis2_neg_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim3_axis2_neg_3() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis2_neg_3 = {
 // Begin of an example
 {
 .operands = {
@@ -3875,8 +4215,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim3_axis2_neg_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis0_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim2_axis0_3() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis0_3 = {
 // Begin of an example
 {
 .operands = {
@@ -3909,8 +4252,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim2_axis0_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis0_neg_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim2_axis0_neg_3() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis0_neg_3 = {
 // Begin of an example
 {
 .operands = {
@@ -3943,8 +4289,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim2_axis0_neg_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis1_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim2_axis1_3() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis1_3 = {
 // Begin of an example
 {
 .operands = {
@@ -3977,8 +4326,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim2_axis1_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis1_neg_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim2_axis1_neg_3() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis1_neg_3 = {
 // Begin of an example
 {
 .operands = {
@@ -4011,8 +4363,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim2_axis1_neg_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim1_axis0_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim1_axis0_3() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim1_axis0_3 = {
 // Begin of an example
 {
 .operands = {
@@ -4045,8 +4400,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim1_axis0_3;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim1_axis0_neg_3 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim1_axis0_neg_3() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim1_axis0_neg_3 = {
 // Begin of an example
 {
 .operands = {
@@ -4079,8 +4437,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim1_axis0_neg_3;
+};
 
-std::vector<MixedTypedExample> examples_dim1_axis0 = {
+std::vector<MixedTypedExample>& get_examples_dim1_axis0() {
+static std::vector<MixedTypedExample> examples_dim1_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -4113,8 +4474,11 @@
 },
 }, // End of an example
 };
+return examples_dim1_axis0;
+};
 
-std::vector<MixedTypedExample> examples_dim2_axis1 = {
+std::vector<MixedTypedExample>& get_examples_dim2_axis1() {
+static std::vector<MixedTypedExample> examples_dim2_axis1 = {
 // Begin of an example
 {
 .operands = {
@@ -4147,8 +4511,11 @@
 },
 }, // End of an example
 };
+return examples_dim2_axis1;
+};
 
-std::vector<MixedTypedExample> examples_dim3_axis2 = {
+std::vector<MixedTypedExample>& get_examples_dim3_axis2() {
+static std::vector<MixedTypedExample> examples_dim3_axis2 = {
 // Begin of an example
 {
 .operands = {
@@ -4181,8 +4548,11 @@
 },
 }, // End of an example
 };
+return examples_dim3_axis2;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_dim1_axis0 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_dim1_axis0() {
+static std::vector<MixedTypedExample> examples_relaxed_dim1_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -4215,8 +4585,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_dim1_axis0;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_dim2_axis1 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_dim2_axis1() {
+static std::vector<MixedTypedExample> examples_relaxed_dim2_axis1 = {
 // Begin of an example
 {
 .operands = {
@@ -4249,8 +4622,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_dim2_axis1;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_dim3_axis2 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_dim3_axis2() {
+static std::vector<MixedTypedExample> examples_relaxed_dim3_axis2 = {
 // Begin of an example
 {
 .operands = {
@@ -4283,4 +4659,6 @@
 },
 }, // End of an example
 };
+return examples_relaxed_dim3_axis2;
+};
 
diff --git a/nn/runtime/test/generated/examples/logistic_float16_1.example.cpp b/nn/runtime/test/generated/examples/logistic_float16_1.example.cpp
index 67ad0f6..95707c0 100644
--- a/nn/runtime/test/generated/examples/logistic_float16_1.example.cpp
+++ b/nn/runtime/test/generated/examples/logistic_float16_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: logistic_float16_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -12,7 +13,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, 2.0f, 4.0f, 8.0f}}},
@@ -25,7 +26,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.7310585975646973f, 0.8807970285415649f, 0.9820137619972229f, 0.9996646642684937f}}},
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/logistic_float16_2.example.cpp b/nn/runtime/test/generated/examples/logistic_float16_2.example.cpp
index 85d81a3..6ec4236 100644
--- a/nn/runtime/test/generated/examples/logistic_float16_2.example.cpp
+++ b/nn/runtime/test/generated/examples/logistic_float16_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: logistic_float16_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -12,7 +13,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.0f, 1.0f, 510.0f, 3.0f, 508.0f, 5.0f, 506.0f, 7.0f, 504.0f, 9.0f, 502.0f, 11.0f, 500.0f, 13.0f, 498.0f, 15.0f, 496.0f, 17.0f, 494.0f, 19.0f, 492.0f, 21.0f, 490.0f, 23.0f, 488.0f, 25.0f, 486.0f, 27.0f, 484.0f, 29.0f, 482.0f, 31.0f, 480.0f, 33.0f, 478.0f, 35.0f, 476.0f, 37.0f, 474.0f, 39.0f, 472.0f, 41.0f, 470.0f, 43.0f, 468.0f, 45.0f, 466.0f, 47.0f, 464.0f, 49.0f, 462.0f, 51.0f, 460.0f, 53.0f, 458.0f, 55.0f, 456.0f, 57.0f, 454.0f, 59.0f, 452.0f, 61.0f, 450.0f, 63.0f, 448.0f, 65.0f, 446.0f, 67.0f, 444.0f, 69.0f, 442.0f, 71.0f, 440.0f, 73.0f, 438.0f, 75.0f, 436.0f, 77.0f, 434.0f, 79.0f, 432.0f, 81.0f, 430.0f, 83.0f, 428.0f, 85.0f, 426.0f, 87.0f, 424.0f, 89.0f, 422.0f, 91.0f, 420.0f, 93.0f, 418.0f, 95.0f, 416.0f, 97.0f, 414.0f, 99.0f, 412.0f, 101.0f, 410.0f, 103.0f, 408.0f, 105.0f, 406.0f, 107.0f, 404.0f, 109.0f, 402.0f, 111.0f, 400.0f, 113.0f, 398.0f, 115.0f, 396.0f, 117.0f, 394.0f, 119.0f, 392.0f, 121.0f, 390.0f, 123.0f, 388.0f, 125.0f, 386.0f, 127.0f, 384.0f, 129.0f, 382.0f, 131.0f, 380.0f, 133.0f, 378.0f, 135.0f, 376.0f, 137.0f, 374.0f, 139.0f, 372.0f, 141.0f, 370.0f, 143.0f, 368.0f, 145.0f, 366.0f, 147.0f, 364.0f, 149.0f, 362.0f, 151.0f, 360.0f, 153.0f, 358.0f, 155.0f, 356.0f, 157.0f, 354.0f, 159.0f, 352.0f, 161.0f, 350.0f, 163.0f, 348.0f, 165.0f, 346.0f, 167.0f, 344.0f, 169.0f, 342.0f, 171.0f, 340.0f, 173.0f, 338.0f, 175.0f, 336.0f, 177.0f, 334.0f, 179.0f, 332.0f, 181.0f, 330.0f, 183.0f, 328.0f, 185.0f, 326.0f, 187.0f, 324.0f, 189.0f, 322.0f, 191.0f, 320.0f, 193.0f, 318.0f, 195.0f, 316.0f, 197.0f, 314.0f, 199.0f, 312.0f, 201.0f, 310.0f, 203.0f, 308.0f, 205.0f, 306.0f, 207.0f, 304.0f, 209.0f, 302.0f, 211.0f, 300.0f, 213.0f, 298.0f, 215.0f, 296.0f, 217.0f, 294.0f, 219.0f, 292.0f, 221.0f, 290.0f, 223.0f, 288.0f, 225.0f, 286.0f, 227.0f, 284.0f, 229.0f, 282.0f, 231.0f, 280.0f, 233.0f, 278.0f, 235.0f, 276.0f, 237.0f, 274.0f, 239.0f, 272.0f, 241.0f, 270.0f, 243.0f, 268.0f, 245.0f, 266.0f, 247.0f, 264.0f, 249.0f, 262.0f, 251.0f, 260.0f, 253.0f, 258.0f, 255.0f, 
256.0f, 257.0f, 254.0f, 259.0f, 252.0f, 261.0f, 250.0f, 263.0f, 248.0f, 265.0f, 246.0f, 267.0f, 244.0f, 269.0f, 242.0f, 271.0f, 240.0f, 273.0f, 238.0f, 275.0f, 236.0f, 277.0f, 234.0f, 279.0f, 232.0f, 281.0f, 230.0f, 283.0f, 228.0f, 285.0f, 226.0f, 287.0f, 224.0f, 289.0f, 222.0f, 291.0f, 220.0f, 293.0f, 218.0f, 295.0f, 216.0f, 297.0f, 214.0f, 299.0f, 212.0f, 301.0f, 210.0f, 303.0f, 208.0f, 305.0f, 206.0f, 307.0f, 204.0f, 309.0f, 202.0f, 311.0f, 200.0f, 313.0f, 198.0f, 315.0f, 196.0f, 317.0f, 194.0f, 319.0f, 192.0f, 321.0f, 190.0f, 323.0f, 188.0f, 325.0f, 186.0f, 327.0f, 184.0f, 329.0f, 182.0f, 331.0f, 180.0f, 333.0f, 178.0f, 335.0f, 176.0f, 337.0f, 174.0f, 339.0f, 172.0f, 341.0f, 170.0f, 343.0f, 168.0f, 345.0f, 166.0f, 347.0f, 164.0f, 349.0f, 162.0f, 351.0f, 160.0f, 353.0f, 158.0f, 355.0f, 156.0f, 357.0f, 154.0f, 359.0f, 152.0f, 361.0f, 150.0f, 363.0f, 148.0f, 365.0f, 146.0f, 367.0f, 144.0f, 369.0f, 142.0f, 371.0f, 140.0f, 373.0f, 138.0f, 375.0f, 136.0f, 377.0f, 134.0f, 379.0f, 132.0f, 381.0f, 130.0f, 383.0f, 128.0f, 385.0f, 126.0f, 387.0f, 124.0f, 389.0f, 122.0f, 391.0f, 120.0f, 393.0f, 118.0f, 395.0f, 116.0f, 397.0f, 114.0f, 399.0f, 112.0f, 401.0f, 110.0f, 403.0f, 108.0f, 405.0f, 106.0f, 407.0f, 104.0f, 409.0f, 102.0f, 411.0f, 100.0f, 413.0f, 98.0f, 415.0f, 96.0f, 417.0f, 94.0f, 419.0f, 92.0f, 421.0f, 90.0f, 423.0f, 88.0f, 425.0f, 86.0f, 427.0f, 84.0f, 429.0f, 82.0f, 431.0f, 80.0f, 433.0f, 78.0f, 435.0f, 76.0f, 437.0f, 74.0f, 439.0f, 72.0f, 441.0f, 70.0f, 443.0f, 68.0f, 445.0f, 66.0f, 447.0f, 64.0f, 449.0f, 62.0f, 451.0f, 60.0f, 453.0f, 58.0f, 455.0f, 56.0f, 457.0f, 54.0f, 459.0f, 52.0f, 461.0f, 50.0f, 463.0f, 48.0f, 465.0f, 46.0f, 467.0f, 44.0f, 469.0f, 42.0f, 471.0f, 40.0f, 473.0f, 38.0f, 475.0f, 36.0f, 477.0f, 34.0f, 479.0f, 32.0f, 481.0f, 30.0f, 483.0f, 28.0f, 485.0f, 26.0f, 487.0f, 24.0f, 489.0f, 22.0f, 491.0f, 20.0f, 493.0f, 18.0f, 495.0f, 16.0f, 497.0f, 14.0f, 499.0f, 12.0f, 501.0f, 10.0f, 503.0f, 8.0f, 505.0f, 6.0f, 507.0f, 4.0f, 509.0f, 2.0f, 511.0f, 
0.0f, 1.0f, 510.0f, 3.0f, 508.0f, 5.0f, 506.0f, 7.0f, 504.0f, 9.0f, 502.0f, 11.0f, 500.0f, 13.0f, 498.0f, 15.0f, 496.0f, 17.0f, 494.0f, 19.0f, 492.0f, 21.0f, 490.0f, 23.0f, 488.0f, 25.0f, 486.0f, 27.0f, 484.0f, 29.0f, 482.0f, 31.0f, 480.0f, 33.0f, 478.0f, 35.0f, 476.0f, 37.0f, 474.0f, 39.0f, 472.0f, 41.0f, 470.0f, 43.0f, 468.0f, 45.0f, 466.0f, 47.0f, 464.0f, 49.0f, 462.0f, 51.0f, 460.0f, 53.0f, 458.0f, 55.0f, 456.0f, 57.0f, 454.0f, 59.0f, 452.0f, 61.0f, 450.0f, 63.0f, 448.0f, 65.0f, 446.0f, 67.0f, 444.0f, 69.0f, 442.0f, 71.0f, 440.0f, 73.0f, 438.0f, 75.0f, 436.0f, 77.0f, 434.0f, 79.0f, 432.0f, 81.0f, 430.0f, 83.0f, 428.0f, 85.0f, 426.0f, 87.0f, 424.0f, 89.0f, 422.0f, 91.0f, 420.0f, 93.0f, 418.0f, 95.0f, 416.0f, 97.0f, 414.0f, 99.0f, 412.0f, 101.0f, 410.0f, 103.0f, 408.0f, 105.0f, 406.0f, 107.0f, 404.0f, 109.0f, 402.0f, 111.0f, 400.0f, 113.0f, 398.0f, 115.0f, 396.0f, 117.0f, 394.0f, 119.0f, 392.0f, 121.0f, 390.0f, 123.0f, 388.0f, 125.0f, 386.0f, 127.0f, 384.0f, 129.0f, 382.0f, 131.0f, 380.0f, 133.0f, 378.0f, 135.0f, 376.0f, 137.0f, 374.0f, 139.0f, 372.0f, 141.0f, 370.0f, 143.0f, 368.0f, 145.0f, 366.0f, 147.0f, 364.0f, 149.0f, 362.0f, 151.0f, 360.0f, 153.0f, 358.0f, 155.0f, 356.0f, 157.0f, 354.0f, 159.0f, 352.0f, 161.0f, 350.0f, 163.0f, 348.0f, 165.0f, 346.0f, 167.0f, 344.0f, 169.0f, 342.0f, 171.0f, 340.0f, 173.0f, 338.0f, 175.0f, 336.0f, 177.0f, 334.0f, 179.0f, 332.0f, 181.0f, 330.0f, 183.0f, 328.0f, 185.0f, 326.0f, 187.0f, 324.0f, 189.0f, 322.0f, 191.0f, 320.0f, 193.0f, 318.0f, 195.0f, 316.0f, 197.0f, 314.0f, 199.0f, 312.0f, 201.0f, 310.0f, 203.0f, 308.0f, 205.0f, 306.0f, 207.0f, 304.0f, 209.0f, 302.0f, 211.0f, 300.0f, 213.0f, 298.0f, 215.0f, 296.0f, 217.0f, 294.0f, 219.0f, 292.0f, 221.0f, 290.0f, 223.0f, 288.0f, 225.0f, 286.0f, 227.0f, 284.0f, 229.0f, 282.0f, 231.0f, 280.0f, 233.0f, 278.0f, 235.0f, 276.0f, 237.0f, 274.0f, 239.0f, 272.0f, 241.0f, 270.0f, 243.0f, 268.0f, 245.0f, 266.0f, 247.0f, 264.0f, 249.0f, 262.0f, 251.0f, 260.0f, 253.0f, 258.0f, 255.0f, 256.0f, 
257.0f, 254.0f, 259.0f, 252.0f, 261.0f, 250.0f, 263.0f, 248.0f, 265.0f, 246.0f, 267.0f, 244.0f, 269.0f, 242.0f, 271.0f, 240.0f, 273.0f, 238.0f, 275.0f, 236.0f, 277.0f, 234.0f, 279.0f, 232.0f, 281.0f, 230.0f, 283.0f, 228.0f, 285.0f, 226.0f, 287.0f, 224.0f, 289.0f, 222.0f, 291.0f, 220.0f, 293.0f, 218.0f, 295.0f, 216.0f, 297.0f, 214.0f, 299.0f, 212.0f, 301.0f, 210.0f, 303.0f, 208.0f, 305.0f, 206.0f, 307.0f, 204.0f, 309.0f, 202.0f, 311.0f, 200.0f, 313.0f, 198.0f, 315.0f, 196.0f, 317.0f, 194.0f, 319.0f, 192.0f, 321.0f, 190.0f, 323.0f, 188.0f, 325.0f, 186.0f, 327.0f, 184.0f, 329.0f, 182.0f, 331.0f, 180.0f, 333.0f, 178.0f, 335.0f, 176.0f, 337.0f, 174.0f, 339.0f, 172.0f, 341.0f, 170.0f, 343.0f, 168.0f, 345.0f, 166.0f, 347.0f, 164.0f, 349.0f, 162.0f, 351.0f, 160.0f, 353.0f, 158.0f, 355.0f, 156.0f, 357.0f, 154.0f, 359.0f, 152.0f, 361.0f, 150.0f, 363.0f, 148.0f, 365.0f, 146.0f, 367.0f, 144.0f, 369.0f, 142.0f, 371.0f, 140.0f, 373.0f, 138.0f, 375.0f, 136.0f, 377.0f, 134.0f, 379.0f, 132.0f, 381.0f, 130.0f, 383.0f, 128.0f, 385.0f, 126.0f, 387.0f, 124.0f, 389.0f, 122.0f, 391.0f, 120.0f, 393.0f, 118.0f, 395.0f, 116.0f, 397.0f, 114.0f, 399.0f, 112.0f, 401.0f, 110.0f, 403.0f, 108.0f, 405.0f, 106.0f, 407.0f, 104.0f, 409.0f, 102.0f, 411.0f, 100.0f, 413.0f, 98.0f, 415.0f, 96.0f, 417.0f, 94.0f, 419.0f, 92.0f, 421.0f, 90.0f, 423.0f, 88.0f, 425.0f, 86.0f, 427.0f, 84.0f, 429.0f, 82.0f, 431.0f, 80.0f, 433.0f, 78.0f, 435.0f, 76.0f, 437.0f, 74.0f, 439.0f, 72.0f, 441.0f, 70.0f, 443.0f, 68.0f, 445.0f, 66.0f, 447.0f, 64.0f, 449.0f, 62.0f, 451.0f, 60.0f, 453.0f, 58.0f, 455.0f, 56.0f, 457.0f, 54.0f, 459.0f, 52.0f, 461.0f, 50.0f, 463.0f, 48.0f, 465.0f, 46.0f, 467.0f, 44.0f, 469.0f, 42.0f, 471.0f, 40.0f, 473.0f, 38.0f, 475.0f, 36.0f, 477.0f, 34.0f, 479.0f, 32.0f, 481.0f, 30.0f, 483.0f, 28.0f, 485.0f, 26.0f, 487.0f, 24.0f, 489.0f, 22.0f, 491.0f, 20.0f, 493.0f, 18.0f, 495.0f, 16.0f, 497.0f, 14.0f, 499.0f, 12.0f, 501.0f, 10.0f, 503.0f, 8.0f, 505.0f, 6.0f, 507.0f, 4.0f, 509.0f, 2.0f, 511.0f, 0.0f, 1.0f, 
510.0f, 3.0f, 508.0f, 5.0f, 506.0f, 7.0f, 504.0f, 9.0f, 502.0f, 11.0f, 500.0f, 13.0f, 498.0f, 15.0f, 496.0f, 17.0f, 494.0f, 19.0f, 492.0f, 21.0f, 490.0f, 23.0f, 488.0f, 25.0f, 486.0f, 27.0f, 484.0f, 29.0f, 482.0f, 31.0f, 480.0f, 33.0f, 478.0f, 35.0f, 476.0f, 37.0f, 474.0f, 39.0f, 472.0f, 41.0f, 470.0f, 43.0f, 468.0f, 45.0f, 466.0f, 47.0f, 464.0f, 49.0f, 462.0f, 51.0f, 460.0f, 53.0f, 458.0f, 55.0f, 456.0f, 57.0f, 454.0f, 59.0f, 452.0f, 61.0f, 450.0f, 63.0f, 448.0f, 65.0f, 446.0f, 67.0f, 444.0f, 69.0f, 442.0f, 71.0f, 440.0f, 73.0f, 438.0f, 75.0f, 436.0f, 77.0f, 434.0f, 79.0f, 432.0f, 81.0f, 430.0f, 83.0f, 428.0f, 85.0f, 426.0f, 87.0f, 424.0f, 89.0f, 422.0f, 91.0f, 420.0f, 93.0f, 418.0f, 95.0f, 416.0f, 97.0f, 414.0f, 99.0f, 412.0f, 101.0f, 410.0f, 103.0f, 408.0f, 105.0f, 406.0f, 107.0f, 404.0f, 109.0f, 402.0f, 111.0f, 400.0f, 113.0f, 398.0f, 115.0f, 396.0f, 117.0f, 394.0f, 119.0f, 392.0f, 121.0f, 390.0f, 123.0f, 388.0f, 125.0f, 386.0f, 127.0f, 384.0f, 129.0f, 382.0f, 131.0f, 380.0f, 133.0f, 378.0f, 135.0f, 376.0f, 137.0f, 374.0f, 139.0f, 372.0f, 141.0f, 370.0f, 143.0f, 368.0f, 145.0f, 366.0f, 147.0f, 364.0f, 149.0f, 362.0f, 151.0f, 360.0f, 153.0f, 358.0f, 155.0f, 356.0f, 157.0f, 354.0f, 159.0f, 352.0f, 161.0f, 350.0f, 163.0f, 348.0f, 165.0f, 346.0f, 167.0f, 344.0f, 169.0f, 342.0f, 171.0f, 340.0f, 173.0f, 338.0f, 175.0f, 336.0f, 177.0f, 334.0f, 179.0f, 332.0f, 181.0f, 330.0f, 183.0f, 328.0f, 185.0f, 326.0f, 187.0f, 324.0f, 189.0f, 322.0f, 191.0f, 320.0f, 193.0f, 318.0f, 195.0f, 316.0f, 197.0f, 314.0f, 199.0f, 312.0f, 201.0f, 310.0f, 203.0f, 308.0f, 205.0f, 306.0f, 207.0f, 304.0f, 209.0f, 302.0f, 211.0f, 300.0f, 213.0f, 298.0f, 215.0f, 296.0f, 217.0f, 294.0f, 219.0f, 292.0f, 221.0f, 290.0f, 223.0f, 288.0f, 225.0f, 286.0f, 227.0f, 284.0f, 229.0f, 282.0f, 231.0f, 280.0f, 233.0f, 278.0f, 235.0f, 276.0f, 237.0f, 274.0f, 239.0f, 272.0f, 241.0f, 270.0f, 243.0f, 268.0f, 245.0f, 266.0f, 247.0f, 264.0f, 249.0f, 262.0f, 251.0f, 260.0f, 253.0f, 258.0f, 255.0f, 256.0f, 257.0f, 
254.0f, 259.0f, 252.0f, 261.0f, 250.0f, 263.0f, 248.0f, 265.0f, 246.0f, 267.0f, 244.0f, 269.0f, 242.0f, 271.0f, 240.0f, 273.0f, 238.0f, 275.0f, 236.0f, 277.0f, 234.0f, 279.0f, 232.0f, 281.0f, 230.0f, 283.0f, 228.0f, 285.0f, 226.0f, 287.0f, 224.0f, 289.0f, 222.0f, 291.0f, 220.0f, 293.0f, 218.0f, 295.0f, 216.0f, 297.0f, 214.0f, 299.0f, 212.0f, 301.0f, 210.0f, 303.0f, 208.0f, 305.0f, 206.0f, 307.0f, 204.0f, 309.0f, 202.0f, 311.0f, 200.0f, 313.0f, 198.0f, 315.0f, 196.0f, 317.0f, 194.0f, 319.0f, 192.0f, 321.0f, 190.0f, 323.0f, 188.0f, 325.0f, 186.0f, 327.0f, 184.0f, 329.0f, 182.0f, 331.0f, 180.0f, 333.0f, 178.0f, 335.0f, 176.0f, 337.0f, 174.0f, 339.0f, 172.0f, 341.0f, 170.0f, 343.0f, 168.0f, 345.0f, 166.0f, 347.0f, 164.0f, 349.0f, 162.0f, 351.0f, 160.0f, 353.0f, 158.0f, 355.0f, 156.0f, 357.0f, 154.0f, 359.0f, 152.0f, 361.0f, 150.0f, 363.0f, 148.0f, 365.0f, 146.0f, 367.0f, 144.0f, 369.0f, 142.0f, 371.0f, 140.0f, 373.0f, 138.0f, 375.0f, 136.0f, 377.0f, 134.0f, 379.0f, 132.0f, 381.0f, 130.0f, 383.0f, 128.0f, 385.0f, 126.0f, 387.0f, 124.0f, 389.0f, 122.0f, 391.0f, 120.0f, 393.0f, 118.0f, 395.0f, 116.0f, 397.0f, 114.0f, 399.0f, 112.0f, 401.0f, 110.0f, 403.0f, 108.0f, 405.0f, 106.0f, 407.0f, 104.0f, 409.0f, 102.0f, 411.0f, 100.0f, 413.0f, 98.0f, 415.0f, 96.0f, 417.0f, 94.0f, 419.0f, 92.0f, 421.0f, 90.0f, 423.0f, 88.0f, 425.0f, 86.0f, 427.0f, 84.0f, 429.0f, 82.0f, 431.0f, 80.0f, 433.0f, 78.0f, 435.0f, 76.0f, 437.0f, 74.0f, 439.0f, 72.0f, 441.0f, 70.0f, 443.0f, 68.0f, 445.0f, 66.0f, 447.0f, 64.0f, 449.0f, 62.0f, 451.0f, 60.0f, 453.0f, 58.0f, 455.0f, 56.0f, 457.0f, 54.0f, 459.0f, 52.0f, 461.0f, 50.0f, 463.0f, 48.0f, 465.0f, 46.0f, 467.0f, 44.0f, 469.0f, 42.0f, 471.0f, 40.0f, 473.0f, 38.0f, 475.0f, 36.0f, 477.0f, 34.0f, 479.0f, 32.0f, 481.0f, 30.0f, 483.0f, 28.0f, 485.0f, 26.0f, 487.0f, 24.0f, 489.0f, 22.0f, 491.0f, 20.0f, 493.0f, 18.0f, 495.0f, 16.0f, 497.0f, 14.0f, 499.0f, 12.0f, 501.0f, 10.0f, 503.0f, 8.0f, 505.0f, 6.0f, 507.0f, 4.0f, 509.0f, 2.0f, 511.0f, 0.0f, 1.0f, 510.0f, 
3.0f, 508.0f, 5.0f, 506.0f, 7.0f, 504.0f, 9.0f, 502.0f, 11.0f, 500.0f, 13.0f, 498.0f, 15.0f, 496.0f, 17.0f, 494.0f, 19.0f, 492.0f, 21.0f, 490.0f, 23.0f, 488.0f, 25.0f, 486.0f, 27.0f, 484.0f, 29.0f, 482.0f, 31.0f, 480.0f, 33.0f, 478.0f, 35.0f, 476.0f, 37.0f, 474.0f, 39.0f, 472.0f, 41.0f, 470.0f, 43.0f, 468.0f, 45.0f, 466.0f, 47.0f, 464.0f, 49.0f, 462.0f, 51.0f, 460.0f, 53.0f, 458.0f, 55.0f, 456.0f, 57.0f, 454.0f, 59.0f, 452.0f, 61.0f, 450.0f, 63.0f, 448.0f, 65.0f, 446.0f, 67.0f, 444.0f, 69.0f, 442.0f, 71.0f, 440.0f, 73.0f, 438.0f, 75.0f, 436.0f, 77.0f, 434.0f, 79.0f, 432.0f, 81.0f, 430.0f, 83.0f, 428.0f, 85.0f, 426.0f, 87.0f, 424.0f, 89.0f, 422.0f, 91.0f, 420.0f, 93.0f, 418.0f, 95.0f, 416.0f, 97.0f, 414.0f, 99.0f, 412.0f, 101.0f, 410.0f, 103.0f, 408.0f, 105.0f, 406.0f, 107.0f, 404.0f, 109.0f, 402.0f, 111.0f, 400.0f, 113.0f, 398.0f, 115.0f, 396.0f, 117.0f, 394.0f, 119.0f, 392.0f, 121.0f, 390.0f, 123.0f, 388.0f, 125.0f, 386.0f, 127.0f, 384.0f, 129.0f, 382.0f, 131.0f, 380.0f, 133.0f, 378.0f, 135.0f, 376.0f, 137.0f, 374.0f, 139.0f, 372.0f, 141.0f, 370.0f, 143.0f, 368.0f, 145.0f, 366.0f, 147.0f, 364.0f, 149.0f, 362.0f, 151.0f, 360.0f, 153.0f, 358.0f, 155.0f, 356.0f, 157.0f, 354.0f, 159.0f, 352.0f, 161.0f, 350.0f, 163.0f, 348.0f, 165.0f, 346.0f, 167.0f, 344.0f, 169.0f, 342.0f, 171.0f, 340.0f, 173.0f, 338.0f, 175.0f, 336.0f, 177.0f, 334.0f, 179.0f, 332.0f, 181.0f, 330.0f, 183.0f, 328.0f, 185.0f, 326.0f, 187.0f, 324.0f, 189.0f, 322.0f, 191.0f, 320.0f, 193.0f, 318.0f, 195.0f, 316.0f, 197.0f, 314.0f, 199.0f, 312.0f, 201.0f, 310.0f, 203.0f, 308.0f, 205.0f, 306.0f, 207.0f, 304.0f, 209.0f, 302.0f, 211.0f, 300.0f, 213.0f, 298.0f, 215.0f, 296.0f, 217.0f, 294.0f, 219.0f, 292.0f, 221.0f, 290.0f, 223.0f, 288.0f, 225.0f, 286.0f, 227.0f, 284.0f, 229.0f, 282.0f, 231.0f, 280.0f, 233.0f, 278.0f, 235.0f, 276.0f, 237.0f, 274.0f, 239.0f, 272.0f, 241.0f, 270.0f, 243.0f, 268.0f, 245.0f, 266.0f, 247.0f, 264.0f, 249.0f, 262.0f, 251.0f, 260.0f, 253.0f, 258.0f, 255.0f, 256.0f, 257.0f, 254.0f, 
259.0f, 252.0f, 261.0f, 250.0f, 263.0f, 248.0f, 265.0f, 246.0f, 267.0f, 244.0f, 269.0f, 242.0f, 271.0f, 240.0f, 273.0f, 238.0f, 275.0f, 236.0f, 277.0f, 234.0f, 279.0f, 232.0f, 281.0f, 230.0f, 283.0f, 228.0f, 285.0f, 226.0f, 287.0f, 224.0f, 289.0f, 222.0f, 291.0f, 220.0f, 293.0f, 218.0f, 295.0f, 216.0f, 297.0f, 214.0f, 299.0f, 212.0f, 301.0f, 210.0f, 303.0f, 208.0f, 305.0f, 206.0f, 307.0f, 204.0f, 309.0f, 202.0f, 311.0f, 200.0f, 313.0f, 198.0f, 315.0f, 196.0f, 317.0f, 194.0f, 319.0f, 192.0f, 321.0f, 190.0f, 323.0f, 188.0f, 325.0f, 186.0f, 327.0f, 184.0f, 329.0f, 182.0f, 331.0f, 180.0f, 333.0f, 178.0f, 335.0f, 176.0f, 337.0f, 174.0f, 339.0f, 172.0f, 341.0f, 170.0f, 343.0f, 168.0f, 345.0f, 166.0f, 347.0f, 164.0f, 349.0f, 162.0f, 351.0f, 160.0f, 353.0f, 158.0f, 355.0f, 156.0f, 357.0f, 154.0f, 359.0f, 152.0f, 361.0f, 150.0f, 363.0f, 148.0f, 365.0f, 146.0f, 367.0f, 144.0f, 369.0f, 142.0f, 371.0f, 140.0f, 373.0f, 138.0f, 375.0f, 136.0f, 377.0f, 134.0f, 379.0f, 132.0f, 381.0f, 130.0f, 383.0f, 128.0f, 385.0f, 126.0f, 387.0f, 124.0f, 389.0f, 122.0f, 391.0f, 120.0f, 393.0f, 118.0f, 395.0f, 116.0f, 397.0f, 114.0f, 399.0f, 112.0f, 401.0f, 110.0f, 403.0f, 108.0f, 405.0f, 106.0f, 407.0f, 104.0f, 409.0f, 102.0f, 411.0f, 100.0f, 413.0f, 98.0f, 415.0f, 96.0f, 417.0f, 94.0f, 419.0f, 92.0f, 421.0f, 90.0f, 423.0f, 88.0f, 425.0f, 86.0f, 427.0f, 84.0f, 429.0f, 82.0f, 431.0f, 80.0f, 433.0f, 78.0f, 435.0f, 76.0f, 437.0f, 74.0f, 439.0f, 72.0f, 441.0f, 70.0f, 443.0f, 68.0f, 445.0f, 66.0f, 447.0f, 64.0f, 449.0f, 62.0f, 451.0f, 60.0f, 453.0f, 58.0f, 455.0f, 56.0f, 457.0f, 54.0f, 459.0f, 52.0f, 461.0f, 50.0f, 463.0f, 48.0f, 465.0f, 46.0f, 467.0f, 44.0f, 469.0f, 42.0f, 471.0f, 40.0f, 473.0f, 38.0f, 475.0f, 36.0f, 477.0f, 34.0f, 479.0f, 32.0f, 481.0f, 30.0f, 483.0f, 28.0f, 485.0f, 26.0f, 487.0f, 24.0f, 489.0f, 22.0f, 491.0f, 20.0f, 493.0f, 18.0f, 495.0f, 16.0f, 497.0f, 14.0f, 499.0f, 12.0f, 501.0f, 10.0f, 503.0f, 8.0f, 505.0f, 6.0f, 507.0f, 4.0f, 509.0f, 2.0f, 511.0f, 0.0f, 1.0f, 510.0f, 3.0f, 
508.0f, 5.0f, 506.0f, 7.0f, 504.0f, 9.0f, 502.0f, 11.0f, 500.0f, 13.0f, 498.0f, 15.0f, 496.0f, 17.0f, 494.0f, 19.0f, 492.0f, 21.0f, 490.0f, 23.0f, 488.0f, 25.0f, 486.0f, 27.0f, 484.0f, 29.0f, 482.0f, 31.0f, 480.0f, 33.0f, 478.0f, 35.0f, 476.0f, 37.0f, 474.0f, 39.0f, 472.0f, 41.0f, 470.0f, 43.0f, 468.0f, 45.0f, 466.0f, 47.0f, 464.0f, 49.0f, 462.0f, 51.0f, 460.0f, 53.0f, 458.0f, 55.0f, 456.0f, 57.0f, 454.0f, 59.0f, 452.0f, 61.0f, 450.0f, 63.0f, 448.0f, 65.0f, 446.0f, 67.0f, 444.0f, 69.0f, 442.0f, 71.0f, 440.0f, 73.0f, 438.0f, 75.0f, 436.0f, 77.0f, 434.0f, 79.0f, 432.0f, 81.0f, 430.0f, 83.0f, 428.0f, 85.0f, 426.0f, 87.0f, 424.0f, 89.0f, 422.0f, 91.0f, 420.0f, 93.0f, 418.0f, 95.0f, 416.0f, 97.0f, 414.0f, 99.0f, 412.0f, 101.0f, 410.0f, 103.0f, 408.0f, 105.0f, 406.0f, 107.0f, 404.0f, 109.0f, 402.0f, 111.0f, 400.0f, 113.0f, 398.0f, 115.0f, 396.0f, 117.0f, 394.0f, 119.0f, 392.0f, 121.0f, 390.0f, 123.0f, 388.0f, 125.0f, 386.0f, 127.0f, 384.0f, 129.0f, 382.0f, 131.0f, 380.0f, 133.0f, 378.0f, 135.0f, 376.0f, 137.0f, 374.0f, 139.0f, 372.0f, 141.0f, 370.0f, 143.0f, 368.0f, 145.0f, 366.0f, 147.0f, 364.0f, 149.0f, 362.0f, 151.0f, 360.0f, 153.0f, 358.0f, 155.0f, 356.0f, 157.0f, 354.0f, 159.0f, 352.0f, 161.0f, 350.0f, 163.0f, 348.0f, 165.0f, 346.0f, 167.0f, 344.0f, 169.0f, 342.0f, 171.0f, 340.0f, 173.0f, 338.0f, 175.0f, 336.0f, 177.0f, 334.0f, 179.0f, 332.0f, 181.0f, 330.0f, 183.0f, 328.0f, 185.0f, 326.0f, 187.0f, 324.0f, 189.0f, 322.0f, 191.0f, 320.0f, 193.0f, 318.0f, 195.0f, 316.0f, 197.0f, 314.0f, 199.0f, 312.0f, 201.0f, 310.0f, 203.0f, 308.0f, 205.0f, 306.0f, 207.0f, 304.0f, 209.0f, 302.0f, 211.0f, 300.0f, 213.0f, 298.0f, 215.0f, 296.0f, 217.0f, 294.0f, 219.0f, 292.0f, 221.0f, 290.0f, 223.0f, 288.0f, 225.0f, 286.0f, 227.0f, 284.0f, 229.0f, 282.0f, 231.0f, 280.0f, 233.0f, 278.0f, 235.0f, 276.0f, 237.0f, 274.0f, 239.0f, 272.0f, 241.0f, 270.0f, 243.0f, 268.0f, 245.0f, 266.0f, 247.0f, 264.0f, 249.0f, 262.0f, 251.0f, 260.0f, 253.0f, 258.0f, 255.0f, 256.0f, 257.0f, 254.0f, 259.0f, 
252.0f, 261.0f, 250.0f, 263.0f, 248.0f, 265.0f, 246.0f, 267.0f, 244.0f, 269.0f, 242.0f, 271.0f, 240.0f, 273.0f, 238.0f, 275.0f, 236.0f, 277.0f, 234.0f, 279.0f, 232.0f, 281.0f, 230.0f, 283.0f, 228.0f, 285.0f, 226.0f, 287.0f, 224.0f, 289.0f, 222.0f, 291.0f, 220.0f, 293.0f, 218.0f, 295.0f, 216.0f, 297.0f, 214.0f, 299.0f, 212.0f, 301.0f, 210.0f, 303.0f, 208.0f, 305.0f, 206.0f, 307.0f, 204.0f, 309.0f, 202.0f, 311.0f, 200.0f, 313.0f, 198.0f, 315.0f, 196.0f, 317.0f, 194.0f, 319.0f, 192.0f, 321.0f, 190.0f, 323.0f, 188.0f, 325.0f, 186.0f, 327.0f, 184.0f, 329.0f, 182.0f, 331.0f, 180.0f, 333.0f, 178.0f, 335.0f, 176.0f, 337.0f, 174.0f, 339.0f, 172.0f, 341.0f, 170.0f, 343.0f, 168.0f, 345.0f, 166.0f, 347.0f, 164.0f, 349.0f, 162.0f, 351.0f, 160.0f, 353.0f, 158.0f, 355.0f, 156.0f, 357.0f, 154.0f, 359.0f, 152.0f, 361.0f, 150.0f, 363.0f, 148.0f, 365.0f, 146.0f, 367.0f, 144.0f, 369.0f, 142.0f, 371.0f, 140.0f, 373.0f, 138.0f, 375.0f, 136.0f, 377.0f, 134.0f, 379.0f, 132.0f, 381.0f, 130.0f, 383.0f, 128.0f, 385.0f, 126.0f, 387.0f, 124.0f, 389.0f, 122.0f, 391.0f, 120.0f, 393.0f, 118.0f, 395.0f, 116.0f, 397.0f, 114.0f, 399.0f, 112.0f, 401.0f, 110.0f, 403.0f, 108.0f, 405.0f, 106.0f, 407.0f, 104.0f, 409.0f, 102.0f, 411.0f, 100.0f, 413.0f, 98.0f, 415.0f, 96.0f, 417.0f, 94.0f, 419.0f, 92.0f, 421.0f, 90.0f, 423.0f, 88.0f, 425.0f, 86.0f, 427.0f, 84.0f, 429.0f, 82.0f, 431.0f, 80.0f, 433.0f, 78.0f, 435.0f, 76.0f, 437.0f, 74.0f, 439.0f, 72.0f, 441.0f, 70.0f, 443.0f, 68.0f, 445.0f, 66.0f, 447.0f, 64.0f, 449.0f, 62.0f, 451.0f, 60.0f, 453.0f, 58.0f, 455.0f, 56.0f, 457.0f, 54.0f, 459.0f, 52.0f, 461.0f, 50.0f, 463.0f, 48.0f, 465.0f, 46.0f, 467.0f, 44.0f, 469.0f, 42.0f, 471.0f, 40.0f, 473.0f, 38.0f, 475.0f, 36.0f, 477.0f, 34.0f, 479.0f, 32.0f, 481.0f, 30.0f, 483.0f, 28.0f, 485.0f, 26.0f, 487.0f, 24.0f, 489.0f, 22.0f, 491.0f, 20.0f, 493.0f, 18.0f, 495.0f, 16.0f, 497.0f, 14.0f, 499.0f, 12.0f, 501.0f, 10.0f, 503.0f, 8.0f, 505.0f, 6.0f, 507.0f, 4.0f, 509.0f, 2.0f, 511.0f, 0.0f, 1.0f, 510.0f, 3.0f, 508.0f, 
5.0f, 506.0f, 7.0f, 504.0f, 9.0f, 502.0f, 11.0f, 500.0f, 13.0f, 498.0f, 15.0f, 496.0f, 17.0f, 494.0f, 19.0f, 492.0f, 21.0f, 490.0f, 23.0f, 488.0f, 25.0f, 486.0f, 27.0f, 484.0f, 29.0f, 482.0f, 31.0f, 480.0f, 33.0f, 478.0f, 35.0f, 476.0f, 37.0f, 474.0f, 39.0f, 472.0f, 41.0f, 470.0f, 43.0f, 468.0f, 45.0f, 466.0f, 47.0f, 464.0f, 49.0f, 462.0f, 51.0f, 460.0f, 53.0f, 458.0f, 55.0f, 456.0f, 57.0f, 454.0f, 59.0f, 452.0f, 61.0f, 450.0f, 63.0f, 448.0f, 65.0f, 446.0f, 67.0f, 444.0f, 69.0f, 442.0f, 71.0f, 440.0f, 73.0f, 438.0f, 75.0f, 436.0f, 77.0f, 434.0f, 79.0f, 432.0f, 81.0f, 430.0f, 83.0f, 428.0f, 85.0f, 426.0f, 87.0f, 424.0f, 89.0f, 422.0f, 91.0f, 420.0f, 93.0f, 418.0f, 95.0f, 416.0f, 97.0f, 414.0f, 99.0f, 412.0f, 101.0f, 410.0f, 103.0f, 408.0f, 105.0f, 406.0f, 107.0f, 404.0f, 109.0f, 402.0f, 111.0f, 400.0f, 113.0f, 398.0f, 115.0f, 396.0f, 117.0f, 394.0f, 119.0f, 392.0f, 121.0f, 390.0f, 123.0f, 388.0f, 125.0f, 386.0f, 127.0f, 384.0f, 129.0f, 382.0f, 131.0f, 380.0f, 133.0f, 378.0f, 135.0f, 376.0f, 137.0f, 374.0f, 139.0f, 372.0f, 141.0f, 370.0f, 143.0f, 368.0f, 145.0f, 366.0f, 147.0f, 364.0f, 149.0f, 362.0f, 151.0f, 360.0f, 153.0f, 358.0f, 155.0f, 356.0f, 157.0f, 354.0f, 159.0f, 352.0f, 161.0f, 350.0f, 163.0f, 348.0f, 165.0f, 346.0f, 167.0f, 344.0f, 169.0f, 342.0f, 171.0f, 340.0f, 173.0f, 338.0f, 175.0f, 336.0f, 177.0f, 334.0f, 179.0f, 332.0f, 181.0f, 330.0f, 183.0f, 328.0f, 185.0f, 326.0f, 187.0f, 324.0f, 189.0f, 322.0f, 191.0f, 320.0f, 193.0f, 318.0f, 195.0f, 316.0f, 197.0f, 314.0f, 199.0f, 312.0f, 201.0f, 310.0f, 203.0f, 308.0f, 205.0f, 306.0f, 207.0f, 304.0f, 209.0f, 302.0f, 211.0f, 300.0f, 213.0f, 298.0f, 215.0f, 296.0f, 217.0f, 294.0f, 219.0f, 292.0f, 221.0f, 290.0f, 223.0f, 288.0f, 225.0f, 286.0f, 227.0f, 284.0f, 229.0f, 282.0f, 231.0f, 280.0f, 233.0f, 278.0f, 235.0f, 276.0f, 237.0f, 274.0f, 239.0f, 272.0f, 241.0f, 270.0f, 243.0f, 268.0f, 245.0f, 266.0f, 247.0f, 264.0f, 249.0f, 262.0f, 251.0f, 260.0f, 253.0f, 258.0f, 255.0f, 256.0f, 257.0f, 254.0f, 259.0f, 252.0f, 
261.0f, 250.0f, 263.0f, 248.0f, 265.0f, 246.0f, 267.0f, 244.0f, 269.0f, 242.0f, 271.0f, 240.0f, 273.0f, 238.0f, 275.0f, 236.0f, 277.0f, 234.0f, 279.0f, 232.0f, 281.0f, 230.0f, 283.0f, 228.0f, 285.0f, 226.0f, 287.0f, 224.0f, 289.0f, 222.0f, 291.0f, 220.0f, 293.0f, 218.0f, 295.0f, 216.0f, 297.0f, 214.0f, 299.0f, 212.0f, 301.0f, 210.0f, 303.0f, 208.0f, 305.0f, 206.0f, 307.0f, 204.0f, 309.0f, 202.0f, 311.0f, 200.0f, 313.0f, 198.0f, 315.0f, 196.0f, 317.0f, 194.0f, 319.0f, 192.0f, 321.0f, 190.0f, 323.0f, 188.0f, 325.0f, 186.0f, 327.0f, 184.0f, 329.0f, 182.0f, 331.0f, 180.0f, 333.0f, 178.0f, 335.0f, 176.0f, 337.0f, 174.0f, 339.0f, 172.0f, 341.0f, 170.0f, 343.0f, 168.0f, 345.0f, 166.0f, 347.0f, 164.0f, 349.0f, 162.0f, 351.0f, 160.0f, 353.0f, 158.0f, 355.0f, 156.0f, 357.0f, 154.0f, 359.0f, 152.0f, 361.0f, 150.0f, 363.0f, 148.0f, 365.0f, 146.0f, 367.0f, 144.0f, 369.0f, 142.0f, 371.0f, 140.0f, 373.0f, 138.0f, 375.0f, 136.0f, 377.0f, 134.0f, 379.0f, 132.0f, 381.0f, 130.0f, 383.0f, 128.0f, 385.0f, 126.0f, 387.0f, 124.0f, 389.0f, 122.0f, 391.0f, 120.0f, 393.0f, 118.0f, 395.0f, 116.0f, 397.0f, 114.0f, 399.0f, 112.0f, 401.0f, 110.0f, 403.0f, 108.0f, 405.0f, 106.0f, 407.0f, 104.0f, 409.0f, 102.0f, 411.0f, 100.0f, 413.0f, 98.0f, 415.0f, 96.0f, 417.0f, 94.0f, 419.0f, 92.0f, 421.0f, 90.0f, 423.0f, 88.0f, 425.0f, 86.0f, 427.0f, 84.0f, 429.0f, 82.0f, 431.0f, 80.0f, 433.0f, 78.0f, 435.0f, 76.0f, 437.0f, 74.0f, 439.0f, 72.0f, 441.0f, 70.0f, 443.0f, 68.0f, 445.0f, 66.0f, 447.0f, 64.0f, 449.0f, 62.0f, 451.0f, 60.0f, 453.0f, 58.0f, 455.0f, 56.0f, 457.0f, 54.0f, 459.0f, 52.0f, 461.0f, 50.0f, 463.0f, 48.0f, 465.0f, 46.0f, 467.0f, 44.0f, 469.0f, 42.0f, 471.0f, 40.0f, 473.0f, 38.0f, 475.0f, 36.0f, 477.0f, 34.0f, 479.0f, 32.0f, 481.0f, 30.0f, 483.0f, 28.0f, 485.0f, 26.0f, 487.0f, 24.0f, 489.0f, 22.0f, 491.0f, 20.0f, 493.0f, 18.0f, 495.0f, 16.0f, 497.0f, 14.0f, 499.0f, 12.0f, 501.0f, 10.0f, 503.0f, 8.0f, 505.0f, 6.0f, 507.0f, 4.0f, 509.0f, 2.0f, 511.0f, 0.0f, 1.0f, 510.0f, 3.0f, 508.0f, 5.0f, 
506.0f, 7.0f, 504.0f, 9.0f, 502.0f, 11.0f, 500.0f, 13.0f, 498.0f, 15.0f, 496.0f, 17.0f, 494.0f, 19.0f, 492.0f, 21.0f, 490.0f, 23.0f, 488.0f, 25.0f, 486.0f, 27.0f, 484.0f, 29.0f, 482.0f, 31.0f, 480.0f, 33.0f, 478.0f, 35.0f, 476.0f, 37.0f, 474.0f, 39.0f, 472.0f, 41.0f, 470.0f, 43.0f, 468.0f, 45.0f, 466.0f, 47.0f, 464.0f, 49.0f, 462.0f, 51.0f, 460.0f, 53.0f, 458.0f, 55.0f, 456.0f, 57.0f, 454.0f, 59.0f, 452.0f, 61.0f, 450.0f, 63.0f, 448.0f, 65.0f, 446.0f, 67.0f, 444.0f, 69.0f, 442.0f, 71.0f, 440.0f, 73.0f, 438.0f, 75.0f, 436.0f, 77.0f, 434.0f, 79.0f, 432.0f, 81.0f, 430.0f, 83.0f, 428.0f, 85.0f, 426.0f, 87.0f, 424.0f, 89.0f, 422.0f, 91.0f, 420.0f, 93.0f, 418.0f, 95.0f, 416.0f, 97.0f, 414.0f, 99.0f, 412.0f, 101.0f, 410.0f, 103.0f, 408.0f, 105.0f, 406.0f, 107.0f, 404.0f, 109.0f, 402.0f, 111.0f, 400.0f, 113.0f, 398.0f, 115.0f, 396.0f, 117.0f, 394.0f, 119.0f, 392.0f, 121.0f, 390.0f, 123.0f, 388.0f, 125.0f, 386.0f, 127.0f, 384.0f, 129.0f, 382.0f, 131.0f, 380.0f, 133.0f, 378.0f, 135.0f, 376.0f, 137.0f, 374.0f, 139.0f, 372.0f, 141.0f, 370.0f, 143.0f, 368.0f, 145.0f, 366.0f, 147.0f, 364.0f, 149.0f, 362.0f, 151.0f, 360.0f, 153.0f, 358.0f, 155.0f, 356.0f, 157.0f, 354.0f, 159.0f, 352.0f, 161.0f, 350.0f, 163.0f, 348.0f, 165.0f, 346.0f, 167.0f, 344.0f, 169.0f, 342.0f, 171.0f, 340.0f, 173.0f, 338.0f, 175.0f, 336.0f, 177.0f, 334.0f, 179.0f, 332.0f, 181.0f, 330.0f, 183.0f, 328.0f, 185.0f, 326.0f, 187.0f, 324.0f, 189.0f, 322.0f, 191.0f, 320.0f, 193.0f, 318.0f, 195.0f, 316.0f, 197.0f, 314.0f, 199.0f, 312.0f, 201.0f, 310.0f, 203.0f, 308.0f, 205.0f, 306.0f, 207.0f, 304.0f, 209.0f, 302.0f, 211.0f, 300.0f, 213.0f, 298.0f, 215.0f, 296.0f, 217.0f, 294.0f, 219.0f, 292.0f, 221.0f, 290.0f, 223.0f, 288.0f, 225.0f, 286.0f, 227.0f, 284.0f, 229.0f, 282.0f, 231.0f, 280.0f, 233.0f, 278.0f, 235.0f, 276.0f, 237.0f, 274.0f, 239.0f, 272.0f, 241.0f, 270.0f, 243.0f, 268.0f, 245.0f, 266.0f, 247.0f, 264.0f, 249.0f, 262.0f, 251.0f, 260.0f, 253.0f, 258.0f, 255.0f, 256.0f, 257.0f, 254.0f, 259.0f, 252.0f, 261.0f, 
250.0f, 263.0f, 248.0f, 265.0f, 246.0f, 267.0f, 244.0f, 269.0f, 242.0f, 271.0f, 240.0f, 273.0f, 238.0f, 275.0f, 236.0f, 277.0f, 234.0f, 279.0f, 232.0f, 281.0f, 230.0f, 283.0f, 228.0f, 285.0f, 226.0f, 287.0f, 224.0f, 289.0f, 222.0f, 291.0f, 220.0f, 293.0f, 218.0f, 295.0f, 216.0f, 297.0f, 214.0f, 299.0f, 212.0f, 301.0f, 210.0f, 303.0f, 208.0f, 305.0f, 206.0f, 307.0f, 204.0f, 309.0f, 202.0f, 311.0f, 200.0f, 313.0f, 198.0f, 315.0f, 196.0f, 317.0f, 194.0f, 319.0f, 192.0f, 321.0f, 190.0f, 323.0f, 188.0f, 325.0f, 186.0f, 327.0f, 184.0f, 329.0f, 182.0f, 331.0f, 180.0f, 333.0f, 178.0f, 335.0f, 176.0f, 337.0f, 174.0f, 339.0f, 172.0f, 341.0f, 170.0f, 343.0f, 168.0f, 345.0f, 166.0f, 347.0f, 164.0f, 349.0f, 162.0f, 351.0f, 160.0f, 353.0f, 158.0f, 355.0f, 156.0f, 357.0f, 154.0f, 359.0f, 152.0f, 361.0f, 150.0f, 363.0f, 148.0f, 365.0f, 146.0f, 367.0f, 144.0f, 369.0f, 142.0f, 371.0f, 140.0f, 373.0f, 138.0f, 375.0f, 136.0f, 377.0f, 134.0f, 379.0f, 132.0f, 381.0f, 130.0f, 383.0f, 128.0f, 385.0f, 126.0f, 387.0f, 124.0f, 389.0f, 122.0f, 391.0f, 120.0f, 393.0f, 118.0f, 395.0f, 116.0f, 397.0f, 114.0f, 399.0f, 112.0f, 401.0f, 110.0f, 403.0f, 108.0f, 405.0f, 106.0f, 407.0f, 104.0f, 409.0f, 102.0f, 411.0f, 100.0f, 413.0f, 98.0f, 415.0f, 96.0f, 417.0f, 94.0f, 419.0f, 92.0f, 421.0f, 90.0f, 423.0f, 88.0f, 425.0f, 86.0f, 427.0f, 84.0f, 429.0f, 82.0f, 431.0f, 80.0f, 433.0f, 78.0f, 435.0f, 76.0f, 437.0f, 74.0f, 439.0f, 72.0f, 441.0f, 70.0f, 443.0f, 68.0f, 445.0f, 66.0f, 447.0f, 64.0f, 449.0f, 62.0f, 451.0f, 60.0f, 453.0f, 58.0f, 455.0f, 56.0f, 457.0f, 54.0f, 459.0f, 52.0f, 461.0f, 50.0f, 463.0f, 48.0f, 465.0f, 46.0f, 467.0f, 44.0f, 469.0f, 42.0f, 471.0f, 40.0f, 473.0f, 38.0f, 475.0f, 36.0f, 477.0f, 34.0f, 479.0f, 32.0f, 481.0f, 30.0f, 483.0f, 28.0f, 485.0f, 26.0f, 487.0f, 24.0f, 489.0f, 22.0f, 491.0f, 20.0f, 493.0f, 18.0f, 495.0f, 16.0f, 497.0f, 14.0f, 499.0f, 12.0f, 501.0f, 10.0f, 503.0f, 8.0f, 505.0f, 6.0f, 507.0f, 4.0f, 509.0f, 2.0f, 511.0f, 0.0f, 1.0f, 510.0f, 3.0f, 508.0f, 5.0f, 506.0f, 
7.0f, 504.0f, 9.0f, 502.0f, 11.0f, 500.0f, 13.0f, 498.0f, 15.0f, 496.0f, 17.0f, 494.0f, 19.0f, 492.0f, 21.0f, 490.0f, 23.0f, 488.0f, 25.0f, 486.0f, 27.0f, 484.0f, 29.0f, 482.0f, 31.0f, 480.0f, 33.0f, 478.0f, 35.0f, 476.0f, 37.0f, 474.0f, 39.0f, 472.0f, 41.0f, 470.0f, 43.0f, 468.0f, 45.0f, 466.0f, 47.0f, 464.0f, 49.0f, 462.0f, 51.0f, 460.0f, 53.0f, 458.0f, 55.0f, 456.0f, 57.0f, 454.0f, 59.0f, 452.0f, 61.0f, 450.0f, 63.0f, 448.0f, 65.0f, 446.0f, 67.0f, 444.0f, 69.0f, 442.0f, 71.0f, 440.0f, 73.0f, 438.0f, 75.0f, 436.0f, 77.0f, 434.0f, 79.0f, 432.0f, 81.0f, 430.0f, 83.0f, 428.0f, 85.0f, 426.0f, 87.0f, 424.0f, 89.0f, 422.0f, 91.0f, 420.0f, 93.0f, 418.0f, 95.0f, 416.0f, 97.0f, 414.0f, 99.0f, 412.0f, 101.0f, 410.0f, 103.0f, 408.0f, 105.0f, 406.0f, 107.0f, 404.0f, 109.0f, 402.0f, 111.0f, 400.0f, 113.0f, 398.0f, 115.0f, 396.0f, 117.0f, 394.0f, 119.0f, 392.0f, 121.0f, 390.0f, 123.0f, 388.0f, 125.0f, 386.0f, 127.0f, 384.0f, 129.0f, 382.0f, 131.0f, 380.0f, 133.0f, 378.0f, 135.0f, 376.0f, 137.0f, 374.0f, 139.0f, 372.0f, 141.0f, 370.0f, 143.0f, 368.0f, 145.0f, 366.0f, 147.0f, 364.0f, 149.0f, 362.0f, 151.0f, 360.0f, 153.0f, 358.0f, 155.0f, 356.0f, 157.0f, 354.0f, 159.0f, 352.0f, 161.0f, 350.0f, 163.0f, 348.0f, 165.0f, 346.0f, 167.0f, 344.0f, 169.0f, 342.0f, 171.0f, 340.0f, 173.0f, 338.0f, 175.0f, 336.0f, 177.0f, 334.0f, 179.0f, 332.0f, 181.0f, 330.0f, 183.0f, 328.0f, 185.0f, 326.0f, 187.0f, 324.0f, 189.0f, 322.0f, 191.0f, 320.0f, 193.0f, 318.0f, 195.0f, 316.0f, 197.0f, 314.0f, 199.0f, 312.0f, 201.0f, 310.0f, 203.0f, 308.0f, 205.0f, 306.0f, 207.0f, 304.0f, 209.0f, 302.0f, 211.0f, 300.0f, 213.0f, 298.0f, 215.0f, 296.0f, 217.0f, 294.0f, 219.0f, 292.0f, 221.0f, 290.0f, 223.0f, 288.0f, 225.0f, 286.0f, 227.0f, 284.0f, 229.0f, 282.0f, 231.0f, 280.0f, 233.0f, 278.0f, 235.0f, 276.0f, 237.0f, 274.0f, 239.0f, 272.0f, 241.0f, 270.0f, 243.0f, 268.0f, 245.0f, 266.0f, 247.0f, 264.0f, 249.0f, 262.0f, 251.0f, 260.0f, 253.0f, 258.0f, 255.0f, 256.0f, 257.0f, 254.0f, 259.0f, 252.0f, 261.0f, 250.0f, 
263.0f, 248.0f, 265.0f, 246.0f, 267.0f, 244.0f, 269.0f, 242.0f, 271.0f, 240.0f, 273.0f, 238.0f, 275.0f, 236.0f, 277.0f, 234.0f, 279.0f, 232.0f, 281.0f, 230.0f, 283.0f, 228.0f, 285.0f, 226.0f, 287.0f, 224.0f, 289.0f, 222.0f, 291.0f, 220.0f, 293.0f, 218.0f, 295.0f, 216.0f, 297.0f, 214.0f, 299.0f, 212.0f, 301.0f, 210.0f, 303.0f, 208.0f, 305.0f, 206.0f, 307.0f, 204.0f, 309.0f, 202.0f, 311.0f, 200.0f, 313.0f, 198.0f, 315.0f, 196.0f, 317.0f, 194.0f, 319.0f, 192.0f, 321.0f, 190.0f, 323.0f, 188.0f, 325.0f, 186.0f, 327.0f, 184.0f, 329.0f, 182.0f, 331.0f, 180.0f, 333.0f, 178.0f, 335.0f, 176.0f, 337.0f, 174.0f, 339.0f, 172.0f, 341.0f, 170.0f, 343.0f, 168.0f, 345.0f, 166.0f, 347.0f, 164.0f, 349.0f, 162.0f, 351.0f, 160.0f, 353.0f, 158.0f, 355.0f, 156.0f, 357.0f, 154.0f, 359.0f, 152.0f, 361.0f, 150.0f, 363.0f, 148.0f, 365.0f, 146.0f, 367.0f, 144.0f, 369.0f, 142.0f, 371.0f, 140.0f, 373.0f, 138.0f, 375.0f, 136.0f, 377.0f, 134.0f, 379.0f, 132.0f, 381.0f, 130.0f, 383.0f, 128.0f, 385.0f, 126.0f, 387.0f, 124.0f, 389.0f, 122.0f, 391.0f, 120.0f, 393.0f, 118.0f, 395.0f, 116.0f, 397.0f, 114.0f, 399.0f, 112.0f, 401.0f, 110.0f, 403.0f, 108.0f, 405.0f, 106.0f, 407.0f, 104.0f, 409.0f, 102.0f, 411.0f, 100.0f, 413.0f, 98.0f, 415.0f, 96.0f, 417.0f, 94.0f, 419.0f, 92.0f, 421.0f, 90.0f, 423.0f, 88.0f, 425.0f, 86.0f, 427.0f, 84.0f, 429.0f, 82.0f, 431.0f, 80.0f, 433.0f, 78.0f, 435.0f, 76.0f, 437.0f, 74.0f, 439.0f, 72.0f, 441.0f, 70.0f, 443.0f, 68.0f, 445.0f, 66.0f, 447.0f, 64.0f, 449.0f, 62.0f, 451.0f, 60.0f, 453.0f, 58.0f, 455.0f, 56.0f, 457.0f, 54.0f, 459.0f, 52.0f, 461.0f, 50.0f, 463.0f, 48.0f, 465.0f, 46.0f, 467.0f, 44.0f, 469.0f, 42.0f, 471.0f, 40.0f, 473.0f, 38.0f, 475.0f, 36.0f, 477.0f, 34.0f, 479.0f, 32.0f, 481.0f, 30.0f, 483.0f, 28.0f, 485.0f, 26.0f, 487.0f, 24.0f, 489.0f, 22.0f, 491.0f, 20.0f, 493.0f, 18.0f, 495.0f, 16.0f, 497.0f, 14.0f, 499.0f, 12.0f, 501.0f, 10.0f, 503.0f, 8.0f, 505.0f, 6.0f, 507.0f, 4.0f, 509.0f, 2.0f, 511.0f, 0.0f, 1.0f, 510.0f, 3.0f, 508.0f, 5.0f, 506.0f, 7.0f, 
504.0f, 9.0f, 502.0f, 11.0f, 500.0f, 13.0f, 498.0f, 15.0f, 496.0f, 17.0f, 494.0f, 19.0f, 492.0f, 21.0f, 490.0f, 23.0f, 488.0f, 25.0f, 486.0f, 27.0f, 484.0f, 29.0f, 482.0f, 31.0f, 480.0f, 33.0f, 478.0f, 35.0f, 476.0f, 37.0f, 474.0f, 39.0f, 472.0f, 41.0f, 470.0f, 43.0f, 468.0f, 45.0f, 466.0f, 47.0f, 464.0f, 49.0f, 462.0f, 51.0f, 460.0f, 53.0f, 458.0f, 55.0f, 456.0f, 57.0f, 454.0f, 59.0f, 452.0f, 61.0f, 450.0f, 63.0f, 448.0f, 65.0f, 446.0f, 67.0f, 444.0f, 69.0f, 442.0f, 71.0f, 440.0f, 73.0f, 438.0f, 75.0f, 436.0f, 77.0f, 434.0f, 79.0f, 432.0f, 81.0f, 430.0f, 83.0f, 428.0f, 85.0f, 426.0f, 87.0f, 424.0f, 89.0f, 422.0f, 91.0f, 420.0f, 93.0f, 418.0f, 95.0f, 416.0f, 97.0f, 414.0f, 99.0f, 412.0f, 101.0f, 410.0f, 103.0f, 408.0f, 105.0f, 406.0f, 107.0f, 404.0f, 109.0f, 402.0f, 111.0f, 400.0f, 113.0f, 398.0f, 115.0f, 396.0f, 117.0f, 394.0f, 119.0f, 392.0f, 121.0f, 390.0f, 123.0f, 388.0f, 125.0f, 386.0f, 127.0f, 384.0f, 129.0f, 382.0f, 131.0f, 380.0f, 133.0f, 378.0f, 135.0f, 376.0f, 137.0f, 374.0f, 139.0f, 372.0f, 141.0f, 370.0f, 143.0f, 368.0f, 145.0f, 366.0f, 147.0f, 364.0f, 149.0f, 362.0f, 151.0f, 360.0f, 153.0f, 358.0f, 155.0f, 356.0f, 157.0f, 354.0f, 159.0f, 352.0f, 161.0f, 350.0f, 163.0f, 348.0f, 165.0f, 346.0f, 167.0f, 344.0f, 169.0f, 342.0f, 171.0f, 340.0f, 173.0f, 338.0f, 175.0f, 336.0f, 177.0f, 334.0f, 179.0f, 332.0f, 181.0f, 330.0f, 183.0f, 328.0f, 185.0f, 326.0f, 187.0f, 324.0f, 189.0f, 322.0f, 191.0f, 320.0f, 193.0f, 318.0f, 195.0f, 316.0f, 197.0f, 314.0f, 199.0f, 312.0f, 201.0f, 310.0f, 203.0f, 308.0f, 205.0f, 306.0f, 207.0f, 304.0f, 209.0f, 302.0f, 211.0f, 300.0f, 213.0f, 298.0f, 215.0f, 296.0f, 217.0f, 294.0f, 219.0f, 292.0f, 221.0f, 290.0f, 223.0f, 288.0f, 225.0f, 286.0f, 227.0f, 284.0f, 229.0f, 282.0f, 231.0f, 280.0f, 233.0f, 278.0f, 235.0f, 276.0f, 237.0f, 274.0f, 239.0f, 272.0f, 241.0f, 270.0f, 243.0f, 268.0f, 245.0f, 266.0f, 247.0f, 264.0f, 249.0f, 262.0f, 251.0f, 260.0f, 253.0f, 258.0f, 255.0f, 256.0f, 257.0f, 254.0f, 259.0f, 252.0f, 261.0f, 250.0f, 
263.0f, 248.0f, 265.0f, 246.0f, 267.0f, 244.0f, 269.0f, 242.0f, 271.0f, 240.0f, 273.0f, 238.0f, 275.0f, 236.0f, 277.0f, 234.0f, 279.0f, 232.0f, 281.0f, 230.0f, 283.0f, 228.0f, 285.0f, 226.0f, 287.0f, 224.0f, 289.0f, 222.0f, 291.0f, 220.0f, 293.0f, 218.0f, 295.0f, 216.0f, 297.0f, 214.0f, 299.0f, 212.0f, 301.0f, 210.0f, 303.0f, 208.0f, 305.0f, 206.0f, 307.0f, 204.0f, 309.0f, 202.0f, 311.0f, 200.0f, 313.0f, 198.0f, 315.0f, 196.0f, 317.0f, 194.0f, 319.0f, 192.0f, 321.0f, 190.0f, 323.0f, 188.0f, 325.0f, 186.0f, 327.0f, 184.0f, 329.0f, 182.0f, 331.0f, 180.0f, 333.0f, 178.0f, 335.0f, 176.0f, 337.0f, 174.0f, 339.0f, 172.0f, 341.0f, 170.0f, 343.0f, 168.0f, 345.0f, 166.0f, 347.0f, 164.0f, 349.0f, 162.0f, 351.0f, 160.0f, 353.0f, 158.0f, 355.0f, 156.0f, 357.0f, 154.0f, 359.0f, 152.0f, 361.0f, 150.0f, 363.0f, 148.0f, 365.0f, 146.0f, 367.0f, 144.0f, 369.0f, 142.0f, 371.0f, 140.0f, 373.0f, 138.0f, 375.0f, 136.0f, 377.0f, 134.0f, 379.0f, 132.0f, 381.0f, 130.0f, 383.0f, 128.0f, 385.0f, 126.0f, 387.0f, 124.0f, 389.0f, 122.0f, 391.0f, 120.0f, 393.0f, 118.0f, 395.0f, 116.0f, 397.0f, 114.0f, 399.0f, 112.0f, 401.0f, 110.0f, 403.0f, 108.0f, 405.0f, 106.0f, 407.0f, 104.0f, 409.0f, 102.0f, 411.0f, 100.0f, 413.0f, 98.0f, 415.0f, 96.0f, 417.0f, 94.0f, 419.0f, 92.0f, 421.0f, 90.0f, 423.0f, 88.0f, 425.0f, 86.0f, 427.0f, 84.0f, 429.0f, 82.0f, 431.0f, 80.0f, 433.0f, 78.0f, 435.0f, 76.0f, 437.0f, 74.0f, 439.0f, 72.0f, 441.0f, 70.0f, 443.0f, 68.0f, 445.0f, 66.0f, 447.0f, 64.0f, 449.0f, 62.0f, 451.0f, 60.0f, 453.0f, 58.0f, 455.0f, 56.0f, 457.0f, 54.0f, 459.0f, 52.0f, 461.0f, 50.0f, 463.0f, 48.0f, 465.0f, 46.0f, 467.0f, 44.0f, 469.0f, 42.0f, 471.0f, 40.0f, 473.0f, 38.0f, 475.0f, 36.0f, 477.0f, 34.0f, 479.0f, 32.0f, 481.0f, 30.0f, 483.0f, 28.0f, 485.0f, 26.0f, 487.0f, 24.0f, 489.0f, 22.0f, 491.0f, 20.0f, 493.0f, 18.0f, 495.0f, 16.0f, 497.0f, 14.0f, 499.0f, 12.0f, 501.0f, 10.0f, 503.0f, 8.0f, 505.0f, 6.0f, 507.0f, 4.0f, 509.0f, 2.0f, 511.0f, 0.0f, 1.0f, 510.0f, 3.0f, 508.0f, 5.0f, 506.0f, 7.0f, 
504.0f, 9.0f, 502.0f, 11.0f, 500.0f, 13.0f, 498.0f, 15.0f, 496.0f, 17.0f, 494.0f, 19.0f, 492.0f, 21.0f, 490.0f, 23.0f, 488.0f, 25.0f, 486.0f, 27.0f, 484.0f, 29.0f, 482.0f, 31.0f, 480.0f, 33.0f, 478.0f, 35.0f, 476.0f, 37.0f, 474.0f, 39.0f, 472.0f, 41.0f, 470.0f, 43.0f, 468.0f, 45.0f, 466.0f, 47.0f, 464.0f, 49.0f, 462.0f, 51.0f, 460.0f, 53.0f, 458.0f, 55.0f, 456.0f, 57.0f, 454.0f, 59.0f, 452.0f, 61.0f, 450.0f, 63.0f, 448.0f, 65.0f, 446.0f, 67.0f, 444.0f, 69.0f, 442.0f, 71.0f, 440.0f, 73.0f, 438.0f, 75.0f, 436.0f, 77.0f, 434.0f, 79.0f, 432.0f, 81.0f, 430.0f, 83.0f, 428.0f, 85.0f, 426.0f, 87.0f, 424.0f, 89.0f, 422.0f, 91.0f, 420.0f, 93.0f, 418.0f, 95.0f, 416.0f, 97.0f, 414.0f, 99.0f, 412.0f, 101.0f, 410.0f, 103.0f, 408.0f, 105.0f, 406.0f, 107.0f, 404.0f, 109.0f, 402.0f, 111.0f, 400.0f, 113.0f, 398.0f, 115.0f, 396.0f, 117.0f, 394.0f, 119.0f, 392.0f, 121.0f, 390.0f, 123.0f, 388.0f, 125.0f, 386.0f, 127.0f, 384.0f, 129.0f, 382.0f, 131.0f, 380.0f, 133.0f, 378.0f, 135.0f, 376.0f, 137.0f, 374.0f, 139.0f, 372.0f, 141.0f, 370.0f, 143.0f, 368.0f, 145.0f, 366.0f, 147.0f, 364.0f, 149.0f, 362.0f, 151.0f, 360.0f, 153.0f, 358.0f, 155.0f, 356.0f, 157.0f, 354.0f, 159.0f, 352.0f, 161.0f, 350.0f, 163.0f, 348.0f, 165.0f, 346.0f, 167.0f, 344.0f, 169.0f, 342.0f, 171.0f, 340.0f, 173.0f, 338.0f, 175.0f, 336.0f, 177.0f, 334.0f, 179.0f, 332.0f, 181.0f, 330.0f, 183.0f, 328.0f, 185.0f, 326.0f, 187.0f, 324.0f, 189.0f, 322.0f, 191.0f, 320.0f, 193.0f, 318.0f, 195.0f, 316.0f, 197.0f, 314.0f, 199.0f, 312.0f, 201.0f, 310.0f, 203.0f, 308.0f, 205.0f, 306.0f, 207.0f, 304.0f, 209.0f, 302.0f, 211.0f, 300.0f, 213.0f, 298.0f, 215.0f, 296.0f, 217.0f, 294.0f, 219.0f, 292.0f, 221.0f, 290.0f, 223.0f, 288.0f, 225.0f, 286.0f, 227.0f, 284.0f, 229.0f, 282.0f, 231.0f, 280.0f, 233.0f, 278.0f, 235.0f, 276.0f, 237.0f, 274.0f, 239.0f, 272.0f, 241.0f, 270.0f, 243.0f, 268.0f, 245.0f, 266.0f, 247.0f, 264.0f, 249.0f, 262.0f, 251.0f, 260.0f, 253.0f, 258.0f, 255.0f, 256.0f, 257.0f, 254.0f, 259.0f, 252.0f, 261.0f, 250.0f, 
263.0f, 248.0f, 265.0f, 246.0f, 267.0f, 244.0f, 269.0f, 242.0f, 271.0f, 240.0f, 273.0f, 238.0f, 275.0f, 236.0f, 277.0f, 234.0f, 279.0f, 232.0f, 281.0f, 230.0f, 283.0f, 228.0f, 285.0f, 226.0f, 287.0f, 224.0f, 289.0f, 222.0f, 291.0f, 220.0f, 293.0f, 218.0f, 295.0f, 216.0f, 297.0f, 214.0f, 299.0f, 212.0f, 301.0f, 210.0f, 303.0f, 208.0f, 305.0f, 206.0f, 307.0f, 204.0f, 309.0f, 202.0f, 311.0f, 200.0f, 313.0f, 198.0f, 315.0f, 196.0f, 317.0f, 194.0f, 319.0f, 192.0f, 321.0f, 190.0f, 323.0f, 188.0f, 325.0f, 186.0f, 327.0f, 184.0f, 329.0f, 182.0f, 331.0f, 180.0f, 333.0f, 178.0f, 335.0f, 176.0f, 337.0f, 174.0f, 339.0f, 172.0f, 341.0f, 170.0f, 343.0f, 168.0f, 345.0f, 166.0f, 347.0f, 164.0f, 349.0f, 162.0f, 351.0f, 160.0f, 353.0f, 158.0f, 355.0f, 156.0f, 357.0f, 154.0f, 359.0f, 152.0f, 361.0f, 150.0f, 363.0f, 148.0f, 365.0f, 146.0f, 367.0f, 144.0f, 369.0f, 142.0f, 371.0f, 140.0f, 373.0f, 138.0f, 375.0f, 136.0f, 377.0f, 134.0f, 379.0f, 132.0f, 381.0f, 130.0f, 383.0f, 128.0f, 385.0f, 126.0f, 387.0f, 124.0f, 389.0f, 122.0f, 391.0f, 120.0f, 393.0f, 118.0f, 395.0f, 116.0f, 397.0f, 114.0f, 399.0f, 112.0f, 401.0f, 110.0f, 403.0f, 108.0f, 405.0f, 106.0f, 407.0f, 104.0f, 409.0f, 102.0f, 411.0f, 100.0f, 413.0f, 98.0f, 415.0f, 96.0f, 417.0f, 94.0f, 419.0f, 92.0f, 421.0f, 90.0f, 423.0f, 88.0f, 425.0f, 86.0f, 427.0f, 84.0f, 429.0f, 82.0f, 431.0f, 80.0f, 433.0f, 78.0f, 435.0f, 76.0f, 437.0f, 74.0f, 439.0f, 72.0f, 441.0f, 70.0f, 443.0f, 68.0f, 445.0f, 66.0f, 447.0f, 64.0f, 449.0f, 62.0f, 451.0f, 60.0f, 453.0f, 58.0f, 455.0f, 56.0f, 457.0f, 54.0f, 459.0f, 52.0f, 461.0f, 50.0f, 463.0f, 48.0f, 465.0f, 46.0f, 467.0f, 44.0f, 469.0f, 42.0f, 471.0f, 40.0f, 473.0f, 38.0f, 475.0f, 36.0f, 477.0f, 34.0f, 479.0f, 32.0f, 481.0f, 30.0f, 483.0f, 28.0f, 485.0f, 26.0f, 487.0f, 24.0f, 489.0f, 22.0f, 491.0f, 20.0f, 493.0f, 18.0f, 495.0f, 16.0f, 497.0f, 14.0f, 499.0f, 12.0f, 501.0f, 10.0f, 503.0f, 8.0f, 505.0f, 6.0f, 507.0f, 4.0f, 509.0f, 2.0f, 511.0f}}},
@@ -25,7 +26,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.5f, 0.7310585786300049f, 1.0f, 0.9525741268224334f, 1.0f, 0.9933071490757153f, 1.0f, 0.9990889488055994f, 1.0f, 0.9998766054240137f, 1.0f, 0.999983298578152f, 1.0f, 0.999997739675702f, 1.0f, 0.999999694097773f, 1.0f, 0.9999999586006244f, 1.0f, 0.9999999943972036f, 1.0f, 0.9999999992417439f, 1.0f, 0.9999999998973812f, 1.0f, 0.999999999986112f, 1.0f, 0.9999999999981204f, 1.0f, 0.9999999999997455f, 1.0f, 0.9999999999999656f, 1.0f, 0.9999999999999953f, 1.0f, 0.9999999999999993f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 
1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.9999999999999998f, 1.0f, 0.9999999999999982f, 1.0f, 0.9999999999999873f, 1.0f, 0.9999999999999065f, 1.0f, 0.9999999999993086f, 1.0f, 0.999999999994891f, 1.0f, 0.9999999999622486f, 1.0f, 0.9999999997210531f, 1.0f, 0.9999999979388463f, 1.0f, 0.9999999847700205f, 1.0f, 0.9999998874648379f, 1.0f, 0.9999991684719722f, 1.0f, 0.9999938558253978f, 1.0f, 0.9999546021312976f, 1.0f, 0.9996646498695336f, 1.0f, 0.9975273768433653f, 1.0f, 0.9820137900379085f, 1.0f, 0.8807970779778823f, 1.0f, 0.5f, 0.7310585786300049f, 1.0f, 0.9525741268224334f, 1.0f, 0.9933071490757153f, 1.0f, 0.9990889488055994f, 1.0f, 0.9998766054240137f, 1.0f, 0.999983298578152f, 1.0f, 0.999997739675702f, 1.0f, 0.999999694097773f, 1.0f, 0.9999999586006244f, 1.0f, 0.9999999943972036f, 1.0f, 0.9999999992417439f, 1.0f, 0.9999999998973812f, 1.0f, 0.999999999986112f, 1.0f, 0.9999999999981204f, 1.0f, 
0.9999999999997455f, 1.0f, 0.9999999999999656f, 1.0f, 0.9999999999999953f, 1.0f, 0.9999999999999993f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 
1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.9999999999999998f, 1.0f, 0.9999999999999982f, 1.0f, 0.9999999999999873f, 1.0f, 0.9999999999999065f, 1.0f, 0.9999999999993086f, 1.0f, 0.999999999994891f, 1.0f, 0.9999999999622486f, 1.0f, 0.9999999997210531f, 1.0f, 0.9999999979388463f, 1.0f, 0.9999999847700205f, 1.0f, 0.9999998874648379f, 1.0f, 0.9999991684719722f, 1.0f, 0.9999938558253978f, 1.0f, 0.9999546021312976f, 1.0f, 0.9996646498695336f, 1.0f, 0.9975273768433653f, 1.0f, 0.9820137900379085f, 1.0f, 0.8807970779778823f, 1.0f, 0.5f, 0.7310585786300049f, 1.0f, 0.9525741268224334f, 1.0f, 0.9933071490757153f, 1.0f, 0.9990889488055994f, 1.0f, 0.9998766054240137f, 1.0f, 0.999983298578152f, 1.0f, 0.999997739675702f, 1.0f, 0.999999694097773f, 1.0f, 0.9999999586006244f, 1.0f, 0.9999999943972036f, 1.0f, 0.9999999992417439f, 1.0f, 0.9999999998973812f, 1.0f, 0.999999999986112f, 1.0f, 0.9999999999981204f, 1.0f, 0.9999999999997455f, 1.0f, 0.9999999999999656f, 1.0f, 0.9999999999999953f, 1.0f, 0.9999999999999993f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 
1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 
1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.9999999999999998f, 1.0f, 0.9999999999999982f, 1.0f, 0.9999999999999873f, 1.0f, 0.9999999999999065f, 1.0f, 0.9999999999993086f, 1.0f, 0.999999999994891f, 1.0f, 0.9999999999622486f, 1.0f, 0.9999999997210531f, 1.0f, 0.9999999979388463f, 1.0f, 0.9999999847700205f, 1.0f, 0.9999998874648379f, 1.0f, 0.9999991684719722f, 1.0f, 0.9999938558253978f, 1.0f, 0.9999546021312976f, 1.0f, 0.9996646498695336f, 1.0f, 0.9975273768433653f, 1.0f, 0.9820137900379085f, 1.0f, 0.8807970779778823f, 1.0f, 0.5f, 0.7310585786300049f, 1.0f, 0.9525741268224334f, 1.0f, 0.9933071490757153f, 1.0f, 0.9990889488055994f, 1.0f, 0.9998766054240137f, 1.0f, 0.999983298578152f, 1.0f, 0.999997739675702f, 1.0f, 0.999999694097773f, 1.0f, 0.9999999586006244f, 1.0f, 0.9999999943972036f, 1.0f, 0.9999999992417439f, 1.0f, 0.9999999998973812f, 1.0f, 0.999999999986112f, 1.0f, 0.9999999999981204f, 1.0f, 0.9999999999997455f, 1.0f, 0.9999999999999656f, 1.0f, 0.9999999999999953f, 1.0f, 0.9999999999999993f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 
1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.9999999999999998f, 1.0f, 
0.9999999999999982f, 1.0f, 0.9999999999999873f, 1.0f, 0.9999999999999065f, 1.0f, 0.9999999999993086f, 1.0f, 0.999999999994891f, 1.0f, 0.9999999999622486f, 1.0f, 0.9999999997210531f, 1.0f, 0.9999999979388463f, 1.0f, 0.9999999847700205f, 1.0f, 0.9999998874648379f, 1.0f, 0.9999991684719722f, 1.0f, 0.9999938558253978f, 1.0f, 0.9999546021312976f, 1.0f, 0.9996646498695336f, 1.0f, 0.9975273768433653f, 1.0f, 0.9820137900379085f, 1.0f, 0.8807970779778823f, 1.0f, 0.5f, 0.7310585786300049f, 1.0f, 0.9525741268224334f, 1.0f, 0.9933071490757153f, 1.0f, 0.9990889488055994f, 1.0f, 0.9998766054240137f, 1.0f, 0.999983298578152f, 1.0f, 0.999997739675702f, 1.0f, 0.999999694097773f, 1.0f, 0.9999999586006244f, 1.0f, 0.9999999943972036f, 1.0f, 0.9999999992417439f, 1.0f, 0.9999999998973812f, 1.0f, 0.999999999986112f, 1.0f, 0.9999999999981204f, 1.0f, 0.9999999999997455f, 1.0f, 0.9999999999999656f, 1.0f, 0.9999999999999953f, 1.0f, 0.9999999999999993f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 
1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.9999999999999998f, 1.0f, 0.9999999999999982f, 1.0f, 0.9999999999999873f, 1.0f, 0.9999999999999065f, 1.0f, 0.9999999999993086f, 1.0f, 0.999999999994891f, 1.0f, 0.9999999999622486f, 1.0f, 0.9999999997210531f, 1.0f, 0.9999999979388463f, 1.0f, 0.9999999847700205f, 1.0f, 0.9999998874648379f, 1.0f, 0.9999991684719722f, 1.0f, 0.9999938558253978f, 1.0f, 0.9999546021312976f, 1.0f, 0.9996646498695336f, 1.0f, 
0.9975273768433653f, 1.0f, 0.9820137900379085f, 1.0f, 0.8807970779778823f, 1.0f, 0.5f, 0.7310585786300049f, 1.0f, 0.9525741268224334f, 1.0f, 0.9933071490757153f, 1.0f, 0.9990889488055994f, 1.0f, 0.9998766054240137f, 1.0f, 0.999983298578152f, 1.0f, 0.999997739675702f, 1.0f, 0.999999694097773f, 1.0f, 0.9999999586006244f, 1.0f, 0.9999999943972036f, 1.0f, 0.9999999992417439f, 1.0f, 0.9999999998973812f, 1.0f, 0.999999999986112f, 1.0f, 0.9999999999981204f, 1.0f, 0.9999999999997455f, 1.0f, 0.9999999999999656f, 1.0f, 0.9999999999999953f, 1.0f, 0.9999999999999993f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 
1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.9999999999999998f, 1.0f, 0.9999999999999982f, 1.0f, 0.9999999999999873f, 1.0f, 0.9999999999999065f, 1.0f, 0.9999999999993086f, 1.0f, 0.999999999994891f, 1.0f, 0.9999999999622486f, 1.0f, 0.9999999997210531f, 1.0f, 0.9999999979388463f, 1.0f, 0.9999999847700205f, 1.0f, 0.9999998874648379f, 1.0f, 0.9999991684719722f, 1.0f, 0.9999938558253978f, 1.0f, 0.9999546021312976f, 1.0f, 0.9996646498695336f, 1.0f, 0.9975273768433653f, 1.0f, 0.9820137900379085f, 1.0f, 0.8807970779778823f, 1.0f, 0.5f, 0.7310585786300049f, 1.0f, 0.9525741268224334f, 1.0f, 0.9933071490757153f, 1.0f, 0.9990889488055994f, 1.0f, 0.9998766054240137f, 1.0f, 0.999983298578152f, 1.0f, 0.999997739675702f, 1.0f, 0.999999694097773f, 1.0f, 0.9999999586006244f, 1.0f, 0.9999999943972036f, 1.0f, 0.9999999992417439f, 1.0f, 
0.9999999998973812f, 1.0f, 0.999999999986112f, 1.0f, 0.9999999999981204f, 1.0f, 0.9999999999997455f, 1.0f, 0.9999999999999656f, 1.0f, 0.9999999999999953f, 1.0f, 0.9999999999999993f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 
1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.9999999999999998f, 1.0f, 0.9999999999999982f, 1.0f, 0.9999999999999873f, 1.0f, 0.9999999999999065f, 1.0f, 0.9999999999993086f, 1.0f, 0.999999999994891f, 1.0f, 0.9999999999622486f, 1.0f, 0.9999999997210531f, 1.0f, 0.9999999979388463f, 1.0f, 0.9999999847700205f, 1.0f, 0.9999998874648379f, 1.0f, 0.9999991684719722f, 1.0f, 0.9999938558253978f, 1.0f, 0.9999546021312976f, 1.0f, 0.9996646498695336f, 1.0f, 0.9975273768433653f, 1.0f, 0.9820137900379085f, 1.0f, 0.8807970779778823f, 1.0f, 0.5f, 0.7310585786300049f, 1.0f, 0.9525741268224334f, 1.0f, 0.9933071490757153f, 1.0f, 0.9990889488055994f, 1.0f, 0.9998766054240137f, 1.0f, 0.999983298578152f, 1.0f, 0.999997739675702f, 1.0f, 0.999999694097773f, 1.0f, 0.9999999586006244f, 1.0f, 0.9999999943972036f, 1.0f, 0.9999999992417439f, 1.0f, 0.9999999998973812f, 1.0f, 0.999999999986112f, 1.0f, 0.9999999999981204f, 1.0f, 0.9999999999997455f, 1.0f, 0.9999999999999656f, 1.0f, 0.9999999999999953f, 1.0f, 0.9999999999999993f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 
1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 
1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.9999999999999998f, 1.0f, 0.9999999999999982f, 1.0f, 0.9999999999999873f, 1.0f, 0.9999999999999065f, 1.0f, 0.9999999999993086f, 1.0f, 0.999999999994891f, 1.0f, 0.9999999999622486f, 1.0f, 0.9999999997210531f, 1.0f, 0.9999999979388463f, 1.0f, 0.9999999847700205f, 1.0f, 0.9999998874648379f, 1.0f, 0.9999991684719722f, 1.0f, 0.9999938558253978f, 1.0f, 0.9999546021312976f, 1.0f, 0.9996646498695336f, 1.0f, 0.9975273768433653f, 1.0f, 0.9820137900379085f, 1.0f, 0.8807970779778823f, 1.0f, 0.5f, 0.7310585786300049f, 1.0f, 0.9525741268224334f, 1.0f, 0.9933071490757153f, 1.0f, 0.9990889488055994f, 1.0f, 0.9998766054240137f, 1.0f, 0.999983298578152f, 1.0f, 0.999997739675702f, 1.0f, 0.999999694097773f, 1.0f, 0.9999999586006244f, 1.0f, 0.9999999943972036f, 1.0f, 0.9999999992417439f, 1.0f, 0.9999999998973812f, 1.0f, 0.999999999986112f, 1.0f, 0.9999999999981204f, 1.0f, 0.9999999999997455f, 1.0f, 0.9999999999999656f, 1.0f, 0.9999999999999953f, 1.0f, 0.9999999999999993f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 
1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 
1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.9999999999999998f, 1.0f, 0.9999999999999982f, 1.0f, 0.9999999999999873f, 1.0f, 0.9999999999999065f, 1.0f, 0.9999999999993086f, 1.0f, 0.999999999994891f, 1.0f, 0.9999999999622486f, 1.0f, 0.9999999997210531f, 1.0f, 0.9999999979388463f, 1.0f, 0.9999999847700205f, 1.0f, 0.9999998874648379f, 1.0f, 0.9999991684719722f, 1.0f, 0.9999938558253978f, 1.0f, 0.9999546021312976f, 1.0f, 0.9996646498695336f, 1.0f, 0.9975273768433653f, 1.0f, 0.9820137900379085f, 1.0f, 0.8807970779778823f, 1.0f, 0.5f, 0.7310585786300049f, 1.0f, 0.9525741268224334f, 1.0f, 0.9933071490757153f, 1.0f, 0.9990889488055994f, 1.0f, 0.9998766054240137f, 1.0f, 0.999983298578152f, 1.0f, 0.999997739675702f, 1.0f, 0.999999694097773f, 1.0f, 0.9999999586006244f, 1.0f, 0.9999999943972036f, 1.0f, 0.9999999992417439f, 1.0f, 0.9999999998973812f, 1.0f, 0.999999999986112f, 1.0f, 0.9999999999981204f, 1.0f, 0.9999999999997455f, 1.0f, 0.9999999999999656f, 1.0f, 0.9999999999999953f, 1.0f, 0.9999999999999993f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 
1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.9999999999999998f, 1.0f, 0.9999999999999982f, 1.0f, 0.9999999999999873f, 1.0f, 0.9999999999999065f, 1.0f, 0.9999999999993086f, 1.0f, 0.999999999994891f, 1.0f, 0.9999999999622486f, 1.0f, 0.9999999997210531f, 1.0f, 0.9999999979388463f, 1.0f, 0.9999999847700205f, 1.0f, 0.9999998874648379f, 1.0f, 0.9999991684719722f, 1.0f, 0.9999938558253978f, 1.0f, 
0.9999546021312976f, 1.0f, 0.9996646498695336f, 1.0f, 0.9975273768433653f, 1.0f, 0.9820137900379085f, 1.0f, 0.8807970779778823f, 1.0f}}},
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/logistic_float_1.example.cpp b/nn/runtime/test/generated/examples/logistic_float_1.example.cpp
index 5d5d458..8f5fe45 100644
--- a/nn/runtime/test/generated/examples/logistic_float_1.example.cpp
+++ b/nn/runtime/test/generated/examples/logistic_float_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: logistic_float_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/logistic_float_1_relaxed.example.cpp b/nn/runtime/test/generated/examples/logistic_float_1_relaxed.example.cpp
index 83f1bf7..a4851a6 100644
--- a/nn/runtime/test/generated/examples/logistic_float_1_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/logistic_float_1_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: logistic_float_1_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/logistic_float_2.example.cpp b/nn/runtime/test/generated/examples/logistic_float_2.example.cpp
index 828e204..a2c6a64 100644
--- a/nn/runtime/test/generated/examples/logistic_float_2.example.cpp
+++ b/nn/runtime/test/generated/examples/logistic_float_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: logistic_float_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/logistic_float_2_relaxed.example.cpp b/nn/runtime/test/generated/examples/logistic_float_2_relaxed.example.cpp
index ff83646..85e2c33 100644
--- a/nn/runtime/test/generated/examples/logistic_float_2_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/logistic_float_2_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: logistic_float_2_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/logistic_quant8_1.example.cpp b/nn/runtime/test/generated/examples/logistic_quant8_1.example.cpp
index 92c4f8e..d957d35 100644
--- a/nn/runtime/test/generated/examples/logistic_quant8_1.example.cpp
+++ b/nn/runtime/test/generated/examples/logistic_quant8_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: logistic_quant8_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/logistic_quant8_2.example.cpp b/nn/runtime/test/generated/examples/logistic_quant8_2.example.cpp
index 530bbf8..a4920d7 100644
--- a/nn/runtime/test/generated/examples/logistic_quant8_2.example.cpp
+++ b/nn/runtime/test/generated/examples/logistic_quant8_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: logistic_quant8_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/lsh_projection.example.cpp b/nn/runtime/test/generated/examples/lsh_projection.example.cpp
index 8eb12df..f9c6c81 100644
--- a/nn/runtime/test/generated/examples/lsh_projection.example.cpp
+++ b/nn/runtime/test/generated/examples/lsh_projection.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: lsh_projection.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/lsh_projection_2.example.cpp b/nn/runtime/test/generated/examples/lsh_projection_2.example.cpp
index 52b0e32..ce6537b 100644
--- a/nn/runtime/test/generated/examples/lsh_projection_2.example.cpp
+++ b/nn/runtime/test/generated/examples/lsh_projection_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: lsh_projection_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/lsh_projection_2_relaxed.example.cpp b/nn/runtime/test/generated/examples/lsh_projection_2_relaxed.example.cpp
index 624a36b..eed8502 100644
--- a/nn/runtime/test/generated/examples/lsh_projection_2_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/lsh_projection_2_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: lsh_projection_2_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/lsh_projection_3_relaxed.example.cpp b/nn/runtime/test/generated/examples/lsh_projection_3_relaxed.example.cpp
index 4f5ca0b..03b5627 100644
--- a/nn/runtime/test/generated/examples/lsh_projection_3_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/lsh_projection_3_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: lsh_projection_3_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/lsh_projection_4_relaxed.example.cpp b/nn/runtime/test/generated/examples/lsh_projection_4_relaxed.example.cpp
index c7ff59b..e85830d 100644
--- a/nn/runtime/test/generated/examples/lsh_projection_4_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/lsh_projection_4_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: lsh_projection_4_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/lsh_projection_deprecated.example.cpp b/nn/runtime/test/generated/examples/lsh_projection_deprecated.example.cpp
index 582ab8f..0491633 100644
--- a/nn/runtime/test/generated/examples/lsh_projection_deprecated.example.cpp
+++ b/nn/runtime/test/generated/examples/lsh_projection_deprecated.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: lsh_projection_deprecated.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/lsh_projection_relaxed.example.cpp b/nn/runtime/test/generated/examples/lsh_projection_relaxed.example.cpp
index d0321a2..48c9612 100644
--- a/nn/runtime/test/generated/examples/lsh_projection_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/lsh_projection_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: lsh_projection_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/lsh_projection_weights_as_inputs.example.cpp b/nn/runtime/test/generated/examples/lsh_projection_weights_as_inputs.example.cpp
index 518ff7f..c018af4 100644
--- a/nn/runtime/test/generated/examples/lsh_projection_weights_as_inputs.example.cpp
+++ b/nn/runtime/test/generated/examples/lsh_projection_weights_as_inputs.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: lsh_projection_weights_as_inputs.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/lsh_projection_weights_as_inputs_relaxed.example.cpp b/nn/runtime/test/generated/examples/lsh_projection_weights_as_inputs_relaxed.example.cpp
index 7a98b63..be15d14 100644
--- a/nn/runtime/test/generated/examples/lsh_projection_weights_as_inputs_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/lsh_projection_weights_as_inputs_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: lsh_projection_weights_as_inputs_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/lstm.example.cpp b/nn/runtime/test/generated/examples/lstm.example.cpp
index 6e0db75..aaa6e26 100644
--- a/nn/runtime/test/generated/examples/lstm.example.cpp
+++ b/nn/runtime/test/generated/examples/lstm.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: lstm.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/lstm2.example.cpp b/nn/runtime/test/generated/examples/lstm2.example.cpp
index 51aff38..72249b3 100644
--- a/nn/runtime/test/generated/examples/lstm2.example.cpp
+++ b/nn/runtime/test/generated/examples/lstm2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: lstm2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/lstm2_relaxed.example.cpp b/nn/runtime/test/generated/examples/lstm2_relaxed.example.cpp
index c1a672a..3958b75 100644
--- a/nn/runtime/test/generated/examples/lstm2_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/lstm2_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: lstm2_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/lstm2_state.example.cpp b/nn/runtime/test/generated/examples/lstm2_state.example.cpp
index 807dbf8..1d03232 100644
--- a/nn/runtime/test/generated/examples/lstm2_state.example.cpp
+++ b/nn/runtime/test/generated/examples/lstm2_state.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: lstm2_state.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/lstm2_state2.example.cpp b/nn/runtime/test/generated/examples/lstm2_state2.example.cpp
index 9d60d48..abbc41e 100644
--- a/nn/runtime/test/generated/examples/lstm2_state2.example.cpp
+++ b/nn/runtime/test/generated/examples/lstm2_state2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: lstm2_state2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/lstm2_state2_relaxed.example.cpp b/nn/runtime/test/generated/examples/lstm2_state2_relaxed.example.cpp
index 135604a..a0b2405 100644
--- a/nn/runtime/test/generated/examples/lstm2_state2_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/lstm2_state2_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: lstm2_state2_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/lstm2_state_relaxed.example.cpp b/nn/runtime/test/generated/examples/lstm2_state_relaxed.example.cpp
index 76839e5..5756c9e 100644
--- a/nn/runtime/test/generated/examples/lstm2_state_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/lstm2_state_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: lstm2_state_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/lstm3.example.cpp b/nn/runtime/test/generated/examples/lstm3.example.cpp
index dc921c0..4b55e01 100644
--- a/nn/runtime/test/generated/examples/lstm3.example.cpp
+++ b/nn/runtime/test/generated/examples/lstm3.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: lstm3.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/lstm3_relaxed.example.cpp b/nn/runtime/test/generated/examples/lstm3_relaxed.example.cpp
index 81eb507..22dd654 100644
--- a/nn/runtime/test/generated/examples/lstm3_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/lstm3_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: lstm3_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/lstm3_state.example.cpp b/nn/runtime/test/generated/examples/lstm3_state.example.cpp
index c99047b..87010d3 100644
--- a/nn/runtime/test/generated/examples/lstm3_state.example.cpp
+++ b/nn/runtime/test/generated/examples/lstm3_state.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: lstm3_state.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/lstm3_state2.example.cpp b/nn/runtime/test/generated/examples/lstm3_state2.example.cpp
index 09c67fa..94f336a 100644
--- a/nn/runtime/test/generated/examples/lstm3_state2.example.cpp
+++ b/nn/runtime/test/generated/examples/lstm3_state2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: lstm3_state2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/lstm3_state2_relaxed.example.cpp b/nn/runtime/test/generated/examples/lstm3_state2_relaxed.example.cpp
index 28aa4a7..1a30fdb 100644
--- a/nn/runtime/test/generated/examples/lstm3_state2_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/lstm3_state2_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: lstm3_state2_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/lstm3_state3.example.cpp b/nn/runtime/test/generated/examples/lstm3_state3.example.cpp
index d310029..2f031b8 100644
--- a/nn/runtime/test/generated/examples/lstm3_state3.example.cpp
+++ b/nn/runtime/test/generated/examples/lstm3_state3.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: lstm3_state3.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/lstm3_state3_relaxed.example.cpp b/nn/runtime/test/generated/examples/lstm3_state3_relaxed.example.cpp
index f855b4c..3202ece 100644
--- a/nn/runtime/test/generated/examples/lstm3_state3_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/lstm3_state3_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: lstm3_state3_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/lstm3_state_relaxed.example.cpp b/nn/runtime/test/generated/examples/lstm3_state_relaxed.example.cpp
index 3f443b9..68426d4 100644
--- a/nn/runtime/test/generated/examples/lstm3_state_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/lstm3_state_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: lstm3_state_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/lstm_relaxed.example.cpp b/nn/runtime/test/generated/examples/lstm_relaxed.example.cpp
index d78ab36..79f4cbe 100644
--- a/nn/runtime/test/generated/examples/lstm_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/lstm_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: lstm_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/lstm_state.example.cpp b/nn/runtime/test/generated/examples/lstm_state.example.cpp
index 7b6d827..5620f77 100644
--- a/nn/runtime/test/generated/examples/lstm_state.example.cpp
+++ b/nn/runtime/test/generated/examples/lstm_state.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: lstm_state.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/lstm_state2.example.cpp b/nn/runtime/test/generated/examples/lstm_state2.example.cpp
index 2452bf3..60314c4 100644
--- a/nn/runtime/test/generated/examples/lstm_state2.example.cpp
+++ b/nn/runtime/test/generated/examples/lstm_state2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: lstm_state2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/lstm_state2_relaxed.example.cpp b/nn/runtime/test/generated/examples/lstm_state2_relaxed.example.cpp
index 6f569ae..ccf0b5a 100644
--- a/nn/runtime/test/generated/examples/lstm_state2_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/lstm_state2_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: lstm_state2_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/lstm_state_relaxed.example.cpp b/nn/runtime/test/generated/examples/lstm_state_relaxed.example.cpp
index 18fb3ac..946b246 100644
--- a/nn/runtime/test/generated/examples/lstm_state_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/lstm_state_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: lstm_state_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/max_pool_float_1.example.cpp b/nn/runtime/test/generated/examples/max_pool_float_1.example.cpp
index 5070749..d24bb20 100644
--- a/nn/runtime/test/generated/examples/max_pool_float_1.example.cpp
+++ b/nn/runtime/test/generated/examples/max_pool_float_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: max_pool_float_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/max_pool_float_1_relaxed.example.cpp b/nn/runtime/test/generated/examples/max_pool_float_1_relaxed.example.cpp
index 44b264e..22e6d5c 100644
--- a/nn/runtime/test/generated/examples/max_pool_float_1_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/max_pool_float_1_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: max_pool_float_1_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/max_pool_float_2.example.cpp b/nn/runtime/test/generated/examples/max_pool_float_2.example.cpp
index a53405c..268fb9f 100644
--- a/nn/runtime/test/generated/examples/max_pool_float_2.example.cpp
+++ b/nn/runtime/test/generated/examples/max_pool_float_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: max_pool_float_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/max_pool_float_2_relaxed.example.cpp b/nn/runtime/test/generated/examples/max_pool_float_2_relaxed.example.cpp
index ab05c6b..5b2fc90 100644
--- a/nn/runtime/test/generated/examples/max_pool_float_2_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/max_pool_float_2_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: max_pool_float_2_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/max_pool_float_3.example.cpp b/nn/runtime/test/generated/examples/max_pool_float_3.example.cpp
index a059522..e1197f5 100644
--- a/nn/runtime/test/generated/examples/max_pool_float_3.example.cpp
+++ b/nn/runtime/test/generated/examples/max_pool_float_3.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: max_pool_float_3.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/max_pool_float_3_relaxed.example.cpp b/nn/runtime/test/generated/examples/max_pool_float_3_relaxed.example.cpp
index fa11fb1..f9bb95c 100644
--- a/nn/runtime/test/generated/examples/max_pool_float_3_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/max_pool_float_3_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: max_pool_float_3_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/max_pool_float_4.example.cpp b/nn/runtime/test/generated/examples/max_pool_float_4.example.cpp
index 59d6786..ce55b46 100644
--- a/nn/runtime/test/generated/examples/max_pool_float_4.example.cpp
+++ b/nn/runtime/test/generated/examples/max_pool_float_4.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: max_pool_float_4.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/max_pool_float_4_relaxed.example.cpp b/nn/runtime/test/generated/examples/max_pool_float_4_relaxed.example.cpp
index a7a99c4..8b6aa2e 100644
--- a/nn/runtime/test/generated/examples/max_pool_float_4_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/max_pool_float_4_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: max_pool_float_4_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/max_pool_quant8_1.example.cpp b/nn/runtime/test/generated/examples/max_pool_quant8_1.example.cpp
index e034210..d6fcaf8 100644
--- a/nn/runtime/test/generated/examples/max_pool_quant8_1.example.cpp
+++ b/nn/runtime/test/generated/examples/max_pool_quant8_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: max_pool_quant8_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/max_pool_quant8_2.example.cpp b/nn/runtime/test/generated/examples/max_pool_quant8_2.example.cpp
index 81e3318..192a4cf 100644
--- a/nn/runtime/test/generated/examples/max_pool_quant8_2.example.cpp
+++ b/nn/runtime/test/generated/examples/max_pool_quant8_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: max_pool_quant8_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/max_pool_quant8_3.example.cpp b/nn/runtime/test/generated/examples/max_pool_quant8_3.example.cpp
index 589256c..4559df8 100644
--- a/nn/runtime/test/generated/examples/max_pool_quant8_3.example.cpp
+++ b/nn/runtime/test/generated/examples/max_pool_quant8_3.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: max_pool_quant8_3.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/max_pool_quant8_4.example.cpp b/nn/runtime/test/generated/examples/max_pool_quant8_4.example.cpp
index e111588..6858d1f 100644
--- a/nn/runtime/test/generated/examples/max_pool_quant8_4.example.cpp
+++ b/nn/runtime/test/generated/examples/max_pool_quant8_4.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: max_pool_quant8_4.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/max_pool_v1_2.example.cpp b/nn/runtime/test/generated/examples/max_pool_v1_2.example.cpp
index 8018aed..96bf162 100644
--- a/nn/runtime/test/generated/examples/max_pool_v1_2.example.cpp
+++ b/nn/runtime/test/generated/examples/max_pool_v1_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: max_pool_v1_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples_nhwc = {
+std::vector<MixedTypedExample>& get_examples_nhwc() {
+static std::vector<MixedTypedExample> examples_nhwc = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_quant8() {
+static std::vector<MixedTypedExample> examples_nhwc_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -101,8 +108,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nchw = {
+std::vector<MixedTypedExample>& get_examples_nchw() {
+static std::vector<MixedTypedExample> examples_nchw = {
 // Begin of an example
 {
 .operands = {
@@ -135,8 +145,11 @@
 },
 }, // End of an example
 };
+return examples_nchw;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -169,8 +182,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nchw_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nchw_quant8() {
+static std::vector<MixedTypedExample> examples_nchw_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -203,8 +219,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_2() {
+static std::vector<MixedTypedExample> examples_nhwc_2 = {
 // Begin of an example
 {
 .operands = {
@@ -237,8 +256,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed_2() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -271,8 +293,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_quant8_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_quant8_2() {
+static std::vector<MixedTypedExample> examples_nhwc_quant8_2 = {
 // Begin of an example
 {
 .operands = {
@@ -305,8 +330,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_quant8_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_2() {
+static std::vector<MixedTypedExample> examples_nchw_2 = {
 // Begin of an example
 {
 .operands = {
@@ -339,8 +367,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed_2() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -373,8 +404,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_quant8_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_quant8_2() {
+static std::vector<MixedTypedExample> examples_nchw_quant8_2 = {
 // Begin of an example
 {
 .operands = {
@@ -407,8 +441,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_quant8_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_3 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_3() {
+static std::vector<MixedTypedExample> examples_nhwc_3 = {
 // Begin of an example
 {
 .operands = {
@@ -441,8 +478,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_3;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed_3 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed_3() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed_3 = {
 // Begin of an example
 {
 .operands = {
@@ -475,8 +515,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed_3;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_quant8_3 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_quant8_3() {
+static std::vector<MixedTypedExample> examples_nhwc_quant8_3 = {
 // Begin of an example
 {
 .operands = {
@@ -509,8 +552,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_quant8_3;
+};
 
-std::vector<MixedTypedExample> examples_nchw_3 = {
+std::vector<MixedTypedExample>& get_examples_nchw_3() {
+static std::vector<MixedTypedExample> examples_nchw_3 = {
 // Begin of an example
 {
 .operands = {
@@ -543,8 +589,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_3;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed_3 = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed_3() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed_3 = {
 // Begin of an example
 {
 .operands = {
@@ -577,8 +626,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed_3;
+};
 
-std::vector<MixedTypedExample> examples_nchw_quant8_3 = {
+std::vector<MixedTypedExample>& get_examples_nchw_quant8_3() {
+static std::vector<MixedTypedExample> examples_nchw_quant8_3 = {
 // Begin of an example
 {
 .operands = {
@@ -611,8 +663,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_quant8_3;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_4 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_4() {
+static std::vector<MixedTypedExample> examples_nhwc_4 = {
 // Begin of an example
 {
 .operands = {
@@ -645,8 +700,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_4;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed_4 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed_4() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed_4 = {
 // Begin of an example
 {
 .operands = {
@@ -679,8 +737,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed_4;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_quant8_4 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_quant8_4() {
+static std::vector<MixedTypedExample> examples_nhwc_quant8_4 = {
 // Begin of an example
 {
 .operands = {
@@ -713,8 +774,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_quant8_4;
+};
 
-std::vector<MixedTypedExample> examples_nchw_4 = {
+std::vector<MixedTypedExample>& get_examples_nchw_4() {
+static std::vector<MixedTypedExample> examples_nchw_4 = {
 // Begin of an example
 {
 .operands = {
@@ -747,8 +811,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_4;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed_4 = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed_4() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed_4 = {
 // Begin of an example
 {
 .operands = {
@@ -781,8 +848,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed_4;
+};
 
-std::vector<MixedTypedExample> examples_nchw_quant8_4 = {
+std::vector<MixedTypedExample>& get_examples_nchw_quant8_4() {
+static std::vector<MixedTypedExample> examples_nchw_quant8_4 = {
 // Begin of an example
 {
 .operands = {
@@ -815,4 +885,6 @@
 },
 }, // End of an example
 };
+return examples_nchw_quant8_4;
+};
 
diff --git a/nn/runtime/test/generated/examples/maximum.example.cpp b/nn/runtime/test/generated/examples/maximum.example.cpp
index 0188b0d..763422f 100644
--- a/nn/runtime/test/generated/examples/maximum.example.cpp
+++ b/nn/runtime/test/generated/examples/maximum.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: maximum.mod.py). Do not edit
-std::vector<MixedTypedExample> examples_simple = {
+std::vector<MixedTypedExample>& get_examples_simple() {
+static std::vector<MixedTypedExample> examples_simple = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples_simple;
+};
 
-std::vector<MixedTypedExample> examples_simple_relaxed = {
+std::vector<MixedTypedExample>& get_examples_simple_relaxed() {
+static std::vector<MixedTypedExample> examples_simple_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,11 @@
 },
 }, // End of an example
 };
+return examples_simple_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_simple_quant8 = {
+std::vector<MixedTypedExample>& get_examples_simple_quant8() {
+static std::vector<MixedTypedExample> examples_simple_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -101,8 +108,11 @@
 },
 }, // End of an example
 };
+return examples_simple_quant8;
+};
 
-std::vector<MixedTypedExample> examples_simple_int32 = {
+std::vector<MixedTypedExample>& get_examples_simple_int32() {
+static std::vector<MixedTypedExample> examples_simple_int32 = {
 // Begin of an example
 {
 .operands = {
@@ -135,8 +145,11 @@
 },
 }, // End of an example
 };
+return examples_simple_int32;
+};
 
-std::vector<MixedTypedExample> examples_simple_float16 = {
+std::vector<MixedTypedExample>& get_examples_simple_float16() {
+static std::vector<MixedTypedExample> examples_simple_float16 = {
 // Begin of an example
 {
 .operands = {
@@ -169,8 +182,11 @@
 },
 }, // End of an example
 };
+return examples_simple_float16;
+};
 
-std::vector<MixedTypedExample> examples_broadcast = {
+std::vector<MixedTypedExample>& get_examples_broadcast() {
+static std::vector<MixedTypedExample> examples_broadcast = {
 // Begin of an example
 {
 .operands = {
@@ -203,8 +219,11 @@
 },
 }, // End of an example
 };
+return examples_broadcast;
+};
 
-std::vector<MixedTypedExample> examples_broadcast_relaxed = {
+std::vector<MixedTypedExample>& get_examples_broadcast_relaxed() {
+static std::vector<MixedTypedExample> examples_broadcast_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -237,8 +256,11 @@
 },
 }, // End of an example
 };
+return examples_broadcast_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_broadcast_quant8 = {
+std::vector<MixedTypedExample>& get_examples_broadcast_quant8() {
+static std::vector<MixedTypedExample> examples_broadcast_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -271,8 +293,11 @@
 },
 }, // End of an example
 };
+return examples_broadcast_quant8;
+};
 
-std::vector<MixedTypedExample> examples_broadcast_int32 = {
+std::vector<MixedTypedExample>& get_examples_broadcast_int32() {
+static std::vector<MixedTypedExample> examples_broadcast_int32 = {
 // Begin of an example
 {
 .operands = {
@@ -305,8 +330,11 @@
 },
 }, // End of an example
 };
+return examples_broadcast_int32;
+};
 
-std::vector<MixedTypedExample> examples_broadcast_float16 = {
+std::vector<MixedTypedExample>& get_examples_broadcast_float16() {
+static std::vector<MixedTypedExample> examples_broadcast_float16 = {
 // Begin of an example
 {
 .operands = {
@@ -339,4 +367,6 @@
 },
 }, // End of an example
 };
+return examples_broadcast_float16;
+};
 
diff --git a/nn/runtime/test/generated/examples/mean.example.cpp b/nn/runtime/test/generated/examples/mean.example.cpp
index 45e9973..f6b2207 100644
--- a/nn/runtime/test/generated/examples/mean.example.cpp
+++ b/nn/runtime/test/generated/examples/mean.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: mean.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/mean_float_1.example.cpp b/nn/runtime/test/generated/examples/mean_float_1.example.cpp
index 0157f75..7506847 100644
--- a/nn/runtime/test/generated/examples/mean_float_1.example.cpp
+++ b/nn/runtime/test/generated/examples/mean_float_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: mean_float_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/mean_float_1_relaxed.example.cpp b/nn/runtime/test/generated/examples/mean_float_1_relaxed.example.cpp
index 9aa4e5e..387983f 100644
--- a/nn/runtime/test/generated/examples/mean_float_1_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/mean_float_1_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: mean_float_1_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/mean_float_2.example.cpp b/nn/runtime/test/generated/examples/mean_float_2.example.cpp
index 8ee8128..9657b98 100644
--- a/nn/runtime/test/generated/examples/mean_float_2.example.cpp
+++ b/nn/runtime/test/generated/examples/mean_float_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: mean_float_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/mean_float_2_relaxed.example.cpp b/nn/runtime/test/generated/examples/mean_float_2_relaxed.example.cpp
index 917662d..b3efdb2 100644
--- a/nn/runtime/test/generated/examples/mean_float_2_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/mean_float_2_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: mean_float_2_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/mean_quant8_1.example.cpp b/nn/runtime/test/generated/examples/mean_quant8_1.example.cpp
index 2656c23..bb0ad31 100644
--- a/nn/runtime/test/generated/examples/mean_quant8_1.example.cpp
+++ b/nn/runtime/test/generated/examples/mean_quant8_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: mean_quant8_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/mean_quant8_2.example.cpp b/nn/runtime/test/generated/examples/mean_quant8_2.example.cpp
index 0511492..54a300f 100644
--- a/nn/runtime/test/generated/examples/mean_quant8_2.example.cpp
+++ b/nn/runtime/test/generated/examples/mean_quant8_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: mean_quant8_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/mean_relaxed.example.cpp b/nn/runtime/test/generated/examples/mean_relaxed.example.cpp
index f4f96eb..c944019 100644
--- a/nn/runtime/test/generated/examples/mean_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/mean_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: mean_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/minimum.example.cpp b/nn/runtime/test/generated/examples/minimum.example.cpp
index 250aa19..f3d663f 100644
--- a/nn/runtime/test/generated/examples/minimum.example.cpp
+++ b/nn/runtime/test/generated/examples/minimum.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: minimum.mod.py). Do not edit
-std::vector<MixedTypedExample> examples_simple = {
+std::vector<MixedTypedExample>& get_examples_simple() {
+static std::vector<MixedTypedExample> examples_simple = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples_simple;
+};
 
-std::vector<MixedTypedExample> examples_simple_relaxed = {
+std::vector<MixedTypedExample>& get_examples_simple_relaxed() {
+static std::vector<MixedTypedExample> examples_simple_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,11 @@
 },
 }, // End of an example
 };
+return examples_simple_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_simple_quant8 = {
+std::vector<MixedTypedExample>& get_examples_simple_quant8() {
+static std::vector<MixedTypedExample> examples_simple_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -101,8 +108,11 @@
 },
 }, // End of an example
 };
+return examples_simple_quant8;
+};
 
-std::vector<MixedTypedExample> examples_simple_int32 = {
+std::vector<MixedTypedExample>& get_examples_simple_int32() {
+static std::vector<MixedTypedExample> examples_simple_int32 = {
 // Begin of an example
 {
 .operands = {
@@ -135,8 +145,11 @@
 },
 }, // End of an example
 };
+return examples_simple_int32;
+};
 
-std::vector<MixedTypedExample> examples_simple_float16 = {
+std::vector<MixedTypedExample>& get_examples_simple_float16() {
+static std::vector<MixedTypedExample> examples_simple_float16 = {
 // Begin of an example
 {
 .operands = {
@@ -169,8 +182,11 @@
 },
 }, // End of an example
 };
+return examples_simple_float16;
+};
 
-std::vector<MixedTypedExample> examples_broadcast = {
+std::vector<MixedTypedExample>& get_examples_broadcast() {
+static std::vector<MixedTypedExample> examples_broadcast = {
 // Begin of an example
 {
 .operands = {
@@ -203,8 +219,11 @@
 },
 }, // End of an example
 };
+return examples_broadcast;
+};
 
-std::vector<MixedTypedExample> examples_broadcast_relaxed = {
+std::vector<MixedTypedExample>& get_examples_broadcast_relaxed() {
+static std::vector<MixedTypedExample> examples_broadcast_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -237,8 +256,11 @@
 },
 }, // End of an example
 };
+return examples_broadcast_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_broadcast_quant8 = {
+std::vector<MixedTypedExample>& get_examples_broadcast_quant8() {
+static std::vector<MixedTypedExample> examples_broadcast_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -271,8 +293,11 @@
 },
 }, // End of an example
 };
+return examples_broadcast_quant8;
+};
 
-std::vector<MixedTypedExample> examples_broadcast_int32 = {
+std::vector<MixedTypedExample>& get_examples_broadcast_int32() {
+static std::vector<MixedTypedExample> examples_broadcast_int32 = {
 // Begin of an example
 {
 .operands = {
@@ -305,8 +330,11 @@
 },
 }, // End of an example
 };
+return examples_broadcast_int32;
+};
 
-std::vector<MixedTypedExample> examples_broadcast_float16 = {
+std::vector<MixedTypedExample>& get_examples_broadcast_float16() {
+static std::vector<MixedTypedExample> examples_broadcast_float16 = {
 // Begin of an example
 {
 .operands = {
@@ -339,4 +367,6 @@
 },
 }, // End of an example
 };
+return examples_broadcast_float16;
+};
 
diff --git a/nn/runtime/test/generated/examples/mobilenet_224_gender_basic_fixed.example.cpp b/nn/runtime/test/generated/examples/mobilenet_224_gender_basic_fixed.example.cpp
index 104c42b..f2017a3 100644
--- a/nn/runtime/test/generated/examples/mobilenet_224_gender_basic_fixed.example.cpp
+++ b/nn/runtime/test/generated/examples/mobilenet_224_gender_basic_fixed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: mobilenet_224_gender_basic_fixed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/mobilenet_224_gender_basic_fixed_relaxed.example.cpp b/nn/runtime/test/generated/examples/mobilenet_224_gender_basic_fixed_relaxed.example.cpp
index 6310aac..b61bb42 100644
--- a/nn/runtime/test/generated/examples/mobilenet_224_gender_basic_fixed_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/mobilenet_224_gender_basic_fixed_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: mobilenet_224_gender_basic_fixed_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/mobilenet_quantized.example.cpp b/nn/runtime/test/generated/examples/mobilenet_quantized.example.cpp
index afb6cab..1e0b41d 100644
--- a/nn/runtime/test/generated/examples/mobilenet_quantized.example.cpp
+++ b/nn/runtime/test/generated/examples/mobilenet_quantized.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: mobilenet_quantized.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/mul.example.cpp b/nn/runtime/test/generated/examples/mul.example.cpp
index 81f1c91..28794da 100644
--- a/nn/runtime/test/generated/examples/mul.example.cpp
+++ b/nn/runtime/test/generated/examples/mul.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: mul.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/mul_broadcast_float16.example.cpp b/nn/runtime/test/generated/examples/mul_broadcast_float16.example.cpp
index c0b9607..54eb897 100644
--- a/nn/runtime/test/generated/examples/mul_broadcast_float16.example.cpp
+++ b/nn/runtime/test/generated/examples/mul_broadcast_float16.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: mul_broadcast_float16.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/mul_broadcast_quant8.example.cpp b/nn/runtime/test/generated/examples/mul_broadcast_quant8.example.cpp
index 4e6eddd..1ae661c 100644
--- a/nn/runtime/test/generated/examples/mul_broadcast_quant8.example.cpp
+++ b/nn/runtime/test/generated/examples/mul_broadcast_quant8.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: mul_broadcast_quant8.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/mul_float16.example.cpp b/nn/runtime/test/generated/examples/mul_float16.example.cpp
index 95c5142..17de097 100644
--- a/nn/runtime/test/generated/examples/mul_float16.example.cpp
+++ b/nn/runtime/test/generated/examples/mul_float16.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: mul_float16.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/mul_quant8.example.cpp b/nn/runtime/test/generated/examples/mul_quant8.example.cpp
index c0927a0..0740d7e 100644
--- a/nn/runtime/test/generated/examples/mul_quant8.example.cpp
+++ b/nn/runtime/test/generated/examples/mul_quant8.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: mul_quant8.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/mul_relaxed.example.cpp b/nn/runtime/test/generated/examples/mul_relaxed.example.cpp
index 9f09c8d..324a53f 100644
--- a/nn/runtime/test/generated/examples/mul_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/mul_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: mul_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/mul_relu.example.cpp b/nn/runtime/test/generated/examples/mul_relu.example.cpp
index 16e0f3f..7a59ff2 100644
--- a/nn/runtime/test/generated/examples/mul_relu.example.cpp
+++ b/nn/runtime/test/generated/examples/mul_relu.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: mul_relu.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/mul_relu_relaxed.example.cpp b/nn/runtime/test/generated/examples/mul_relu_relaxed.example.cpp
index fe34289..326347c 100644
--- a/nn/runtime/test/generated/examples/mul_relu_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/mul_relu_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: mul_relu_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/pad.example.cpp b/nn/runtime/test/generated/examples/pad.example.cpp
index d2fd9b2..ab197e3 100644
--- a/nn/runtime/test/generated/examples/pad.example.cpp
+++ b/nn/runtime/test/generated/examples/pad.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: pad.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/argmin_2_quant8.example.cpp b/nn/runtime/test/generated/examples/pad_float16.example.cpp
similarity index 63%
copy from nn/runtime/test/generated/examples/argmin_2_quant8.example.cpp
copy to nn/runtime/test/generated/examples/pad_float16.example.cpp
index ce68216..57c32ed 100644
--- a/nn/runtime/test/generated/examples/argmin_2_quant8.example.cpp
+++ b/nn/runtime/test/generated/examples/pad_float16.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
-// Generated file (from: argmin_2_quant8.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+// Generated file (from: pad_float16.mod.py). Do not edit
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -11,26 +12,28 @@
   // int -> INT32 map
   {},
   // int -> QUANT8_ASYMM map
-  {{0, {1, 2, 4, 3}}},
+  {},
   // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
-  {},
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f}}},
 },
 //Output(s)
 { // See tools/test_generator/include/TestHarness.h:MixedTyped
   // int -> FLOAT32 map
   {},
   // int -> INT32 map
-  {{0, {0, 0}}},
+  {},
   // int -> QUANT8_ASYMM map
   {},
   // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
-  {},
+  {{0, {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 2.0f, 0.0f, 0.0f, 3.0f, 4.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}}},
 }
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/pad_float_1.example.cpp b/nn/runtime/test/generated/examples/pad_float_1.example.cpp
index 5be42bc..52bcbc9 100644
--- a/nn/runtime/test/generated/examples/pad_float_1.example.cpp
+++ b/nn/runtime/test/generated/examples/pad_float_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: pad_float_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/pad_float_1_relaxed.example.cpp b/nn/runtime/test/generated/examples/pad_float_1_relaxed.example.cpp
index 56b5c2a..fe934c9 100644
--- a/nn/runtime/test/generated/examples/pad_float_1_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/pad_float_1_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: pad_float_1_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/pad_relaxed.example.cpp b/nn/runtime/test/generated/examples/pad_relaxed.example.cpp
index 92aa677..c7dd89a 100644
--- a/nn/runtime/test/generated/examples/pad_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/pad_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: pad_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/pad_v2_1_float.example.cpp b/nn/runtime/test/generated/examples/pad_v2_1_float.example.cpp
index aca670c..3309d29 100644
--- a/nn/runtime/test/generated/examples/pad_v2_1_float.example.cpp
+++ b/nn/runtime/test/generated/examples/pad_v2_1_float.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: pad_v2_1_float.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,43 @@
 },
 }, // End of an example
 };
+return examples;
+};
+
+std::vector<MixedTypedExample>& get_examples_float16() {
+static std::vector<MixedTypedExample> examples_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {9.300000190734863f, 1.0f, 2.0f, 3.0f, 9.300000190734863f, 9.300000190734863f, 9.300000190734863f, 9.300000190734863f, 4.0f, 5.0f, 6.0f, 9.300000190734863f, 9.300000190734863f, 9.300000190734863f, 9.300000190734863f, 9.300000190734863f, 9.300000190734863f, 9.300000190734863f, 9.300000190734863f, 9.300000190734863f, 9.300000190734863f, 9.300000190734863f, 9.300000190734863f, 9.300000190734863f, 9.300000190734863f, 9.300000190734863f, 9.300000190734863f, 9.300000190734863f}}},
+}
+},
+}, // End of an example
+};
+return examples_float16;
+};
 
diff --git a/nn/runtime/test/generated/examples/pad_v2_1_float_relaxed.example.cpp b/nn/runtime/test/generated/examples/pad_v2_1_float_relaxed.example.cpp
index c28e3ca..f911ed2 100644
--- a/nn/runtime/test/generated/examples/pad_v2_1_float_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/pad_v2_1_float_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: pad_v2_1_float_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/pad_v2_1_quant8.example.cpp b/nn/runtime/test/generated/examples/pad_v2_1_quant8.example.cpp
index bd35ac0..c4143ce 100644
--- a/nn/runtime/test/generated/examples/pad_v2_1_quant8.example.cpp
+++ b/nn/runtime/test/generated/examples/pad_v2_1_quant8.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: pad_v2_1_quant8.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/pow.example.cpp b/nn/runtime/test/generated/examples/pow.example.cpp
index c187074..4dbb517 100644
--- a/nn/runtime/test/generated/examples/pow.example.cpp
+++ b/nn/runtime/test/generated/examples/pow.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: pow.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
-std::vector<MixedTypedExample> examples_relaxed = {
+std::vector<MixedTypedExample>& get_examples_relaxed() {
+static std::vector<MixedTypedExample> examples_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_2 = {
+std::vector<MixedTypedExample>& get_examples_2() {
+static std::vector<MixedTypedExample> examples_2 = {
 // Begin of an example
 {
 .operands = {
@@ -101,8 +108,11 @@
 },
 }, // End of an example
 };
+return examples_2;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_2() {
+static std::vector<MixedTypedExample> examples_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -135,8 +145,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_3 = {
+std::vector<MixedTypedExample>& get_examples_3() {
+static std::vector<MixedTypedExample> examples_3 = {
 // Begin of an example
 {
 .operands = {
@@ -169,8 +182,11 @@
 },
 }, // End of an example
 };
+return examples_3;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_3 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_3() {
+static std::vector<MixedTypedExample> examples_relaxed_3 = {
 // Begin of an example
 {
 .operands = {
@@ -203,4 +219,6 @@
 },
 }, // End of an example
 };
+return examples_relaxed_3;
+};
 
diff --git a/nn/runtime/test/generated/examples/prelu.example.cpp b/nn/runtime/test/generated/examples/prelu.example.cpp
index f427e98..5158541 100644
--- a/nn/runtime/test/generated/examples/prelu.example.cpp
+++ b/nn/runtime/test/generated/examples/prelu.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: prelu.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
-std::vector<MixedTypedExample> examples_relaxed = {
+std::vector<MixedTypedExample>& get_examples_relaxed() {
+static std::vector<MixedTypedExample> examples_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_quant8 = {
+std::vector<MixedTypedExample>& get_examples_quant8() {
+static std::vector<MixedTypedExample> examples_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -101,8 +108,11 @@
 },
 }, // End of an example
 };
+return examples_quant8;
+};
 
-std::vector<MixedTypedExample> examples_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_weight_as_input() {
+static std::vector<MixedTypedExample> examples_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -135,8 +145,11 @@
 },
 }, // End of an example
 };
+return examples_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_weight_as_input_relaxed = {
+std::vector<MixedTypedExample>& get_examples_weight_as_input_relaxed() {
+static std::vector<MixedTypedExample> examples_weight_as_input_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -169,8 +182,11 @@
 },
 }, // End of an example
 };
+return examples_weight_as_input_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_weight_as_input_quant8 = {
+std::vector<MixedTypedExample>& get_examples_weight_as_input_quant8() {
+static std::vector<MixedTypedExample> examples_weight_as_input_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -203,4 +219,6 @@
 },
 }, // End of an example
 };
+return examples_weight_as_input_quant8;
+};
 
diff --git a/nn/runtime/test/generated/examples/quantize.example.cpp b/nn/runtime/test/generated/examples/quantize.example.cpp
index f1e130a..86dc98d 100644
--- a/nn/runtime/test/generated/examples/quantize.example.cpp
+++ b/nn/runtime/test/generated/examples/quantize.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: quantize.mod.py). Do not edit
-std::vector<MixedTypedExample> examples_quant8 = {
+std::vector<MixedTypedExample>& get_examples_quant8() {
+static std::vector<MixedTypedExample> examples_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples_quant8;
+};
 
-std::vector<MixedTypedExample> examples_quant8_2 = {
+std::vector<MixedTypedExample>& get_examples_quant8_2() {
+static std::vector<MixedTypedExample> examples_quant8_2 = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_2;
+};
 
-std::vector<MixedTypedExample> examples_quant8_3 = {
+std::vector<MixedTypedExample>& get_examples_quant8_3() {
+static std::vector<MixedTypedExample> examples_quant8_3 = {
 // Begin of an example
 {
 .operands = {
@@ -101,8 +108,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_3;
+};
 
-std::vector<MixedTypedExample> examples_quant8_4 = {
+std::vector<MixedTypedExample>& get_examples_quant8_4() {
+static std::vector<MixedTypedExample> examples_quant8_4 = {
 // Begin of an example
 {
 .operands = {
@@ -135,4 +145,6 @@
 },
 }, // End of an example
 };
+return examples_quant8_4;
+};
 
diff --git a/nn/runtime/test/generated/examples/quantized_lstm.example.cpp b/nn/runtime/test/generated/examples/quantized_lstm.example.cpp
index 98f936a..0481dd8 100644
--- a/nn/runtime/test/generated/examples/quantized_lstm.example.cpp
+++ b/nn/runtime/test/generated/examples/quantized_lstm.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: quantized_lstm.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/random_multinomial.example.cpp b/nn/runtime/test/generated/examples/random_multinomial.example.cpp
index 999a467..3434cff 100644
--- a/nn/runtime/test/generated/examples/random_multinomial.example.cpp
+++ b/nn/runtime/test/generated/examples/random_multinomial.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: random_multinomial.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -34,4 +35,6 @@
 .expectedMultinomialDistributionTolerance = 0.025000
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/relu1_float16_1.example.cpp b/nn/runtime/test/generated/examples/relu1_float16_1.example.cpp
index 9f84e54..55fba70 100644
--- a/nn/runtime/test/generated/examples/relu1_float16_1.example.cpp
+++ b/nn/runtime/test/generated/examples/relu1_float16_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: relu1_float16_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/relu1_float16_2.example.cpp b/nn/runtime/test/generated/examples/relu1_float16_2.example.cpp
index 356b2c6..6d2f234 100644
--- a/nn/runtime/test/generated/examples/relu1_float16_2.example.cpp
+++ b/nn/runtime/test/generated/examples/relu1_float16_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: relu1_float16_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/relu1_float_1.example.cpp b/nn/runtime/test/generated/examples/relu1_float_1.example.cpp
index ef4d1d0..15dc917 100644
--- a/nn/runtime/test/generated/examples/relu1_float_1.example.cpp
+++ b/nn/runtime/test/generated/examples/relu1_float_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: relu1_float_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/relu1_float_1_relaxed.example.cpp b/nn/runtime/test/generated/examples/relu1_float_1_relaxed.example.cpp
index 0a9db4f..a540bb2 100644
--- a/nn/runtime/test/generated/examples/relu1_float_1_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/relu1_float_1_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: relu1_float_1_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/relu1_float_2.example.cpp b/nn/runtime/test/generated/examples/relu1_float_2.example.cpp
index ef8d379..811e8da 100644
--- a/nn/runtime/test/generated/examples/relu1_float_2.example.cpp
+++ b/nn/runtime/test/generated/examples/relu1_float_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: relu1_float_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/relu1_float_2_relaxed.example.cpp b/nn/runtime/test/generated/examples/relu1_float_2_relaxed.example.cpp
index fcb9f82..1e5a795 100644
--- a/nn/runtime/test/generated/examples/relu1_float_2_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/relu1_float_2_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: relu1_float_2_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/relu1_quant8_1.example.cpp b/nn/runtime/test/generated/examples/relu1_quant8_1.example.cpp
index 3bb367a..0bf1e9b 100644
--- a/nn/runtime/test/generated/examples/relu1_quant8_1.example.cpp
+++ b/nn/runtime/test/generated/examples/relu1_quant8_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: relu1_quant8_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -64,4 +65,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/relu1_quant8_2.example.cpp b/nn/runtime/test/generated/examples/relu1_quant8_2.example.cpp
index 0dc5120..4122828 100644
--- a/nn/runtime/test/generated/examples/relu1_quant8_2.example.cpp
+++ b/nn/runtime/test/generated/examples/relu1_quant8_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: relu1_quant8_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/relu6_float16_1.example.cpp b/nn/runtime/test/generated/examples/relu6_float16_1.example.cpp
index 63ade24..520412a 100644
--- a/nn/runtime/test/generated/examples/relu6_float16_1.example.cpp
+++ b/nn/runtime/test/generated/examples/relu6_float16_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: relu6_float16_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/relu6_float16_2.example.cpp b/nn/runtime/test/generated/examples/relu6_float16_2.example.cpp
index d6da9c2..8f8ad9e 100644
--- a/nn/runtime/test/generated/examples/relu6_float16_2.example.cpp
+++ b/nn/runtime/test/generated/examples/relu6_float16_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: relu6_float16_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/relu6_float_1.example.cpp b/nn/runtime/test/generated/examples/relu6_float_1.example.cpp
index 64e396b..9c5290a 100644
--- a/nn/runtime/test/generated/examples/relu6_float_1.example.cpp
+++ b/nn/runtime/test/generated/examples/relu6_float_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: relu6_float_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/relu6_float_1_relaxed.example.cpp b/nn/runtime/test/generated/examples/relu6_float_1_relaxed.example.cpp
index 1ec151f..76cfbf1 100644
--- a/nn/runtime/test/generated/examples/relu6_float_1_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/relu6_float_1_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: relu6_float_1_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/relu6_float_2.example.cpp b/nn/runtime/test/generated/examples/relu6_float_2.example.cpp
index 0ba8f89..c27e9ba 100644
--- a/nn/runtime/test/generated/examples/relu6_float_2.example.cpp
+++ b/nn/runtime/test/generated/examples/relu6_float_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: relu6_float_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/relu6_float_2_relaxed.example.cpp b/nn/runtime/test/generated/examples/relu6_float_2_relaxed.example.cpp
index 16cc738..823a461 100644
--- a/nn/runtime/test/generated/examples/relu6_float_2_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/relu6_float_2_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: relu6_float_2_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/relu6_quant8_1.example.cpp b/nn/runtime/test/generated/examples/relu6_quant8_1.example.cpp
index 2754985..a95a1bb 100644
--- a/nn/runtime/test/generated/examples/relu6_quant8_1.example.cpp
+++ b/nn/runtime/test/generated/examples/relu6_quant8_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: relu6_quant8_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -64,4 +65,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/relu6_quant8_2.example.cpp b/nn/runtime/test/generated/examples/relu6_quant8_2.example.cpp
index f6c4c8a..ba6ef2d 100644
--- a/nn/runtime/test/generated/examples/relu6_quant8_2.example.cpp
+++ b/nn/runtime/test/generated/examples/relu6_quant8_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: relu6_quant8_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/relu_float16_1.example.cpp b/nn/runtime/test/generated/examples/relu_float16_1.example.cpp
index 3055082..7f3e4c8 100644
--- a/nn/runtime/test/generated/examples/relu_float16_1.example.cpp
+++ b/nn/runtime/test/generated/examples/relu_float16_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: relu_float16_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/relu_float16_2.example.cpp b/nn/runtime/test/generated/examples/relu_float16_2.example.cpp
index eae341a..4035926 100644
--- a/nn/runtime/test/generated/examples/relu_float16_2.example.cpp
+++ b/nn/runtime/test/generated/examples/relu_float16_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: relu_float16_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/relu_float_1.example.cpp b/nn/runtime/test/generated/examples/relu_float_1.example.cpp
index d80348b..3cc825f 100644
--- a/nn/runtime/test/generated/examples/relu_float_1.example.cpp
+++ b/nn/runtime/test/generated/examples/relu_float_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: relu_float_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/relu_float_1_relaxed.example.cpp b/nn/runtime/test/generated/examples/relu_float_1_relaxed.example.cpp
index a9ff69d..dbdbc45 100644
--- a/nn/runtime/test/generated/examples/relu_float_1_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/relu_float_1_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: relu_float_1_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/relu_float_2.example.cpp b/nn/runtime/test/generated/examples/relu_float_2.example.cpp
index b0f19a6..a471c9e 100644
--- a/nn/runtime/test/generated/examples/relu_float_2.example.cpp
+++ b/nn/runtime/test/generated/examples/relu_float_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: relu_float_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/relu_float_2_relaxed.example.cpp b/nn/runtime/test/generated/examples/relu_float_2_relaxed.example.cpp
index 636aac9..a3be9a1 100644
--- a/nn/runtime/test/generated/examples/relu_float_2_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/relu_float_2_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: relu_float_2_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/relu_quant8_1.example.cpp b/nn/runtime/test/generated/examples/relu_quant8_1.example.cpp
index 4e66800..6f26b57 100644
--- a/nn/runtime/test/generated/examples/relu_quant8_1.example.cpp
+++ b/nn/runtime/test/generated/examples/relu_quant8_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: relu_quant8_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -64,4 +65,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/relu_quant8_2.example.cpp b/nn/runtime/test/generated/examples/relu_quant8_2.example.cpp
index 7b174f6..c38916d 100644
--- a/nn/runtime/test/generated/examples/relu_quant8_2.example.cpp
+++ b/nn/runtime/test/generated/examples/relu_quant8_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: relu_quant8_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/reshape.example.cpp b/nn/runtime/test/generated/examples/reshape.example.cpp
index c9d5c1e..cfb01d1 100644
--- a/nn/runtime/test/generated/examples/reshape.example.cpp
+++ b/nn/runtime/test/generated/examples/reshape.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: reshape.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/argmin_2_quant8.example.cpp b/nn/runtime/test/generated/examples/reshape_float16.example.cpp
similarity index 63%
copy from nn/runtime/test/generated/examples/argmin_2_quant8.example.cpp
copy to nn/runtime/test/generated/examples/reshape_float16.example.cpp
index ce68216..d5736c5 100644
--- a/nn/runtime/test/generated/examples/argmin_2_quant8.example.cpp
+++ b/nn/runtime/test/generated/examples/reshape_float16.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
-// Generated file (from: argmin_2_quant8.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+// Generated file (from: reshape_float16.mod.py). Do not edit
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -11,26 +12,28 @@
   // int -> INT32 map
   {},
   // int -> QUANT8_ASYMM map
-  {{0, {1, 2, 4, 3}}},
+  {},
   // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
-  {},
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f}}},
 },
 //Output(s)
 { // See tools/test_generator/include/TestHarness.h:MixedTyped
   // int -> FLOAT32 map
   {},
   // int -> INT32 map
-  {{0, {0, 0}}},
+  {},
   // int -> QUANT8_ASYMM map
   {},
   // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
-  {},
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f}}},
 }
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/reshape_quant8.example.cpp b/nn/runtime/test/generated/examples/reshape_quant8.example.cpp
index c3151fc..72c7f88 100644
--- a/nn/runtime/test/generated/examples/reshape_quant8.example.cpp
+++ b/nn/runtime/test/generated/examples/reshape_quant8.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: reshape_quant8.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/reshape_quant8_weights_as_inputs.example.cpp b/nn/runtime/test/generated/examples/reshape_quant8_weights_as_inputs.example.cpp
index ae0c60a..21fe768 100644
--- a/nn/runtime/test/generated/examples/reshape_quant8_weights_as_inputs.example.cpp
+++ b/nn/runtime/test/generated/examples/reshape_quant8_weights_as_inputs.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: reshape_quant8_weights_as_inputs.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/reshape_relaxed.example.cpp b/nn/runtime/test/generated/examples/reshape_relaxed.example.cpp
index 0e2c5d4..d6ac15f 100644
--- a/nn/runtime/test/generated/examples/reshape_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/reshape_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: reshape_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/reshape_weights_as_inputs.example.cpp b/nn/runtime/test/generated/examples/reshape_weights_as_inputs.example.cpp
index 6421715..79f459b 100644
--- a/nn/runtime/test/generated/examples/reshape_weights_as_inputs.example.cpp
+++ b/nn/runtime/test/generated/examples/reshape_weights_as_inputs.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: reshape_weights_as_inputs.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/reshape_weights_as_inputs_relaxed.example.cpp b/nn/runtime/test/generated/examples/reshape_weights_as_inputs_relaxed.example.cpp
index 145d80d..b470936 100644
--- a/nn/runtime/test/generated/examples/reshape_weights_as_inputs_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/reshape_weights_as_inputs_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: reshape_weights_as_inputs_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/resize_bilinear.example.cpp b/nn/runtime/test/generated/examples/resize_bilinear.example.cpp
index b113f71..1844e88 100644
--- a/nn/runtime/test/generated/examples/resize_bilinear.example.cpp
+++ b/nn/runtime/test/generated/examples/resize_bilinear.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: resize_bilinear.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/resize_bilinear_2.example.cpp b/nn/runtime/test/generated/examples/resize_bilinear_2.example.cpp
index 66654c6..7e8ae83 100644
--- a/nn/runtime/test/generated/examples/resize_bilinear_2.example.cpp
+++ b/nn/runtime/test/generated/examples/resize_bilinear_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: resize_bilinear_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/resize_bilinear_2_relaxed.example.cpp b/nn/runtime/test/generated/examples/resize_bilinear_2_relaxed.example.cpp
index 1b93851..0d3087a 100644
--- a/nn/runtime/test/generated/examples/resize_bilinear_2_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/resize_bilinear_2_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: resize_bilinear_2_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/resize_bilinear_relaxed.example.cpp b/nn/runtime/test/generated/examples/resize_bilinear_relaxed.example.cpp
index 3a49e85..0b8c380 100644
--- a/nn/runtime/test/generated/examples/resize_bilinear_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/resize_bilinear_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: resize_bilinear_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/resize_bilinear_v1_2.example.cpp b/nn/runtime/test/generated/examples/resize_bilinear_v1_2.example.cpp
index e8221dc..851740a 100644
--- a/nn/runtime/test/generated/examples/resize_bilinear_v1_2.example.cpp
+++ b/nn/runtime/test/generated/examples/resize_bilinear_v1_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: resize_bilinear_v1_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples_nhwc = {
+std::vector<MixedTypedExample>& get_examples_nhwc() {
+static std::vector<MixedTypedExample> examples_nhwc = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,48 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nchw = {
+std::vector<MixedTypedExample>& get_examples_nhwc_float16() {
+static std::vector<MixedTypedExample> examples_nhwc_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 1.0f, 2.0f, 2.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 1.0f, 1.0f, 1.6666666269302368f, 1.6666666269302368f, 1.6666666269302368f, 2.0f, 2.0f, 2.0f}}},
+}
+},
+}, // End of an example
+};
+return examples_nhwc_float16;
+};
+
+std::vector<MixedTypedExample>& get_examples_nchw() {
+static std::vector<MixedTypedExample> examples_nchw = {
 // Begin of an example
 {
 .operands = {
@@ -101,8 +145,11 @@
 },
 }, // End of an example
 };
+return examples_nchw;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -135,8 +182,48 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_float16() {
+static std::vector<MixedTypedExample> examples_nchw_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 1.0f, 2.0f, 2.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 1.0f, 1.0f, 1.6666666269302368f, 1.6666666269302368f, 1.6666666269302368f, 2.0f, 2.0f, 2.0f}}},
+}
+},
+}, // End of an example
+};
+return examples_nchw_float16;
+};
+
+std::vector<MixedTypedExample>& get_examples_nhwc_2() {
+static std::vector<MixedTypedExample> examples_nhwc_2 = {
 // Begin of an example
 {
 .operands = {
@@ -169,8 +256,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed_2() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -203,8 +293,48 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_float16_2() {
+static std::vector<MixedTypedExample> examples_nhwc_float16_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {3.0f, 4.0f, 6.0f, 10.0f, 9.0f, 10.0f, 12.0f, 16.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {3.0f, 4.0f, 5.0f, 8.0f, 6.0f, 10.0f, 7.0f, 8.0f, 9.0f, 12.0f, 10.0f, 14.0f, 9.0f, 10.0f, 11.0f, 14.0f, 12.0f, 16.0f}}},
+}
+},
+}, // End of an example
+};
+return examples_nhwc_float16_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_nchw_2() {
+static std::vector<MixedTypedExample> examples_nchw_2 = {
 // Begin of an example
 {
 .operands = {
@@ -237,8 +367,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed_2() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -271,4 +404,43 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_nchw_float16_2() {
+static std::vector<MixedTypedExample> examples_nchw_float16_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {3.0f, 6.0f, 9.0f, 12.0f, 4.0f, 10.0f, 10.0f, 16.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {3.0f, 5.0f, 6.0f, 7.0f, 9.0f, 10.0f, 9.0f, 11.0f, 12.0f, 4.0f, 8.0f, 10.0f, 8.0f, 12.0f, 14.0f, 10.0f, 14.0f, 16.0f}}},
+}
+},
+}, // End of an example
+};
+return examples_nchw_float16_2;
+};
 
diff --git a/nn/runtime/test/generated/examples/rnn.example.cpp b/nn/runtime/test/generated/examples/rnn.example.cpp
index 8655572..7fb7bb4 100644
--- a/nn/runtime/test/generated/examples/rnn.example.cpp
+++ b/nn/runtime/test/generated/examples/rnn.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: rnn.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/rnn_relaxed.example.cpp b/nn/runtime/test/generated/examples/rnn_relaxed.example.cpp
index 9805d54..b604176 100644
--- a/nn/runtime/test/generated/examples/rnn_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/rnn_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: rnn_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/rnn_state.example.cpp b/nn/runtime/test/generated/examples/rnn_state.example.cpp
index cd3483f..c6b9ed7 100644
--- a/nn/runtime/test/generated/examples/rnn_state.example.cpp
+++ b/nn/runtime/test/generated/examples/rnn_state.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: rnn_state.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/rnn_state_relaxed.example.cpp b/nn/runtime/test/generated/examples/rnn_state_relaxed.example.cpp
index 3dc0e68..75e330a 100644
--- a/nn/runtime/test/generated/examples/rnn_state_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/rnn_state_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: rnn_state_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/roi_align.example.cpp b/nn/runtime/test/generated/examples/roi_align.example.cpp
index 19e75e4..fe25040 100644
--- a/nn/runtime/test/generated/examples/roi_align.example.cpp
+++ b/nn/runtime/test/generated/examples/roi_align.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: roi_align.mod.py). Do not edit
-std::vector<MixedTypedExample> examples_nhwc = {
+std::vector<MixedTypedExample>& get_examples_nhwc() {
+static std::vector<MixedTypedExample> examples_nhwc = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_quant8() {
+static std::vector<MixedTypedExample> examples_nhwc_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -101,8 +108,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nchw = {
+std::vector<MixedTypedExample>& get_examples_nchw() {
+static std::vector<MixedTypedExample> examples_nchw = {
 // Begin of an example
 {
 .operands = {
@@ -135,8 +145,11 @@
 },
 }, // End of an example
 };
+return examples_nchw;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -169,8 +182,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nchw_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nchw_quant8() {
+static std::vector<MixedTypedExample> examples_nchw_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -203,8 +219,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_2() {
+static std::vector<MixedTypedExample> examples_nhwc_2 = {
 // Begin of an example
 {
 .operands = {
@@ -237,8 +256,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed_2() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -271,8 +293,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_quant8_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_quant8_2() {
+static std::vector<MixedTypedExample> examples_nhwc_quant8_2 = {
 // Begin of an example
 {
 .operands = {
@@ -305,8 +330,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_quant8_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_2() {
+static std::vector<MixedTypedExample> examples_nchw_2 = {
 // Begin of an example
 {
 .operands = {
@@ -339,8 +367,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed_2() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -373,8 +404,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_quant8_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_quant8_2() {
+static std::vector<MixedTypedExample> examples_nchw_quant8_2 = {
 // Begin of an example
 {
 .operands = {
@@ -407,8 +441,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_quant8_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_3 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_3() {
+static std::vector<MixedTypedExample> examples_nhwc_3 = {
 // Begin of an example
 {
 .operands = {
@@ -441,8 +478,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_3;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed_3 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed_3() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed_3 = {
 // Begin of an example
 {
 .operands = {
@@ -475,8 +515,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed_3;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_quant8_3 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_quant8_3() {
+static std::vector<MixedTypedExample> examples_nhwc_quant8_3 = {
 // Begin of an example
 {
 .operands = {
@@ -509,8 +552,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_quant8_3;
+};
 
-std::vector<MixedTypedExample> examples_nchw_3 = {
+std::vector<MixedTypedExample>& get_examples_nchw_3() {
+static std::vector<MixedTypedExample> examples_nchw_3 = {
 // Begin of an example
 {
 .operands = {
@@ -543,8 +589,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_3;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed_3 = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed_3() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed_3 = {
 // Begin of an example
 {
 .operands = {
@@ -577,8 +626,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed_3;
+};
 
-std::vector<MixedTypedExample> examples_nchw_quant8_3 = {
+std::vector<MixedTypedExample>& get_examples_nchw_quant8_3() {
+static std::vector<MixedTypedExample> examples_nchw_quant8_3 = {
 // Begin of an example
 {
 .operands = {
@@ -611,4 +663,6 @@
 },
 }, // End of an example
 };
+return examples_nchw_quant8_3;
+};
 
diff --git a/nn/runtime/test/generated/examples/roi_pooling.example.cpp b/nn/runtime/test/generated/examples/roi_pooling.example.cpp
new file mode 100644
index 0000000..295d49b
--- /dev/null
+++ b/nn/runtime/test/generated/examples/roi_pooling.example.cpp
@@ -0,0 +1,446 @@
+// clang-format off
+// Generated file (from: roi_pooling.mod.py). Do not edit
+std::vector<MixedTypedExample>& get_examples_nhwc() {
+static std::vector<MixedTypedExample> examples_nhwc = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {-10.0f, -1.0f, 4.0f, -5.0f, -8.0f, -2.0f, 9.0f, 1.0f, 7.0f, -2.0f, 3.0f, -7.0f, -2.0f, 10.0f, -3.0f, 5.0f}}, {1, {2.0f, 2.0f, 4.0f, 4.0f, 0.0f, 0.0f, 6.0f, 6.0f, 2.0f, 0.0f, 4.0f, 6.0f, 0.0f, 2.0f, 6.0f, 4.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {-2.0f, 9.0f, -2.0f, 3.0f, -1.0f, 9.0f, 10.0f, 5.0f, -1.0f, 9.0f, 10.0f, 3.0f, -2.0f, 9.0f, 7.0f, 3.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_nhwc;
+};
+
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {-10.0f, -1.0f, 4.0f, -5.0f, -8.0f, -2.0f, 9.0f, 1.0f, 7.0f, -2.0f, 3.0f, -7.0f, -2.0f, 10.0f, -3.0f, 5.0f}}, {1, {2.0f, 2.0f, 4.0f, 4.0f, 0.0f, 0.0f, 6.0f, 6.0f, 2.0f, 0.0f, 4.0f, 6.0f, 0.0f, 2.0f, 6.0f, 4.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {-2.0f, 9.0f, -2.0f, 3.0f, -1.0f, 9.0f, 10.0f, 5.0f, -1.0f, 9.0f, 10.0f, 3.0f, -2.0f, 9.0f, 7.0f, 3.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_nhwc_relaxed;
+};
+
+std::vector<MixedTypedExample>& get_examples_nhwc_quant8() {
+static std::vector<MixedTypedExample> examples_nhwc_quant8 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{1, {2.0f, 2.0f, 4.0f, 4.0f, 0.0f, 0.0f, 6.0f, 6.0f, 2.0f, 0.0f, 4.0f, 6.0f, 0.0f, 2.0f, 6.0f, 4.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {88, 124, 144, 108, 96, 120, 164, 132, 156, 120, 140, 100, 120, 168, 116, 148}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {120, 164, 120, 140, 124, 164, 168, 148, 124, 164, 168, 140, 120, 164, 156, 140}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_nhwc_quant8;
+};
+
+std::vector<MixedTypedExample>& get_examples_nchw() {
+static std::vector<MixedTypedExample> examples_nchw = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {-10.0f, -1.0f, 4.0f, -5.0f, -8.0f, -2.0f, 9.0f, 1.0f, 7.0f, -2.0f, 3.0f, -7.0f, -2.0f, 10.0f, -3.0f, 5.0f}}, {1, {2.0f, 2.0f, 4.0f, 4.0f, 0.0f, 0.0f, 6.0f, 6.0f, 2.0f, 0.0f, 4.0f, 6.0f, 0.0f, 2.0f, 6.0f, 4.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {-2.0f, 9.0f, -2.0f, 3.0f, -1.0f, 9.0f, 10.0f, 5.0f, -1.0f, 9.0f, 10.0f, 3.0f, -2.0f, 9.0f, 7.0f, 3.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_nchw;
+};
+
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {-10.0f, -1.0f, 4.0f, -5.0f, -8.0f, -2.0f, 9.0f, 1.0f, 7.0f, -2.0f, 3.0f, -7.0f, -2.0f, 10.0f, -3.0f, 5.0f}}, {1, {2.0f, 2.0f, 4.0f, 4.0f, 0.0f, 0.0f, 6.0f, 6.0f, 2.0f, 0.0f, 4.0f, 6.0f, 0.0f, 2.0f, 6.0f, 4.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {-2.0f, 9.0f, -2.0f, 3.0f, -1.0f, 9.0f, 10.0f, 5.0f, -1.0f, 9.0f, 10.0f, 3.0f, -2.0f, 9.0f, 7.0f, 3.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_nchw_relaxed;
+};
+
+std::vector<MixedTypedExample>& get_examples_nchw_quant8() {
+static std::vector<MixedTypedExample> examples_nchw_quant8 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{1, {2.0f, 2.0f, 4.0f, 4.0f, 0.0f, 0.0f, 6.0f, 6.0f, 2.0f, 0.0f, 4.0f, 6.0f, 0.0f, 2.0f, 6.0f, 4.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {88, 124, 144, 108, 96, 120, 164, 132, 156, 120, 140, 100, 120, 168, 116, 148}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {120, 164, 120, 140, 124, 164, 168, 148, 124, 164, 168, 140, 120, 164, 156, 140}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_nchw_quant8;
+};
+
+std::vector<MixedTypedExample>& get_examples_nhwc_2() {
+static std::vector<MixedTypedExample> examples_nhwc_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {8.84f, 8.88f, 7.41f, 5.6f, 9.95f, 4.37f, 0.1f, 7.64f, 6.5f, 9.47f, 7.55f, 3.0f, 0.89f, 3.01f, 6.3f, 4.4f, 1.64f, 6.74f, 6.16f, 8.6f, 5.85f, 3.17f, 7.12f, 6.79f, 5.77f, 6.62f, 5.13f, 8.44f, 5.08f, 7.12f, 2.84f, 1.19f, 8.37f, 0.9f, 7.86f, 9.69f, 1.97f, 1.31f, 4.42f, 9.89f, 0.18f, 9.0f, 9.3f, 0.44f, 5.05f, 6.47f, 1.09f, 9.5f, 1.3f, 2.18f, 2.05f, 7.74f, 7.66f, 0.65f, 4.18f, 7.14f, 5.35f, 7.9f, 1.04f, 1.47f, 9.01f, 0.95f, 4.07f, 0.65f, 5.47f, 2.64f, 0.86f, 4.86f, 2.38f, 2.45f, 8.77f, 0.06f, 3.6f, 9.28f, 5.84f, 8.97f, 6.89f, 1.43f, 3.9f, 5.91f, 7.4f, 9.25f, 3.12f, 4.92f, 1.87f, 3.22f, 9.5f, 6.73f, 2.07f, 7.3f, 3.07f, 4.97f, 0.24f, 8.91f, 1.09f, 0.27f, 7.29f, 6.94f, 2.31f, 6.88f, 4.33f, 1.37f, 0.86f, 0.46f, 6.07f, 3.81f, 0.86f, 6.99f, 4.36f, 1.92f, 8.19f, 3.57f, 7.9f, 6.78f, 4.64f, 6.82f, 6.18f, 9.63f, 2.63f, 2.33f, 1.36f, 2.7f, 9.99f, 9.85f, 8.06f, 4.8f, 7.8f, 5.43f}}, {1, {0.0f, 4.0f, 4.0f, 24.0f, 8.0f, 0.0f, 4.0f, 4.0f, 28.0f, 12.0f, 1.0f, 7.0f, 1.0f, 25.0f, 11.0f, 1.0f, 1.0f, 7.0f, 5.0f, 11.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {6.16f, 8.6f, 7.12f, 6.79f, 5.13f, 8.44f, 7.86f, 9.69f, 4.42f, 9.89f, 9.3f, 6.47f, 7.86f, 9.89f, 9.3f, 9.89f, 9.3f, 9.5f, 7.86f, 9.89f, 9.3f, 9.89f, 9.3f, 9.5f, 9.5f, 6.73f, 9.5f, 9.28f, 6.89f, 8.97f, 6.18f, 9.63f, 9.99f, 9.85f, 9.99f, 9.85f, 7.29f, 6.94f, 7.29f, 6.94f, 2.31f, 6.88f, 7.9f, 6.78f, 7.9f, 6.82f, 4.64f, 6.82f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_nhwc_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed_2() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {8.84f, 8.88f, 7.41f, 5.6f, 9.95f, 4.37f, 0.1f, 7.64f, 6.5f, 9.47f, 7.55f, 3.0f, 0.89f, 3.01f, 6.3f, 4.4f, 1.64f, 6.74f, 6.16f, 8.6f, 5.85f, 3.17f, 7.12f, 6.79f, 5.77f, 6.62f, 5.13f, 8.44f, 5.08f, 7.12f, 2.84f, 1.19f, 8.37f, 0.9f, 7.86f, 9.69f, 1.97f, 1.31f, 4.42f, 9.89f, 0.18f, 9.0f, 9.3f, 0.44f, 5.05f, 6.47f, 1.09f, 9.5f, 1.3f, 2.18f, 2.05f, 7.74f, 7.66f, 0.65f, 4.18f, 7.14f, 5.35f, 7.9f, 1.04f, 1.47f, 9.01f, 0.95f, 4.07f, 0.65f, 5.47f, 2.64f, 0.86f, 4.86f, 2.38f, 2.45f, 8.77f, 0.06f, 3.6f, 9.28f, 5.84f, 8.97f, 6.89f, 1.43f, 3.9f, 5.91f, 7.4f, 9.25f, 3.12f, 4.92f, 1.87f, 3.22f, 9.5f, 6.73f, 2.07f, 7.3f, 3.07f, 4.97f, 0.24f, 8.91f, 1.09f, 0.27f, 7.29f, 6.94f, 2.31f, 6.88f, 4.33f, 1.37f, 0.86f, 0.46f, 6.07f, 3.81f, 0.86f, 6.99f, 4.36f, 1.92f, 8.19f, 3.57f, 7.9f, 6.78f, 4.64f, 6.82f, 6.18f, 9.63f, 2.63f, 2.33f, 1.36f, 2.7f, 9.99f, 9.85f, 8.06f, 4.8f, 7.8f, 5.43f}}, {1, {0.0f, 4.0f, 4.0f, 24.0f, 8.0f, 0.0f, 4.0f, 4.0f, 28.0f, 12.0f, 1.0f, 7.0f, 1.0f, 25.0f, 11.0f, 1.0f, 1.0f, 7.0f, 5.0f, 11.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {6.16f, 8.6f, 7.12f, 6.79f, 5.13f, 8.44f, 7.86f, 9.69f, 4.42f, 9.89f, 9.3f, 6.47f, 7.86f, 9.89f, 9.3f, 9.89f, 9.3f, 9.5f, 7.86f, 9.89f, 9.3f, 9.89f, 9.3f, 9.5f, 9.5f, 6.73f, 9.5f, 9.28f, 6.89f, 8.97f, 6.18f, 9.63f, 9.99f, 9.85f, 9.99f, 9.85f, 7.29f, 6.94f, 7.29f, 6.94f, 2.31f, 6.88f, 7.9f, 6.78f, 7.9f, 6.82f, 4.64f, 6.82f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_nhwc_relaxed_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_nhwc_quant8_2() {
+static std::vector<MixedTypedExample> examples_nhwc_quant8_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{1, {0.0f, 4.0f, 4.0f, 24.0f, 8.0f, 0.0f, 4.0f, 4.0f, 28.0f, 12.0f, 1.0f, 7.0f, 1.0f, 25.0f, 11.0f, 1.0f, 1.0f, 7.0f, 5.0f, 11.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {221, 222, 185, 140, 249, 109, 2, 191, 162, 237, 189, 75, 22, 75, 158, 110, 41, 168, 154, 215, 146, 79, 178, 170, 144, 166, 128, 211, 127, 178, 71, 30, 209, 22, 197, 242, 49, 33, 111, 247, 5, 225, 233, 11, 126, 162, 27, 238, 32, 55, 51, 194, 192, 16, 104, 178, 134, 198, 26, 37, 225, 24, 102, 16, 137, 66, 22, 122, 60, 61, 219, 2, 90, 232, 146, 224, 172, 36, 98, 148, 185, 231, 78, 123, 47, 80, 238, 168, 52, 183, 77, 124, 6, 223, 27, 7, 182, 174, 58, 172, 108, 34, 22, 12, 152, 95, 22, 175, 109, 48, 205, 89, 198, 170, 116, 171, 154, 241, 66, 58, 34, 68, 250, 246, 202, 120, 195, 136}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {154, 215, 178, 170, 128, 211, 197, 242, 111, 247, 233, 162, 197, 247, 233, 247, 233, 238, 197, 247, 233, 247, 233, 238, 238, 168, 238, 232, 172, 224, 154, 241, 250, 246, 250, 246, 182, 174, 182, 174, 58, 172, 198, 170, 198, 171, 116, 171}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_nhwc_quant8_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_nchw_2() {
+static std::vector<MixedTypedExample> examples_nchw_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {8.84f, 7.41f, 9.95f, 0.1f, 6.5f, 7.55f, 0.89f, 6.3f, 1.64f, 6.16f, 5.85f, 7.12f, 5.77f, 5.13f, 5.08f, 2.84f, 8.37f, 7.86f, 1.97f, 4.42f, 0.18f, 9.3f, 5.05f, 1.09f, 1.3f, 2.05f, 7.66f, 4.18f, 5.35f, 1.04f, 9.01f, 4.07f, 8.88f, 5.6f, 4.37f, 7.64f, 9.47f, 3.0f, 3.01f, 4.4f, 6.74f, 8.6f, 3.17f, 6.79f, 6.62f, 8.44f, 7.12f, 1.19f, 0.9f, 9.69f, 1.31f, 9.89f, 9.0f, 0.44f, 6.47f, 9.5f, 2.18f, 7.74f, 0.65f, 7.14f, 7.9f, 1.47f, 0.95f, 0.65f, 5.47f, 0.86f, 2.38f, 8.77f, 3.6f, 5.84f, 6.89f, 3.9f, 7.4f, 3.12f, 1.87f, 9.5f, 2.07f, 3.07f, 0.24f, 1.09f, 7.29f, 2.31f, 4.33f, 0.86f, 6.07f, 0.86f, 4.36f, 8.19f, 7.9f, 4.64f, 6.18f, 2.63f, 1.36f, 9.99f, 8.06f, 7.8f, 2.64f, 4.86f, 2.45f, 0.06f, 9.28f, 8.97f, 1.43f, 5.91f, 9.25f, 4.92f, 3.22f, 6.73f, 7.3f, 4.97f, 8.91f, 0.27f, 6.94f, 6.88f, 1.37f, 0.46f, 3.81f, 6.99f, 1.92f, 3.57f, 6.78f, 6.82f, 9.63f, 2.33f, 2.7f, 9.85f, 4.8f, 5.43f}}, {1, {0.0f, 4.0f, 4.0f, 24.0f, 8.0f, 0.0f, 4.0f, 4.0f, 28.0f, 12.0f, 1.0f, 7.0f, 1.0f, 25.0f, 11.0f, 1.0f, 1.0f, 7.0f, 5.0f, 11.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {6.16f, 7.12f, 5.13f, 7.86f, 4.42f, 9.3f, 8.6f, 6.79f, 8.44f, 9.69f, 9.89f, 6.47f, 7.86f, 9.3f, 9.3f, 7.86f, 9.3f, 9.3f, 9.89f, 9.89f, 9.5f, 9.89f, 9.89f, 9.5f, 9.5f, 9.5f, 6.89f, 6.18f, 9.99f, 9.99f, 6.73f, 9.28f, 8.97f, 9.63f, 9.85f, 9.85f, 7.29f, 7.29f, 2.31f, 7.9f, 7.9f, 4.64f, 6.94f, 6.94f, 6.88f, 6.78f, 6.82f, 6.82f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_nchw_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed_2() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {8.84f, 7.41f, 9.95f, 0.1f, 6.5f, 7.55f, 0.89f, 6.3f, 1.64f, 6.16f, 5.85f, 7.12f, 5.77f, 5.13f, 5.08f, 2.84f, 8.37f, 7.86f, 1.97f, 4.42f, 0.18f, 9.3f, 5.05f, 1.09f, 1.3f, 2.05f, 7.66f, 4.18f, 5.35f, 1.04f, 9.01f, 4.07f, 8.88f, 5.6f, 4.37f, 7.64f, 9.47f, 3.0f, 3.01f, 4.4f, 6.74f, 8.6f, 3.17f, 6.79f, 6.62f, 8.44f, 7.12f, 1.19f, 0.9f, 9.69f, 1.31f, 9.89f, 9.0f, 0.44f, 6.47f, 9.5f, 2.18f, 7.74f, 0.65f, 7.14f, 7.9f, 1.47f, 0.95f, 0.65f, 5.47f, 0.86f, 2.38f, 8.77f, 3.6f, 5.84f, 6.89f, 3.9f, 7.4f, 3.12f, 1.87f, 9.5f, 2.07f, 3.07f, 0.24f, 1.09f, 7.29f, 2.31f, 4.33f, 0.86f, 6.07f, 0.86f, 4.36f, 8.19f, 7.9f, 4.64f, 6.18f, 2.63f, 1.36f, 9.99f, 8.06f, 7.8f, 2.64f, 4.86f, 2.45f, 0.06f, 9.28f, 8.97f, 1.43f, 5.91f, 9.25f, 4.92f, 3.22f, 6.73f, 7.3f, 4.97f, 8.91f, 0.27f, 6.94f, 6.88f, 1.37f, 0.46f, 3.81f, 6.99f, 1.92f, 3.57f, 6.78f, 6.82f, 9.63f, 2.33f, 2.7f, 9.85f, 4.8f, 5.43f}}, {1, {0.0f, 4.0f, 4.0f, 24.0f, 8.0f, 0.0f, 4.0f, 4.0f, 28.0f, 12.0f, 1.0f, 7.0f, 1.0f, 25.0f, 11.0f, 1.0f, 1.0f, 7.0f, 5.0f, 11.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {6.16f, 7.12f, 5.13f, 7.86f, 4.42f, 9.3f, 8.6f, 6.79f, 8.44f, 9.69f, 9.89f, 6.47f, 7.86f, 9.3f, 9.3f, 7.86f, 9.3f, 9.3f, 9.89f, 9.89f, 9.5f, 9.89f, 9.89f, 9.5f, 9.5f, 9.5f, 6.89f, 6.18f, 9.99f, 9.99f, 6.73f, 9.28f, 8.97f, 9.63f, 9.85f, 9.85f, 7.29f, 7.29f, 2.31f, 7.9f, 7.9f, 4.64f, 6.94f, 6.94f, 6.88f, 6.78f, 6.82f, 6.82f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_nchw_relaxed_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_nchw_quant8_2() {
+static std::vector<MixedTypedExample> examples_nchw_quant8_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{1, {0.0f, 4.0f, 4.0f, 24.0f, 8.0f, 0.0f, 4.0f, 4.0f, 28.0f, 12.0f, 1.0f, 7.0f, 1.0f, 25.0f, 11.0f, 1.0f, 1.0f, 7.0f, 5.0f, 11.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {221, 185, 249, 2, 162, 189, 22, 158, 41, 154, 146, 178, 144, 128, 127, 71, 209, 197, 49, 111, 5, 233, 126, 27, 32, 51, 192, 104, 134, 26, 225, 102, 222, 140, 109, 191, 237, 75, 75, 110, 168, 215, 79, 170, 166, 211, 178, 30, 22, 242, 33, 247, 225, 11, 162, 238, 55, 194, 16, 178, 198, 37, 24, 16, 137, 22, 60, 219, 90, 146, 172, 98, 185, 78, 47, 238, 52, 77, 6, 27, 182, 58, 108, 22, 152, 22, 109, 205, 198, 116, 154, 66, 34, 250, 202, 195, 66, 122, 61, 2, 232, 224, 36, 148, 231, 123, 80, 168, 183, 124, 223, 7, 174, 172, 34, 12, 95, 175, 48, 89, 170, 171, 241, 58, 68, 246, 120, 136}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {154, 178, 128, 197, 111, 233, 215, 170, 211, 242, 247, 162, 197, 233, 233, 197, 233, 233, 247, 247, 238, 247, 247, 238, 238, 238, 172, 154, 250, 250, 168, 232, 224, 241, 246, 246, 182, 182, 58, 198, 198, 116, 174, 174, 172, 170, 171, 171}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_nchw_quant8_2;
+};
+
diff --git a/nn/runtime/test/generated/examples/rotated_bbox_transform.example.cpp b/nn/runtime/test/generated/examples/rotated_bbox_transform.example.cpp
index 4fe212c..2465e42 100644
--- a/nn/runtime/test/generated/examples/rotated_bbox_transform.example.cpp
+++ b/nn/runtime/test/generated/examples/rotated_bbox_transform.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: rotated_bbox_transform.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
-std::vector<MixedTypedExample> examples_relaxed = {
+std::vector<MixedTypedExample>& get_examples_relaxed() {
+static std::vector<MixedTypedExample> examples_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_2 = {
+std::vector<MixedTypedExample>& get_examples_2() {
+static std::vector<MixedTypedExample> examples_2 = {
 // Begin of an example
 {
 .operands = {
@@ -101,8 +108,11 @@
 },
 }, // End of an example
 };
+return examples_2;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_2() {
+static std::vector<MixedTypedExample> examples_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -135,8 +145,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_3 = {
+std::vector<MixedTypedExample>& get_examples_3() {
+static std::vector<MixedTypedExample> examples_3 = {
 // Begin of an example
 {
 .operands = {
@@ -169,8 +182,11 @@
 },
 }, // End of an example
 };
+return examples_3;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_3 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_3() {
+static std::vector<MixedTypedExample> examples_relaxed_3 = {
 // Begin of an example
 {
 .operands = {
@@ -203,8 +219,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_3;
+};
 
-std::vector<MixedTypedExample> examples_single_batch = {
+std::vector<MixedTypedExample>& get_examples_single_batch() {
+static std::vector<MixedTypedExample> examples_single_batch = {
 // Begin of an example
 {
 .operands = {
@@ -237,8 +256,11 @@
 },
 }, // End of an example
 };
+return examples_single_batch;
+};
 
-std::vector<MixedTypedExample> examples_single_batch_relaxed = {
+std::vector<MixedTypedExample>& get_examples_single_batch_relaxed() {
+static std::vector<MixedTypedExample> examples_single_batch_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -271,4 +293,6 @@
 },
 }, // End of an example
 };
+return examples_single_batch_relaxed;
+};
 
diff --git a/nn/runtime/test/generated/examples/slice.example.cpp b/nn/runtime/test/generated/examples/slice.example.cpp
new file mode 100644
index 0000000..00e6eea
--- /dev/null
+++ b/nn/runtime/test/generated/examples/slice.example.cpp
@@ -0,0 +1,818 @@
+// clang-format off
+// Generated file (from: slice.mod.py). Do not edit
+std::vector<MixedTypedExample> examples = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f}}},
+  // int -> INT32 map
+  {{1, {1}}, {2, {2}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {2.0f, 3.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_relaxed = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f}}},
+  // int -> INT32 map
+  {{1, {1}}, {2, {2}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {2.0f, 3.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{1, {1}}, {2, {2}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {2.0f, 3.0f}}},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}}},
+  // int -> INT32 map
+  {{1, {1, 0}}, {2, {1, 2}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {4.0f, 5.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_relaxed_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}}},
+  // int -> INT32 map
+  {{1, {1, 0}}, {2, {1, 2}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {4.0f, 5.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_float16_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{1, {1, 0}}, {2, {1, 2}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {4.0f, 5.0f}}},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_3 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f}}},
+  // int -> INT32 map
+  {{1, {0, 0, 0}}, {2, {2, 3, 2}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_relaxed_3 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f}}},
+  // int -> INT32 map
+  {{1, {0, 0, 0}}, {2, {2, 3, 2}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_float16_3 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{1, {0, 0, 0}}, {2, {2, 3, 2}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f}}},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_4 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f}}},
+  // int -> INT32 map
+  {{1, {1, 0, 0, 0}}, {2, {3, 1, 1, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {2.0f, 3.0f, 4.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_relaxed_4 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f}}},
+  // int -> INT32 map
+  {{1, {1, 0, 0, 0}}, {2, {3, 1, 1, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {2.0f, 3.0f, 4.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_float16_4 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{1, {1, 0, 0, 0}}, {2, {3, 1, 1, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {2.0f, 3.0f, 4.0f}}},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_5 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6}}, {1, {1, 0, 0, 0}}, {2, {1, 1, 3, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {3, 3, 3}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_relaxed_5 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6}}, {1, {1, 0, 0, 0}}, {2, {1, 1, 3, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {3, 3, 3}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_float16_5 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6}}, {1, {1, 0, 0, 0}}, {2, {1, 1, 3, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {3, 3, 3}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_6 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6}}, {1, {1, 0, 0, 0}}, {2, {2, 1, 3, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {3, 3, 3, 5, 5, 5}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_relaxed_6 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6}}, {1, {1, 0, 0, 0}}, {2, {2, 1, 3, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {3, 3, 3, 5, 5, 5}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_float16_6 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6}}, {1, {1, 0, 0, 0}}, {2, {2, 1, 3, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {3, 3, 3, 5, 5, 5}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_7 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{1, {1, 0, 0, 0}}, {2, {2, 1, 3, 1}}},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {3, 3, 3, 5, 5, 5}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_relaxed_7 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{1, {1, 0, 0, 0}}, {2, {2, 1, 3, 1}}},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {3, 3, 3, 5, 5, 5}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_float16_7 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{1, {1, 0, 0, 0}}, {2, {2, 1, 3, 1}}},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {3, 3, 3, 5, 5, 5}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_8 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6}}, {1, {1, 0, 0, 0}}, {2, {2, 1, -1, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {3, 3, 3, 5, 5, 5}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_relaxed_8 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6}}, {1, {1, 0, 0, 0}}, {2, {2, 1, -1, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {3, 3, 3, 5, 5, 5}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_float16_8 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6}}, {1, {1, 0, 0, 0}}, {2, {2, 1, -1, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {3, 3, 3, 5, 5, 5}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
diff --git a/nn/runtime/test/generated/examples/softmax_float_1.example.cpp b/nn/runtime/test/generated/examples/softmax_float_1.example.cpp
index e8ed69f..64e8587 100644
--- a/nn/runtime/test/generated/examples/softmax_float_1.example.cpp
+++ b/nn/runtime/test/generated/examples/softmax_float_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: softmax_float_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/softmax_float_1_relaxed.example.cpp b/nn/runtime/test/generated/examples/softmax_float_1_relaxed.example.cpp
index ee51ee6..76f90e1 100644
--- a/nn/runtime/test/generated/examples/softmax_float_1_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/softmax_float_1_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: softmax_float_1_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/softmax_float_2.example.cpp b/nn/runtime/test/generated/examples/softmax_float_2.example.cpp
index bb9cd77..fae753e 100644
--- a/nn/runtime/test/generated/examples/softmax_float_2.example.cpp
+++ b/nn/runtime/test/generated/examples/softmax_float_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: softmax_float_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/softmax_float_2_relaxed.example.cpp b/nn/runtime/test/generated/examples/softmax_float_2_relaxed.example.cpp
index 54baee8..4cdd341 100644
--- a/nn/runtime/test/generated/examples/softmax_float_2_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/softmax_float_2_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: softmax_float_2_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/softmax_quant8_1.example.cpp b/nn/runtime/test/generated/examples/softmax_quant8_1.example.cpp
index f0cfc24..a6bb733 100644
--- a/nn/runtime/test/generated/examples/softmax_quant8_1.example.cpp
+++ b/nn/runtime/test/generated/examples/softmax_quant8_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: softmax_quant8_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/softmax_quant8_2.example.cpp b/nn/runtime/test/generated/examples/softmax_quant8_2.example.cpp
index 2bf6d19..e02a98a 100644
--- a/nn/runtime/test/generated/examples/softmax_quant8_2.example.cpp
+++ b/nn/runtime/test/generated/examples/softmax_quant8_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: softmax_quant8_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/softmax_v1_2.example.cpp b/nn/runtime/test/generated/examples/softmax_v1_2.example.cpp
index 57d7815..0045a19 100644
--- a/nn/runtime/test/generated/examples/softmax_v1_2.example.cpp
+++ b/nn/runtime/test/generated/examples/softmax_v1_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: softmax_v1_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
-std::vector<MixedTypedExample> examples_dim1_axis0 = {
+std::vector<MixedTypedExample>& get_examples_dim1_axis0() {
+static std::vector<MixedTypedExample> examples_dim1_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,11 @@
 },
 }, // End of an example
 };
+return examples_dim1_axis0;
+};
 
-std::vector<MixedTypedExample> examples_dim3_axis2 = {
+std::vector<MixedTypedExample>& get_examples_dim3_axis2() {
+static std::vector<MixedTypedExample> examples_dim3_axis2 = {
 // Begin of an example
 {
 .operands = {
@@ -101,8 +108,11 @@
 },
 }, // End of an example
 };
+return examples_dim3_axis2;
+};
 
-std::vector<MixedTypedExample> examples_relaxed = {
+std::vector<MixedTypedExample>& get_examples_relaxed() {
+static std::vector<MixedTypedExample> examples_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -135,8 +145,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_dim1_axis0 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_dim1_axis0() {
+static std::vector<MixedTypedExample> examples_relaxed_dim1_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -169,8 +182,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_dim1_axis0;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_dim3_axis2 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_dim3_axis2() {
+static std::vector<MixedTypedExample> examples_relaxed_dim3_axis2 = {
 // Begin of an example
 {
 .operands = {
@@ -203,8 +219,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_dim3_axis2;
+};
 
-std::vector<MixedTypedExample> examples_float16 = {
+std::vector<MixedTypedExample>& get_examples_float16() {
+static std::vector<MixedTypedExample> examples_float16 = {
 // Begin of an example
 {
 .operands = {
@@ -216,7 +235,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
@@ -229,7 +248,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f, 0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f, 0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f, 0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f}}},
@@ -237,8 +256,11 @@
 },
 }, // End of an example
 };
+return examples_float16;
+};
 
-std::vector<MixedTypedExample> examples_float16_dim1_axis0 = {
+std::vector<MixedTypedExample>& get_examples_float16_dim1_axis0() {
+static std::vector<MixedTypedExample> examples_float16_dim1_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -250,7 +272,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f}}},
@@ -263,7 +285,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f}}},
@@ -271,8 +293,11 @@
 },
 }, // End of an example
 };
+return examples_float16_dim1_axis0;
+};
 
-std::vector<MixedTypedExample> examples_float16_dim3_axis2 = {
+std::vector<MixedTypedExample>& get_examples_float16_dim3_axis2() {
+static std::vector<MixedTypedExample> examples_float16_dim3_axis2 = {
 // Begin of an example
 {
 .operands = {
@@ -284,7 +309,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
@@ -297,7 +322,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f, 0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f}}},
@@ -305,8 +330,11 @@
 },
 }, // End of an example
 };
+return examples_float16_dim3_axis2;
+};
 
-std::vector<MixedTypedExample> examples_quant8 = {
+std::vector<MixedTypedExample>& get_examples_quant8() {
+static std::vector<MixedTypedExample> examples_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -339,8 +367,11 @@
 },
 }, // End of an example
 };
+return examples_quant8;
+};
 
-std::vector<MixedTypedExample> examples_quant8_dim1_axis0 = {
+std::vector<MixedTypedExample>& get_examples_quant8_dim1_axis0() {
+static std::vector<MixedTypedExample> examples_quant8_dim1_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -373,8 +404,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_dim1_axis0;
+};
 
-std::vector<MixedTypedExample> examples_quant8_dim3_axis2 = {
+std::vector<MixedTypedExample>& get_examples_quant8_dim3_axis2() {
+static std::vector<MixedTypedExample> examples_quant8_dim3_axis2 = {
 // Begin of an example
 {
 .operands = {
@@ -407,8 +441,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_dim3_axis2;
+};
 
-std::vector<MixedTypedExample> examples_2 = {
+std::vector<MixedTypedExample>& get_examples_2() {
+static std::vector<MixedTypedExample> examples_2 = {
 // Begin of an example
 {
 .operands = {
@@ -441,8 +478,11 @@
 },
 }, // End of an example
 };
+return examples_2;
+};
 
-std::vector<MixedTypedExample> examples_dim1_axis0_2 = {
+std::vector<MixedTypedExample>& get_examples_dim1_axis0_2() {
+static std::vector<MixedTypedExample> examples_dim1_axis0_2 = {
 // Begin of an example
 {
 .operands = {
@@ -475,8 +515,11 @@
 },
 }, // End of an example
 };
+return examples_dim1_axis0_2;
+};
 
-std::vector<MixedTypedExample> examples_dim3_axis2_2 = {
+std::vector<MixedTypedExample>& get_examples_dim3_axis2_2() {
+static std::vector<MixedTypedExample> examples_dim3_axis2_2 = {
 // Begin of an example
 {
 .operands = {
@@ -509,8 +552,11 @@
 },
 }, // End of an example
 };
+return examples_dim3_axis2_2;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_2() {
+static std::vector<MixedTypedExample> examples_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -543,8 +589,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_dim1_axis0_2 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_dim1_axis0_2() {
+static std::vector<MixedTypedExample> examples_relaxed_dim1_axis0_2 = {
 // Begin of an example
 {
 .operands = {
@@ -577,8 +626,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_dim1_axis0_2;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_dim3_axis2_2 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_dim3_axis2_2() {
+static std::vector<MixedTypedExample> examples_relaxed_dim3_axis2_2 = {
 // Begin of an example
 {
 .operands = {
@@ -611,8 +663,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_dim3_axis2_2;
+};
 
-std::vector<MixedTypedExample> examples_float16_2 = {
+std::vector<MixedTypedExample>& get_examples_float16_2() {
+static std::vector<MixedTypedExample> examples_float16_2 = {
 // Begin of an example
 {
 .operands = {
@@ -624,7 +679,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
@@ -637,7 +692,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f}}},
@@ -645,8 +700,11 @@
 },
 }, // End of an example
 };
+return examples_float16_2;
+};
 
-std::vector<MixedTypedExample> examples_float16_dim1_axis0_2 = {
+std::vector<MixedTypedExample>& get_examples_float16_dim1_axis0_2() {
+static std::vector<MixedTypedExample> examples_float16_dim1_axis0_2 = {
 // Begin of an example
 {
 .operands = {
@@ -658,7 +716,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f}}},
@@ -671,7 +729,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f}}},
@@ -679,8 +737,11 @@
 },
 }, // End of an example
 };
+return examples_float16_dim1_axis0_2;
+};
 
-std::vector<MixedTypedExample> examples_float16_dim3_axis2_2 = {
+std::vector<MixedTypedExample>& get_examples_float16_dim3_axis2_2() {
+static std::vector<MixedTypedExample> examples_float16_dim3_axis2_2 = {
 // Begin of an example
 {
 .operands = {
@@ -692,7 +753,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
@@ -705,7 +766,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f}}},
@@ -713,8 +774,11 @@
 },
 }, // End of an example
 };
+return examples_float16_dim3_axis2_2;
+};
 
-std::vector<MixedTypedExample> examples_quant8_2 = {
+std::vector<MixedTypedExample>& get_examples_quant8_2() {
+static std::vector<MixedTypedExample> examples_quant8_2 = {
 // Begin of an example
 {
 .operands = {
@@ -747,8 +811,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_2;
+};
 
-std::vector<MixedTypedExample> examples_quant8_dim1_axis0_2 = {
+std::vector<MixedTypedExample>& get_examples_quant8_dim1_axis0_2() {
+static std::vector<MixedTypedExample> examples_quant8_dim1_axis0_2 = {
 // Begin of an example
 {
 .operands = {
@@ -781,8 +848,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_dim1_axis0_2;
+};
 
-std::vector<MixedTypedExample> examples_quant8_dim3_axis2_2 = {
+std::vector<MixedTypedExample>& get_examples_quant8_dim3_axis2_2() {
+static std::vector<MixedTypedExample> examples_quant8_dim3_axis2_2 = {
 // Begin of an example
 {
 .operands = {
@@ -815,8 +885,11 @@
 },
 }, // End of an example
 };
+return examples_quant8_dim3_axis2_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis0 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis0() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -849,8 +922,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis0;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis0_neg() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -883,8 +959,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis1 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis1() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis1 = {
 // Begin of an example
 {
 .operands = {
@@ -917,8 +996,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis1;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis1_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis1_neg() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis1_neg = {
 // Begin of an example
 {
 .operands = {
@@ -951,8 +1033,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis1_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis2() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis2 = {
 // Begin of an example
 {
 .operands = {
@@ -985,8 +1070,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis2_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis2_neg() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis2_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1019,8 +1107,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis2_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis3 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis3() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis3 = {
 // Begin of an example
 {
 .operands = {
@@ -1053,8 +1144,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis3;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis3_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis3_neg() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis3_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1087,8 +1181,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis3_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim3_axis0 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim3_axis0() {
+static std::vector<MixedTypedExample> examples_axis_dim3_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -1121,8 +1218,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim3_axis0;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim3_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_dim3_axis0_neg() {
+static std::vector<MixedTypedExample> examples_axis_dim3_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1155,8 +1255,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim3_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim3_axis1 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim3_axis1() {
+static std::vector<MixedTypedExample> examples_axis_dim3_axis1 = {
 // Begin of an example
 {
 .operands = {
@@ -1189,8 +1292,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim3_axis1;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim3_axis1_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_dim3_axis1_neg() {
+static std::vector<MixedTypedExample> examples_axis_dim3_axis1_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1223,8 +1329,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim3_axis1_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim3_axis2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim3_axis2() {
+static std::vector<MixedTypedExample> examples_axis_dim3_axis2 = {
 // Begin of an example
 {
 .operands = {
@@ -1257,8 +1366,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim3_axis2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim3_axis2_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_dim3_axis2_neg() {
+static std::vector<MixedTypedExample> examples_axis_dim3_axis2_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1291,8 +1403,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim3_axis2_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim2_axis0 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim2_axis0() {
+static std::vector<MixedTypedExample> examples_axis_dim2_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -1325,8 +1440,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim2_axis0;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim2_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_dim2_axis0_neg() {
+static std::vector<MixedTypedExample> examples_axis_dim2_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1359,8 +1477,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim2_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim2_axis1 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim2_axis1() {
+static std::vector<MixedTypedExample> examples_axis_dim2_axis1 = {
 // Begin of an example
 {
 .operands = {
@@ -1393,8 +1514,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim2_axis1;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim2_axis1_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_dim2_axis1_neg() {
+static std::vector<MixedTypedExample> examples_axis_dim2_axis1_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1427,8 +1551,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim2_axis1_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim1_axis0 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim1_axis0() {
+static std::vector<MixedTypedExample> examples_axis_dim1_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -1461,8 +1588,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim1_axis0;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim1_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_dim1_axis0_neg() {
+static std::vector<MixedTypedExample> examples_axis_dim1_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1495,8 +1625,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim1_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis0 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis0() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -1529,8 +1662,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis0;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis0_neg() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1563,8 +1699,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis1 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis1() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis1 = {
 // Begin of an example
 {
 .operands = {
@@ -1597,8 +1736,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis1;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis1_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis1_neg() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis1_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1631,8 +1773,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis1_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis2 = {
 // Begin of an example
 {
 .operands = {
@@ -1665,8 +1810,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis2_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis2_neg() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis2_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1699,8 +1847,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis2_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis3 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis3() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis3 = {
 // Begin of an example
 {
 .operands = {
@@ -1733,8 +1884,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis3;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis3_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis3_neg() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis3_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1767,8 +1921,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis3_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis0 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim3_axis0() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -1801,8 +1958,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim3_axis0;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim3_axis0_neg() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1835,8 +1995,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim3_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis1 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim3_axis1() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis1 = {
 // Begin of an example
 {
 .operands = {
@@ -1869,8 +2032,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim3_axis1;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis1_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim3_axis1_neg() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis1_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1903,8 +2069,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim3_axis1_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim3_axis2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis2 = {
 // Begin of an example
 {
 .operands = {
@@ -1937,8 +2106,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim3_axis2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis2_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim3_axis2_neg() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis2_neg = {
 // Begin of an example
 {
 .operands = {
@@ -1971,8 +2143,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim3_axis2_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis0 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim2_axis0() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -2005,8 +2180,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim2_axis0;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim2_axis0_neg() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -2039,8 +2217,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim2_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis1 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim2_axis1() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis1 = {
 // Begin of an example
 {
 .operands = {
@@ -2073,8 +2254,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim2_axis1;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis1_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim2_axis1_neg() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis1_neg = {
 // Begin of an example
 {
 .operands = {
@@ -2107,8 +2291,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim2_axis1_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim1_axis0 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim1_axis0() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim1_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -2141,8 +2328,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim1_axis0;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim1_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim1_axis0_neg() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim1_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -2175,8 +2365,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim1_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim4_axis0 = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim4_axis0() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim4_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -2188,7 +2381,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
@@ -2201,7 +2394,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f}}},
@@ -2209,8 +2402,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim4_axis0;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim4_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim4_axis0_neg() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim4_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -2222,7 +2418,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
@@ -2235,7 +2431,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f}}},
@@ -2243,8 +2439,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim4_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim4_axis1 = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim4_axis1() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim4_axis1 = {
 // Begin of an example
 {
 .operands = {
@@ -2256,7 +2455,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f, 1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
@@ -2269,7 +2468,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f}}},
@@ -2277,8 +2476,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim4_axis1;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim4_axis1_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim4_axis1_neg() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim4_axis1_neg = {
 // Begin of an example
 {
 .operands = {
@@ -2290,7 +2492,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f, 1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
@@ -2303,7 +2505,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f}}},
@@ -2311,8 +2513,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim4_axis1_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim4_axis2 = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim4_axis2() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim4_axis2 = {
 // Begin of an example
 {
 .operands = {
@@ -2324,7 +2529,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
@@ -2337,7 +2542,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f}}},
@@ -2345,8 +2550,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim4_axis2;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim4_axis2_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim4_axis2_neg() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim4_axis2_neg = {
 // Begin of an example
 {
 .operands = {
@@ -2358,7 +2566,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
@@ -2371,7 +2579,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f}}},
@@ -2379,8 +2587,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim4_axis2_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim4_axis3 = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim4_axis3() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim4_axis3 = {
 // Begin of an example
 {
 .operands = {
@@ -2392,7 +2603,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
@@ -2405,7 +2616,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f, 0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f, 0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f, 0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f}}},
@@ -2413,8 +2624,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim4_axis3;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim4_axis3_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim4_axis3_neg() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim4_axis3_neg = {
 // Begin of an example
 {
 .operands = {
@@ -2426,7 +2640,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
@@ -2439,7 +2653,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f, 0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f, 0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f, 0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f}}},
@@ -2447,8 +2661,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim4_axis3_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim3_axis0 = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim3_axis0() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim3_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -2460,7 +2677,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
@@ -2473,7 +2690,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f}}},
@@ -2481,8 +2698,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim3_axis0;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim3_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim3_axis0_neg() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim3_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -2494,7 +2714,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
@@ -2507,7 +2727,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f}}},
@@ -2515,8 +2735,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim3_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim3_axis1 = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim3_axis1() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim3_axis1 = {
 // Begin of an example
 {
 .operands = {
@@ -2528,7 +2751,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
@@ -2541,7 +2764,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f}}},
@@ -2549,8 +2772,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim3_axis1;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim3_axis1_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim3_axis1_neg() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim3_axis1_neg = {
 // Begin of an example
 {
 .operands = {
@@ -2562,7 +2788,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
@@ -2575,7 +2801,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f}}},
@@ -2583,8 +2809,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim3_axis1_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim3_axis2 = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim3_axis2() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim3_axis2 = {
 // Begin of an example
 {
 .operands = {
@@ -2596,7 +2825,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
@@ -2609,7 +2838,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f, 0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f}}},
@@ -2617,8 +2846,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim3_axis2;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim3_axis2_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim3_axis2_neg() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim3_axis2_neg = {
 // Begin of an example
 {
 .operands = {
@@ -2630,7 +2862,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
@@ -2643,7 +2875,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f, 0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f}}},
@@ -2651,8 +2883,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim3_axis2_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim2_axis0 = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim2_axis0() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim2_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -2664,7 +2899,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
@@ -2677,7 +2912,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f}}},
@@ -2685,8 +2920,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim2_axis0;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim2_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim2_axis0_neg() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim2_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -2698,7 +2936,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
@@ -2711,7 +2949,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f}}},
@@ -2719,8 +2957,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim2_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim2_axis1 = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim2_axis1() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim2_axis1 = {
 // Begin of an example
 {
 .operands = {
@@ -2732,7 +2973,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
@@ -2745,7 +2986,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f}}},
@@ -2753,8 +2994,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim2_axis1;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim2_axis1_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim2_axis1_neg() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim2_axis1_neg = {
 // Begin of an example
 {
 .operands = {
@@ -2766,7 +3010,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
@@ -2779,7 +3023,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f}}},
@@ -2787,8 +3031,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim2_axis1_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim1_axis0 = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim1_axis0() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim1_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -2800,7 +3047,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f}}},
@@ -2813,7 +3060,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f}}},
@@ -2821,8 +3068,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim1_axis0;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim1_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim1_axis0_neg() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim1_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -2834,7 +3084,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f}}},
@@ -2847,7 +3097,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f}}},
@@ -2855,8 +3105,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim1_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim4_axis0 = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim4_axis0() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim4_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -2889,8 +3142,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim4_axis0;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim4_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim4_axis0_neg() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim4_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -2923,8 +3179,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim4_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim4_axis1 = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim4_axis1() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim4_axis1 = {
 // Begin of an example
 {
 .operands = {
@@ -2957,8 +3216,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim4_axis1;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim4_axis1_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim4_axis1_neg() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim4_axis1_neg = {
 // Begin of an example
 {
 .operands = {
@@ -2991,8 +3253,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim4_axis1_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim4_axis2 = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim4_axis2() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim4_axis2 = {
 // Begin of an example
 {
 .operands = {
@@ -3025,8 +3290,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim4_axis2;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim4_axis2_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim4_axis2_neg() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim4_axis2_neg = {
 // Begin of an example
 {
 .operands = {
@@ -3059,8 +3327,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim4_axis2_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim4_axis3 = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim4_axis3() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim4_axis3 = {
 // Begin of an example
 {
 .operands = {
@@ -3093,8 +3364,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim4_axis3;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim4_axis3_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim4_axis3_neg() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim4_axis3_neg = {
 // Begin of an example
 {
 .operands = {
@@ -3127,8 +3401,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim4_axis3_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim3_axis0 = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim3_axis0() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim3_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -3161,8 +3438,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim3_axis0;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim3_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim3_axis0_neg() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim3_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -3195,8 +3475,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim3_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim3_axis1 = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim3_axis1() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim3_axis1 = {
 // Begin of an example
 {
 .operands = {
@@ -3229,8 +3512,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim3_axis1;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim3_axis1_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim3_axis1_neg() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim3_axis1_neg = {
 // Begin of an example
 {
 .operands = {
@@ -3263,8 +3549,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim3_axis1_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim3_axis2 = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim3_axis2() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim3_axis2 = {
 // Begin of an example
 {
 .operands = {
@@ -3297,8 +3586,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim3_axis2;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim3_axis2_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim3_axis2_neg() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim3_axis2_neg = {
 // Begin of an example
 {
 .operands = {
@@ -3331,8 +3623,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim3_axis2_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim2_axis0 = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim2_axis0() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim2_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -3365,8 +3660,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim2_axis0;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim2_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim2_axis0_neg() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim2_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -3399,8 +3697,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim2_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim2_axis1 = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim2_axis1() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim2_axis1 = {
 // Begin of an example
 {
 .operands = {
@@ -3433,8 +3734,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim2_axis1;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim2_axis1_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim2_axis1_neg() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim2_axis1_neg = {
 // Begin of an example
 {
 .operands = {
@@ -3467,8 +3771,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim2_axis1_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim1_axis0 = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim1_axis0() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim1_axis0 = {
 // Begin of an example
 {
 .operands = {
@@ -3501,8 +3808,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim1_axis0;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim1_axis0_neg = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim1_axis0_neg() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim1_axis0_neg = {
 // Begin of an example
 {
 .operands = {
@@ -3535,8 +3845,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim1_axis0_neg;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis0_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis0_2() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis0_2 = {
 // Begin of an example
 {
 .operands = {
@@ -3569,8 +3882,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis0_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis0_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis0_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis0_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -3603,8 +3919,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis0_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis1_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis1_2() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis1_2 = {
 // Begin of an example
 {
 .operands = {
@@ -3637,8 +3956,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis1_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis1_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis1_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis1_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -3671,8 +3993,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis1_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis2_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis2_2() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis2_2 = {
 // Begin of an example
 {
 .operands = {
@@ -3705,8 +4030,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis2_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis2_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis2_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis2_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -3739,8 +4067,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis2_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis3_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis3_2() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis3_2 = {
 // Begin of an example
 {
 .operands = {
@@ -3773,8 +4104,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis3_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim4_axis3_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim4_axis3_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_dim4_axis3_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -3807,8 +4141,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim4_axis3_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim3_axis0_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim3_axis0_2() {
+static std::vector<MixedTypedExample> examples_axis_dim3_axis0_2 = {
 // Begin of an example
 {
 .operands = {
@@ -3841,8 +4178,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim3_axis0_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim3_axis0_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim3_axis0_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_dim3_axis0_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -3875,8 +4215,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim3_axis0_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim3_axis1_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim3_axis1_2() {
+static std::vector<MixedTypedExample> examples_axis_dim3_axis1_2 = {
 // Begin of an example
 {
 .operands = {
@@ -3909,8 +4252,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim3_axis1_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim3_axis1_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim3_axis1_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_dim3_axis1_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -3943,8 +4289,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim3_axis1_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim3_axis2_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim3_axis2_2() {
+static std::vector<MixedTypedExample> examples_axis_dim3_axis2_2 = {
 // Begin of an example
 {
 .operands = {
@@ -3977,8 +4326,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim3_axis2_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim3_axis2_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim3_axis2_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_dim3_axis2_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -4011,8 +4363,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim3_axis2_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim2_axis0_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim2_axis0_2() {
+static std::vector<MixedTypedExample> examples_axis_dim2_axis0_2 = {
 // Begin of an example
 {
 .operands = {
@@ -4045,8 +4400,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim2_axis0_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim2_axis0_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim2_axis0_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_dim2_axis0_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -4079,8 +4437,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim2_axis0_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim2_axis1_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim2_axis1_2() {
+static std::vector<MixedTypedExample> examples_axis_dim2_axis1_2 = {
 // Begin of an example
 {
 .operands = {
@@ -4113,8 +4474,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim2_axis1_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim2_axis1_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim2_axis1_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_dim2_axis1_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -4147,8 +4511,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim2_axis1_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim1_axis0_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim1_axis0_2() {
+static std::vector<MixedTypedExample> examples_axis_dim1_axis0_2 = {
 // Begin of an example
 {
 .operands = {
@@ -4181,8 +4548,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim1_axis0_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_dim1_axis0_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_dim1_axis0_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_dim1_axis0_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -4215,8 +4585,11 @@
 },
 }, // End of an example
 };
+return examples_axis_dim1_axis0_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis0_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis0_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis0_2 = {
 // Begin of an example
 {
 .operands = {
@@ -4249,8 +4622,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis0_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis0_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis0_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis0_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -4283,8 +4659,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis0_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis1_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis1_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis1_2 = {
 // Begin of an example
 {
 .operands = {
@@ -4317,8 +4696,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis1_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis1_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis1_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis1_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -4351,8 +4733,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis1_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis2_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis2_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis2_2 = {
 // Begin of an example
 {
 .operands = {
@@ -4385,8 +4770,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis2_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis2_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis2_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis2_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -4419,8 +4807,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis2_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis3_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis3_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis3_2 = {
 // Begin of an example
 {
 .operands = {
@@ -4453,8 +4844,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis3_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis3_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim4_axis3_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim4_axis3_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -4487,8 +4881,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim4_axis3_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis0_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim3_axis0_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis0_2 = {
 // Begin of an example
 {
 .operands = {
@@ -4521,8 +4918,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim3_axis0_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis0_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim3_axis0_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis0_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -4555,8 +4955,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim3_axis0_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis1_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim3_axis1_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis1_2 = {
 // Begin of an example
 {
 .operands = {
@@ -4589,8 +4992,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim3_axis1_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis1_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim3_axis1_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis1_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -4623,8 +5029,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim3_axis1_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis2_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim3_axis2_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis2_2 = {
 // Begin of an example
 {
 .operands = {
@@ -4657,8 +5066,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim3_axis2_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis2_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim3_axis2_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim3_axis2_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -4691,8 +5103,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim3_axis2_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis0_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim2_axis0_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis0_2 = {
 // Begin of an example
 {
 .operands = {
@@ -4725,8 +5140,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim2_axis0_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis0_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim2_axis0_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis0_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -4759,8 +5177,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim2_axis0_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis1_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim2_axis1_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis1_2 = {
 // Begin of an example
 {
 .operands = {
@@ -4793,8 +5214,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim2_axis1_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis1_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim2_axis1_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim2_axis1_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -4827,8 +5251,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim2_axis1_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim1_axis0_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim1_axis0_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim1_axis0_2 = {
 // Begin of an example
 {
 .operands = {
@@ -4861,8 +5288,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim1_axis0_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_relaxed_dim1_axis0_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_relaxed_dim1_axis0_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_relaxed_dim1_axis0_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -4895,8 +5325,11 @@
 },
 }, // End of an example
 };
+return examples_axis_relaxed_dim1_axis0_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim4_axis0_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim4_axis0_2() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim4_axis0_2 = {
 // Begin of an example
 {
 .operands = {
@@ -4908,7 +5341,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
@@ -4921,7 +5354,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f}}},
@@ -4929,8 +5362,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim4_axis0_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim4_axis0_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim4_axis0_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim4_axis0_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -4942,7 +5378,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
@@ -4955,7 +5391,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f}}},
@@ -4963,8 +5399,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim4_axis0_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim4_axis1_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim4_axis1_2() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim4_axis1_2 = {
 // Begin of an example
 {
 .operands = {
@@ -4976,7 +5415,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f, 1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
@@ -4989,7 +5428,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f}}},
@@ -4997,8 +5436,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim4_axis1_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim4_axis1_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim4_axis1_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim4_axis1_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -5010,7 +5452,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f, 1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
@@ -5023,7 +5465,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f}}},
@@ -5031,8 +5473,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim4_axis1_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim4_axis2_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim4_axis2_2() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim4_axis2_2 = {
 // Begin of an example
 {
 .operands = {
@@ -5044,7 +5489,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
@@ -5057,7 +5502,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f}}},
@@ -5065,8 +5510,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim4_axis2_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim4_axis2_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim4_axis2_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim4_axis2_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -5078,7 +5526,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
@@ -5091,7 +5539,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f}}},
@@ -5099,8 +5547,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim4_axis2_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim4_axis3_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim4_axis3_2() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim4_axis3_2 = {
 // Begin of an example
 {
 .operands = {
@@ -5112,7 +5563,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
@@ -5125,7 +5576,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f}}},
@@ -5133,8 +5584,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim4_axis3_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim4_axis3_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim4_axis3_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim4_axis3_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -5146,7 +5600,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
@@ -5159,7 +5613,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f}}},
@@ -5167,8 +5621,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim4_axis3_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim3_axis0_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim3_axis0_2() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim3_axis0_2 = {
 // Begin of an example
 {
 .operands = {
@@ -5180,7 +5637,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
@@ -5193,7 +5650,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f}}},
@@ -5201,8 +5658,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim3_axis0_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim3_axis0_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim3_axis0_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim3_axis0_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -5214,7 +5674,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
@@ -5227,7 +5687,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f}}},
@@ -5235,8 +5695,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim3_axis0_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim3_axis1_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim3_axis1_2() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim3_axis1_2 = {
 // Begin of an example
 {
 .operands = {
@@ -5248,7 +5711,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
@@ -5261,7 +5724,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f}}},
@@ -5269,8 +5732,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim3_axis1_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim3_axis1_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim3_axis1_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim3_axis1_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -5282,7 +5748,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
@@ -5295,7 +5761,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f}}},
@@ -5303,8 +5769,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim3_axis1_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim3_axis2_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim3_axis2_2() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim3_axis2_2 = {
 // Begin of an example
 {
 .operands = {
@@ -5316,7 +5785,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
@@ -5329,7 +5798,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f}}},
@@ -5337,8 +5806,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim3_axis2_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim3_axis2_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim3_axis2_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim3_axis2_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -5350,7 +5822,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
@@ -5363,7 +5835,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f}}},
@@ -5371,8 +5843,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim3_axis2_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim2_axis0_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim2_axis0_2() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim2_axis0_2 = {
 // Begin of an example
 {
 .operands = {
@@ -5384,7 +5859,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
@@ -5397,7 +5872,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f}}},
@@ -5405,8 +5880,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim2_axis0_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim2_axis0_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim2_axis0_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim2_axis0_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -5418,7 +5896,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
@@ -5431,7 +5909,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f}}},
@@ -5439,8 +5917,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim2_axis0_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim2_axis1_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim2_axis1_2() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim2_axis1_2 = {
 // Begin of an example
 {
 .operands = {
@@ -5452,7 +5933,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
@@ -5465,7 +5946,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f}}},
@@ -5473,8 +5954,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim2_axis1_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim2_axis1_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim2_axis1_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim2_axis1_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -5486,7 +5970,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
@@ -5499,7 +5983,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f}}},
@@ -5507,8 +5991,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim2_axis1_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim1_axis0_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim1_axis0_2() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim1_axis0_2 = {
 // Begin of an example
 {
 .operands = {
@@ -5520,7 +6007,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f}}},
@@ -5533,7 +6020,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f}}},
@@ -5541,8 +6028,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim1_axis0_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_float16_dim1_axis0_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_float16_dim1_axis0_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_float16_dim1_axis0_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -5554,7 +6044,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f}}},
@@ -5567,7 +6057,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f, 0.20000000298023224f}}},
@@ -5575,8 +6065,11 @@
 },
 }, // End of an example
 };
+return examples_axis_float16_dim1_axis0_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim4_axis0_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim4_axis0_2() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim4_axis0_2 = {
 // Begin of an example
 {
 .operands = {
@@ -5609,8 +6102,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim4_axis0_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim4_axis0_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim4_axis0_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim4_axis0_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -5643,8 +6139,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim4_axis0_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim4_axis1_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim4_axis1_2() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim4_axis1_2 = {
 // Begin of an example
 {
 .operands = {
@@ -5677,8 +6176,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim4_axis1_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim4_axis1_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim4_axis1_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim4_axis1_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -5711,8 +6213,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim4_axis1_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim4_axis2_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim4_axis2_2() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim4_axis2_2 = {
 // Begin of an example
 {
 .operands = {
@@ -5745,8 +6250,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim4_axis2_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim4_axis2_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim4_axis2_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim4_axis2_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -5779,8 +6287,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim4_axis2_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim4_axis3_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim4_axis3_2() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim4_axis3_2 = {
 // Begin of an example
 {
 .operands = {
@@ -5813,8 +6324,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim4_axis3_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim4_axis3_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim4_axis3_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim4_axis3_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -5847,8 +6361,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim4_axis3_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim3_axis0_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim3_axis0_2() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim3_axis0_2 = {
 // Begin of an example
 {
 .operands = {
@@ -5881,8 +6398,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim3_axis0_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim3_axis0_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim3_axis0_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim3_axis0_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -5915,8 +6435,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim3_axis0_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim3_axis1_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim3_axis1_2() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim3_axis1_2 = {
 // Begin of an example
 {
 .operands = {
@@ -5949,8 +6472,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim3_axis1_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim3_axis1_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim3_axis1_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim3_axis1_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -5983,8 +6509,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim3_axis1_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim3_axis2_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim3_axis2_2() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim3_axis2_2 = {
 // Begin of an example
 {
 .operands = {
@@ -6017,8 +6546,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim3_axis2_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim3_axis2_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim3_axis2_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim3_axis2_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -6051,8 +6583,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim3_axis2_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim2_axis0_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim2_axis0_2() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim2_axis0_2 = {
 // Begin of an example
 {
 .operands = {
@@ -6085,8 +6620,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim2_axis0_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim2_axis0_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim2_axis0_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim2_axis0_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -6119,8 +6657,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim2_axis0_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim2_axis1_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim2_axis1_2() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim2_axis1_2 = {
 // Begin of an example
 {
 .operands = {
@@ -6153,8 +6694,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim2_axis1_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim2_axis1_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim2_axis1_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim2_axis1_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -6187,8 +6731,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim2_axis1_neg_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim1_axis0_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim1_axis0_2() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim1_axis0_2 = {
 // Begin of an example
 {
 .operands = {
@@ -6221,8 +6768,11 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim1_axis0_2;
+};
 
-std::vector<MixedTypedExample> examples_axis_quant8_dim1_axis0_neg_2 = {
+std::vector<MixedTypedExample>& get_examples_axis_quant8_dim1_axis0_neg_2() {
+static std::vector<MixedTypedExample> examples_axis_quant8_dim1_axis0_neg_2 = {
 // Begin of an example
 {
 .operands = {
@@ -6255,4 +6805,6 @@
 },
 }, // End of an example
 };
+return examples_axis_quant8_dim1_axis0_neg_2;
+};
 
diff --git a/nn/runtime/test/generated/examples/space_to_batch.example.cpp b/nn/runtime/test/generated/examples/space_to_batch.example.cpp
index 72d7d44..33e7474 100644
--- a/nn/runtime/test/generated/examples/space_to_batch.example.cpp
+++ b/nn/runtime/test/generated/examples/space_to_batch.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: space_to_batch.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/space_to_batch_float_1.example.cpp b/nn/runtime/test/generated/examples/space_to_batch_float_1.example.cpp
index c25c75b..1763341 100644
--- a/nn/runtime/test/generated/examples/space_to_batch_float_1.example.cpp
+++ b/nn/runtime/test/generated/examples/space_to_batch_float_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: space_to_batch_float_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/space_to_batch_float_1_relaxed.example.cpp b/nn/runtime/test/generated/examples/space_to_batch_float_1_relaxed.example.cpp
index 4a24a08..6e1bcc8 100644
--- a/nn/runtime/test/generated/examples/space_to_batch_float_1_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/space_to_batch_float_1_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: space_to_batch_float_1_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/space_to_batch_float_2.example.cpp b/nn/runtime/test/generated/examples/space_to_batch_float_2.example.cpp
index afe10ad..61e1d6a 100644
--- a/nn/runtime/test/generated/examples/space_to_batch_float_2.example.cpp
+++ b/nn/runtime/test/generated/examples/space_to_batch_float_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: space_to_batch_float_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/space_to_batch_float_2_relaxed.example.cpp b/nn/runtime/test/generated/examples/space_to_batch_float_2_relaxed.example.cpp
index e12ca93..ba498e5 100644
--- a/nn/runtime/test/generated/examples/space_to_batch_float_2_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/space_to_batch_float_2_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: space_to_batch_float_2_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/space_to_batch_float_3.example.cpp b/nn/runtime/test/generated/examples/space_to_batch_float_3.example.cpp
index 74ed6c7..0216810 100644
--- a/nn/runtime/test/generated/examples/space_to_batch_float_3.example.cpp
+++ b/nn/runtime/test/generated/examples/space_to_batch_float_3.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: space_to_batch_float_3.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/space_to_batch_float_3_relaxed.example.cpp b/nn/runtime/test/generated/examples/space_to_batch_float_3_relaxed.example.cpp
index bf60bff..32607b6 100644
--- a/nn/runtime/test/generated/examples/space_to_batch_float_3_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/space_to_batch_float_3_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: space_to_batch_float_3_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/space_to_batch_quant8_1.example.cpp b/nn/runtime/test/generated/examples/space_to_batch_quant8_1.example.cpp
index 35dbffb..82b8b51 100644
--- a/nn/runtime/test/generated/examples/space_to_batch_quant8_1.example.cpp
+++ b/nn/runtime/test/generated/examples/space_to_batch_quant8_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: space_to_batch_quant8_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/space_to_batch_quant8_2.example.cpp b/nn/runtime/test/generated/examples/space_to_batch_quant8_2.example.cpp
index ac79ab2..f023c8e 100644
--- a/nn/runtime/test/generated/examples/space_to_batch_quant8_2.example.cpp
+++ b/nn/runtime/test/generated/examples/space_to_batch_quant8_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: space_to_batch_quant8_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/space_to_batch_quant8_3.example.cpp b/nn/runtime/test/generated/examples/space_to_batch_quant8_3.example.cpp
index 6df799e..51e2327 100644
--- a/nn/runtime/test/generated/examples/space_to_batch_quant8_3.example.cpp
+++ b/nn/runtime/test/generated/examples/space_to_batch_quant8_3.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: space_to_batch_quant8_3.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/space_to_batch_relaxed.example.cpp b/nn/runtime/test/generated/examples/space_to_batch_relaxed.example.cpp
index 10cfe38..4423a27 100644
--- a/nn/runtime/test/generated/examples/space_to_batch_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/space_to_batch_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: space_to_batch_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/space_to_batch_v1_2.example.cpp b/nn/runtime/test/generated/examples/space_to_batch_v1_2.example.cpp
index a4b52de..84ad68e 100644
--- a/nn/runtime/test/generated/examples/space_to_batch_v1_2.example.cpp
+++ b/nn/runtime/test/generated/examples/space_to_batch_v1_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: space_to_batch_v1_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples_nhwc = {
+std::vector<MixedTypedExample>& get_examples_nhwc() {
+static std::vector<MixedTypedExample> examples_nhwc = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,48 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_float16() {
+static std::vector<MixedTypedExample> examples_nhwc_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.399999976158142f, 2.299999952316284f, 3.200000047683716f, 4.099999904632568f, 5.400000095367432f, 6.300000190734863f, 7.199999809265137f, 8.100000381469727f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.399999976158142f, 2.299999952316284f, 3.200000047683716f, 4.099999904632568f, 5.400000095367432f, 6.300000190734863f, 7.199999809265137f, 8.100000381469727f}}},
+}
+},
+}, // End of an example
+};
+return examples_nhwc_float16;
+};
+
+std::vector<MixedTypedExample>& get_examples_nhwc_quant8() {
+static std::vector<MixedTypedExample> examples_nhwc_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -101,8 +145,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nchw = {
+std::vector<MixedTypedExample>& get_examples_nchw() {
+static std::vector<MixedTypedExample> examples_nchw = {
 // Begin of an example
 {
 .operands = {
@@ -135,8 +182,11 @@
 },
 }, // End of an example
 };
+return examples_nchw;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -169,8 +219,48 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nchw_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nchw_float16() {
+static std::vector<MixedTypedExample> examples_nchw_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.399999976158142f, 3.200000047683716f, 5.400000095367432f, 7.199999809265137f, 2.299999952316284f, 4.099999904632568f, 6.300000190734863f, 8.100000381469727f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.399999976158142f, 2.299999952316284f, 3.200000047683716f, 4.099999904632568f, 5.400000095367432f, 6.300000190734863f, 7.199999809265137f, 8.100000381469727f}}},
+}
+},
+}, // End of an example
+};
+return examples_nchw_float16;
+};
+
+std::vector<MixedTypedExample>& get_examples_nchw_quant8() {
+static std::vector<MixedTypedExample> examples_nchw_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -203,8 +293,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_2() {
+static std::vector<MixedTypedExample> examples_nhwc_2 = {
 // Begin of an example
 {
 .operands = {
@@ -237,8 +330,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed_2() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -271,8 +367,48 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_quant8_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_float16_2() {
+static std::vector<MixedTypedExample> examples_nhwc_float16_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 3.0f, 9.0f, 11.0f, 2.0f, 4.0f, 10.0f, 12.0f, 5.0f, 7.0f, 13.0f, 15.0f, 6.0f, 8.0f, 14.0f, 16.0f}}},
+}
+},
+}, // End of an example
+};
+return examples_nhwc_float16_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_nhwc_quant8_2() {
+static std::vector<MixedTypedExample> examples_nhwc_quant8_2 = {
 // Begin of an example
 {
 .operands = {
@@ -305,8 +441,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_quant8_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_2() {
+static std::vector<MixedTypedExample> examples_nchw_2 = {
 // Begin of an example
 {
 .operands = {
@@ -339,8 +478,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed_2() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -373,8 +515,48 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_quant8_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_float16_2() {
+static std::vector<MixedTypedExample> examples_nchw_float16_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 3.0f, 9.0f, 11.0f, 2.0f, 4.0f, 10.0f, 12.0f, 5.0f, 7.0f, 13.0f, 15.0f, 6.0f, 8.0f, 14.0f, 16.0f}}},
+}
+},
+}, // End of an example
+};
+return examples_nchw_float16_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_nchw_quant8_2() {
+static std::vector<MixedTypedExample> examples_nchw_quant8_2 = {
 // Begin of an example
 {
 .operands = {
@@ -407,8 +589,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_quant8_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_3 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_3() {
+static std::vector<MixedTypedExample> examples_nhwc_3 = {
 // Begin of an example
 {
 .operands = {
@@ -441,8 +626,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_3;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed_3 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed_3() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed_3 = {
 // Begin of an example
 {
 .operands = {
@@ -475,8 +663,48 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed_3;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_quant8_3 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_float16_3() {
+static std::vector<MixedTypedExample> examples_nhwc_float16_3 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {0.0f, 0.0f, 0.0f, 5.0f, 0.0f, 0.0f, 0.0f, 6.0f, 0.0f, 1.0f, 0.0f, 7.0f, 0.0f, 2.0f, 0.0f, 8.0f, 0.0f, 3.0f, 0.0f, 9.0f, 0.0f, 4.0f, 0.0f, 10.0f}}},
+}
+},
+}, // End of an example
+};
+return examples_nhwc_float16_3;
+};
+
+std::vector<MixedTypedExample>& get_examples_nhwc_quant8_3() {
+static std::vector<MixedTypedExample> examples_nhwc_quant8_3 = {
 // Begin of an example
 {
 .operands = {
@@ -509,8 +737,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_quant8_3;
+};
 
-std::vector<MixedTypedExample> examples_nchw_3 = {
+std::vector<MixedTypedExample>& get_examples_nchw_3() {
+static std::vector<MixedTypedExample> examples_nchw_3 = {
 // Begin of an example
 {
 .operands = {
@@ -543,8 +774,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_3;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed_3 = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed_3() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed_3 = {
 // Begin of an example
 {
 .operands = {
@@ -577,8 +811,48 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed_3;
+};
 
-std::vector<MixedTypedExample> examples_nchw_quant8_3 = {
+std::vector<MixedTypedExample>& get_examples_nchw_float16_3() {
+static std::vector<MixedTypedExample> examples_nchw_float16_3 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {0.0f, 0.0f, 0.0f, 5.0f, 0.0f, 0.0f, 0.0f, 6.0f, 0.0f, 1.0f, 0.0f, 7.0f, 0.0f, 2.0f, 0.0f, 8.0f, 0.0f, 3.0f, 0.0f, 9.0f, 0.0f, 4.0f, 0.0f, 10.0f}}},
+}
+},
+}, // End of an example
+};
+return examples_nchw_float16_3;
+};
+
+std::vector<MixedTypedExample>& get_examples_nchw_quant8_3() {
+static std::vector<MixedTypedExample> examples_nchw_quant8_3 = {
 // Begin of an example
 {
 .operands = {
@@ -611,8 +885,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_quant8_3;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_4 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_4() {
+static std::vector<MixedTypedExample> examples_nhwc_4 = {
 // Begin of an example
 {
 .operands = {
@@ -645,8 +922,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_4;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed_4 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed_4() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed_4 = {
 // Begin of an example
 {
 .operands = {
@@ -679,8 +959,48 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed_4;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_quant8_4 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_float16_4() {
+static std::vector<MixedTypedExample> examples_nhwc_float16_4 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 5.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 6.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 7.0f, 0.0f, 0.0f, 0.0f, 2.0f, 0.0f, 0.0f, 0.0f, 8.0f, 0.0f, 0.0f, 0.0f, 3.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 4.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}}},
+}
+},
+}, // End of an example
+};
+return examples_nhwc_float16_4;
+};
+
+std::vector<MixedTypedExample>& get_examples_nhwc_quant8_4() {
+static std::vector<MixedTypedExample> examples_nhwc_quant8_4 = {
 // Begin of an example
 {
 .operands = {
@@ -713,8 +1033,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_quant8_4;
+};
 
-std::vector<MixedTypedExample> examples_nchw_4 = {
+std::vector<MixedTypedExample>& get_examples_nchw_4() {
+static std::vector<MixedTypedExample> examples_nchw_4 = {
 // Begin of an example
 {
 .operands = {
@@ -747,8 +1070,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_4;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed_4 = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed_4() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed_4 = {
 // Begin of an example
 {
 .operands = {
@@ -781,8 +1107,48 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed_4;
+};
 
-std::vector<MixedTypedExample> examples_nchw_quant8_4 = {
+std::vector<MixedTypedExample>& get_examples_nchw_float16_4() {
+static std::vector<MixedTypedExample> examples_nchw_float16_4 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 5.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 6.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 7.0f, 0.0f, 0.0f, 0.0f, 2.0f, 0.0f, 0.0f, 0.0f, 8.0f, 0.0f, 0.0f, 0.0f, 3.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 4.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}}},
+}
+},
+}, // End of an example
+};
+return examples_nchw_float16_4;
+};
+
+std::vector<MixedTypedExample>& get_examples_nchw_quant8_4() {
+static std::vector<MixedTypedExample> examples_nchw_quant8_4 = {
 // Begin of an example
 {
 .operands = {
@@ -815,4 +1181,6 @@
 },
 }, // End of an example
 };
+return examples_nchw_quant8_4;
+};
 
diff --git a/nn/runtime/test/generated/examples/space_to_depth_float_1.example.cpp b/nn/runtime/test/generated/examples/space_to_depth_float_1.example.cpp
index b9c3bb1..f240d75 100644
--- a/nn/runtime/test/generated/examples/space_to_depth_float_1.example.cpp
+++ b/nn/runtime/test/generated/examples/space_to_depth_float_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: space_to_depth_float_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/space_to_depth_float_1_relaxed.example.cpp b/nn/runtime/test/generated/examples/space_to_depth_float_1_relaxed.example.cpp
index af5c011..6d23591 100644
--- a/nn/runtime/test/generated/examples/space_to_depth_float_1_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/space_to_depth_float_1_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: space_to_depth_float_1_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/space_to_depth_float_2.example.cpp b/nn/runtime/test/generated/examples/space_to_depth_float_2.example.cpp
index f4f6766..eca16ea 100644
--- a/nn/runtime/test/generated/examples/space_to_depth_float_2.example.cpp
+++ b/nn/runtime/test/generated/examples/space_to_depth_float_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: space_to_depth_float_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/space_to_depth_float_2_relaxed.example.cpp b/nn/runtime/test/generated/examples/space_to_depth_float_2_relaxed.example.cpp
index 731aae6..4ff9bf4 100644
--- a/nn/runtime/test/generated/examples/space_to_depth_float_2_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/space_to_depth_float_2_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: space_to_depth_float_2_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/space_to_depth_float_3.example.cpp b/nn/runtime/test/generated/examples/space_to_depth_float_3.example.cpp
index d0e9ed8..be8b940 100644
--- a/nn/runtime/test/generated/examples/space_to_depth_float_3.example.cpp
+++ b/nn/runtime/test/generated/examples/space_to_depth_float_3.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: space_to_depth_float_3.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/space_to_depth_float_3_relaxed.example.cpp b/nn/runtime/test/generated/examples/space_to_depth_float_3_relaxed.example.cpp
index 9b3fe39..ee6a188 100644
--- a/nn/runtime/test/generated/examples/space_to_depth_float_3_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/space_to_depth_float_3_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: space_to_depth_float_3_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/space_to_depth_quant8_1.example.cpp b/nn/runtime/test/generated/examples/space_to_depth_quant8_1.example.cpp
index 20a3875..2cd4d13 100644
--- a/nn/runtime/test/generated/examples/space_to_depth_quant8_1.example.cpp
+++ b/nn/runtime/test/generated/examples/space_to_depth_quant8_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: space_to_depth_quant8_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/space_to_depth_quant8_2.example.cpp b/nn/runtime/test/generated/examples/space_to_depth_quant8_2.example.cpp
index 6009afc..73d9df9 100644
--- a/nn/runtime/test/generated/examples/space_to_depth_quant8_2.example.cpp
+++ b/nn/runtime/test/generated/examples/space_to_depth_quant8_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: space_to_depth_quant8_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/space_to_depth_v1_2.example.cpp b/nn/runtime/test/generated/examples/space_to_depth_v1_2.example.cpp
index 7738827..ce1c16c 100644
--- a/nn/runtime/test/generated/examples/space_to_depth_v1_2.example.cpp
+++ b/nn/runtime/test/generated/examples/space_to_depth_v1_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: space_to_depth_v1_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples_nhwc = {
+std::vector<MixedTypedExample>& get_examples_nhwc() {
+static std::vector<MixedTypedExample> examples_nhwc = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,48 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_float16() {
+static std::vector<MixedTypedExample> examples_nhwc_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.399999976158142f, 2.299999952316284f, 3.200000047683716f, 4.099999904632568f, 5.400000095367432f, 6.300000190734863f, 7.199999809265137f, 8.100000381469727f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.399999976158142f, 2.299999952316284f, 3.200000047683716f, 4.099999904632568f, 5.400000095367432f, 6.300000190734863f, 7.199999809265137f, 8.100000381469727f}}},
+}
+},
+}, // End of an example
+};
+return examples_nhwc_float16;
+};
+
+std::vector<MixedTypedExample>& get_examples_nhwc_quant8() {
+static std::vector<MixedTypedExample> examples_nhwc_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -101,8 +145,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nchw = {
+std::vector<MixedTypedExample>& get_examples_nchw() {
+static std::vector<MixedTypedExample> examples_nchw = {
 // Begin of an example
 {
 .operands = {
@@ -135,8 +182,11 @@
 },
 }, // End of an example
 };
+return examples_nchw;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -169,8 +219,48 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nchw_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nchw_float16() {
+static std::vector<MixedTypedExample> examples_nchw_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.399999976158142f, 3.200000047683716f, 5.400000095367432f, 7.199999809265137f, 2.299999952316284f, 4.099999904632568f, 6.300000190734863f, 8.100000381469727f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.399999976158142f, 2.299999952316284f, 3.200000047683716f, 4.099999904632568f, 5.400000095367432f, 6.300000190734863f, 7.199999809265137f, 8.100000381469727f}}},
+}
+},
+}, // End of an example
+};
+return examples_nchw_float16;
+};
+
+std::vector<MixedTypedExample>& get_examples_nchw_quant8() {
+static std::vector<MixedTypedExample> examples_nchw_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -203,8 +293,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_2() {
+static std::vector<MixedTypedExample> examples_nhwc_2 = {
 // Begin of an example
 {
 .operands = {
@@ -237,8 +330,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed_2() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -271,8 +367,48 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_quant8_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_float16_2() {
+static std::vector<MixedTypedExample> examples_nhwc_float16_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 2.0f, 5.0f, 6.0f, 3.0f, 4.0f, 7.0f, 8.0f, 9.0f, 10.0f, 13.0f, 14.0f, 11.0f, 12.0f, 15.0f, 16.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f}}},
+}
+},
+}, // End of an example
+};
+return examples_nhwc_float16_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_nhwc_quant8_2() {
+static std::vector<MixedTypedExample> examples_nhwc_quant8_2 = {
 // Begin of an example
 {
 .operands = {
@@ -305,8 +441,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_quant8_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_2() {
+static std::vector<MixedTypedExample> examples_nchw_2 = {
 // Begin of an example
 {
 .operands = {
@@ -339,8 +478,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed_2() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -373,8 +515,48 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_quant8_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_float16_2() {
+static std::vector<MixedTypedExample> examples_nchw_float16_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 2.0f, 5.0f, 6.0f, 3.0f, 4.0f, 7.0f, 8.0f, 9.0f, 10.0f, 13.0f, 14.0f, 11.0f, 12.0f, 15.0f, 16.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 5.0f, 9.0f, 13.0f, 2.0f, 6.0f, 10.0f, 14.0f, 3.0f, 7.0f, 11.0f, 15.0f, 4.0f, 8.0f, 12.0f, 16.0f}}},
+}
+},
+}, // End of an example
+};
+return examples_nchw_float16_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_nchw_quant8_2() {
+static std::vector<MixedTypedExample> examples_nchw_quant8_2 = {
 // Begin of an example
 {
 .operands = {
@@ -407,8 +589,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_quant8_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_3 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_3() {
+static std::vector<MixedTypedExample> examples_nhwc_3 = {
 // Begin of an example
 {
 .operands = {
@@ -441,8 +626,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_3;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed_3 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed_3() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed_3 = {
 // Begin of an example
 {
 .operands = {
@@ -475,8 +663,48 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed_3;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_quant8_3 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_float16_3() {
+static std::vector<MixedTypedExample> examples_nhwc_float16_3 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {10.0f, 20.0f, 11.0f, 21.0f, 12.0f, 22.0f, 13.0f, 23.0f, 14.0f, 24.0f, 15.0f, 25.0f, 16.0f, 26.0f, 17.0f, 27.0f, 18.0f, 28.0f, 19.0f, 29.0f, 110.0f, 210.0f, 111.0f, 211.0f, 112.0f, 212.0f, 113.0f, 213.0f, 114.0f, 214.0f, 115.0f, 215.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {10.0f, 20.0f, 11.0f, 21.0f, 14.0f, 24.0f, 15.0f, 25.0f, 12.0f, 22.0f, 13.0f, 23.0f, 16.0f, 26.0f, 17.0f, 27.0f, 18.0f, 28.0f, 19.0f, 29.0f, 112.0f, 212.0f, 113.0f, 213.0f, 110.0f, 210.0f, 111.0f, 211.0f, 114.0f, 214.0f, 115.0f, 215.0f}}},
+}
+},
+}, // End of an example
+};
+return examples_nhwc_float16_3;
+};
+
+std::vector<MixedTypedExample>& get_examples_nhwc_quant8_3() {
+static std::vector<MixedTypedExample> examples_nhwc_quant8_3 = {
 // Begin of an example
 {
 .operands = {
@@ -509,8 +737,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_quant8_3;
+};
 
-std::vector<MixedTypedExample> examples_nchw_3 = {
+std::vector<MixedTypedExample>& get_examples_nchw_3() {
+static std::vector<MixedTypedExample> examples_nchw_3 = {
 // Begin of an example
 {
 .operands = {
@@ -543,8 +774,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_3;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed_3 = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed_3() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed_3 = {
 // Begin of an example
 {
 .operands = {
@@ -577,8 +811,48 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed_3;
+};
 
-std::vector<MixedTypedExample> examples_nchw_quant8_3 = {
+std::vector<MixedTypedExample>& get_examples_nchw_float16_3() {
+static std::vector<MixedTypedExample> examples_nchw_float16_3 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 110.0f, 111.0f, 112.0f, 113.0f, 114.0f, 115.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f, 27.0f, 28.0f, 29.0f, 210.0f, 211.0f, 212.0f, 213.0f, 214.0f, 215.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {10.0f, 12.0f, 18.0f, 110.0f, 20.0f, 22.0f, 28.0f, 210.0f, 11.0f, 13.0f, 19.0f, 111.0f, 21.0f, 23.0f, 29.0f, 211.0f, 14.0f, 16.0f, 112.0f, 114.0f, 24.0f, 26.0f, 212.0f, 214.0f, 15.0f, 17.0f, 113.0f, 115.0f, 25.0f, 27.0f, 213.0f, 215.0f}}},
+}
+},
+}, // End of an example
+};
+return examples_nchw_float16_3;
+};
+
+std::vector<MixedTypedExample>& get_examples_nchw_quant8_3() {
+static std::vector<MixedTypedExample> examples_nchw_quant8_3 = {
 // Begin of an example
 {
 .operands = {
@@ -611,4 +885,6 @@
 },
 }, // End of an example
 };
+return examples_nchw_quant8_3;
+};
 
diff --git a/nn/runtime/test/generated/examples/split_float_1.example.cpp b/nn/runtime/test/generated/examples/split_float_1.example.cpp
index 319f2e5..b80d01c 100644
--- a/nn/runtime/test/generated/examples/split_float_1.example.cpp
+++ b/nn/runtime/test/generated/examples/split_float_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: split_float_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
-std::vector<MixedTypedExample> examples_relaxed = {
+std::vector<MixedTypedExample>& get_examples_relaxed() {
+static std::vector<MixedTypedExample> examples_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_float16 = {
+std::vector<MixedTypedExample>& get_examples_float16() {
+static std::vector<MixedTypedExample> examples_float16 = {
 // Begin of an example
 {
 .operands = {
@@ -80,7 +87,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}}},
@@ -93,7 +100,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, 2.0f}}, {1, {3.0f, 4.0f}}, {2, {5.0f, 6.0f}}},
@@ -101,4 +108,6 @@
 },
 }, // End of an example
 };
+return examples_float16;
+};
 
diff --git a/nn/runtime/test/generated/examples/split_float_2.example.cpp b/nn/runtime/test/generated/examples/split_float_2.example.cpp
index b68ac06..6f18c85 100644
--- a/nn/runtime/test/generated/examples/split_float_2.example.cpp
+++ b/nn/runtime/test/generated/examples/split_float_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: split_float_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
-std::vector<MixedTypedExample> examples_relaxed = {
+std::vector<MixedTypedExample>& get_examples_relaxed() {
+static std::vector<MixedTypedExample> examples_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_float16 = {
+std::vector<MixedTypedExample>& get_examples_float16() {
+static std::vector<MixedTypedExample> examples_float16 = {
 // Begin of an example
 {
 .operands = {
@@ -80,7 +87,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}}},
@@ -93,7 +100,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, 2.0f, 3.0f}}, {1, {4.0f, 5.0f, 6.0f}}},
@@ -101,4 +108,6 @@
 },
 }, // End of an example
 };
+return examples_float16;
+};
 
diff --git a/nn/runtime/test/generated/examples/split_float_3.example.cpp b/nn/runtime/test/generated/examples/split_float_3.example.cpp
index 2ba380c..0544d91 100644
--- a/nn/runtime/test/generated/examples/split_float_3.example.cpp
+++ b/nn/runtime/test/generated/examples/split_float_3.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: split_float_3.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
-std::vector<MixedTypedExample> examples_relaxed = {
+std::vector<MixedTypedExample>& get_examples_relaxed() {
+static std::vector<MixedTypedExample> examples_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_float16 = {
+std::vector<MixedTypedExample>& get_examples_float16() {
+static std::vector<MixedTypedExample> examples_float16 = {
 // Begin of an example
 {
 .operands = {
@@ -80,7 +87,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}}},
@@ -93,7 +100,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, 4.0f}}, {1, {2.0f, 5.0f}}, {2, {3.0f, 6.0f}}},
@@ -101,4 +108,6 @@
 },
 }, // End of an example
 };
+return examples_float16;
+};
 
diff --git a/nn/runtime/test/generated/examples/split_float_4.example.cpp b/nn/runtime/test/generated/examples/split_float_4.example.cpp
index b4acfd3..e7bd2bc 100644
--- a/nn/runtime/test/generated/examples/split_float_4.example.cpp
+++ b/nn/runtime/test/generated/examples/split_float_4.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: split_float_4.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
-std::vector<MixedTypedExample> examples_relaxed = {
+std::vector<MixedTypedExample>& get_examples_relaxed() {
+static std::vector<MixedTypedExample> examples_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_float16 = {
+std::vector<MixedTypedExample>& get_examples_float16() {
+static std::vector<MixedTypedExample> examples_float16 = {
 // Begin of an example
 {
 .operands = {
@@ -80,7 +87,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}},
@@ -93,7 +100,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, 2.0f, 5.0f, 6.0f}}, {1, {3.0f, 4.0f, 7.0f, 8.0f}}},
@@ -101,4 +108,6 @@
 },
 }, // End of an example
 };
+return examples_float16;
+};
 
diff --git a/nn/runtime/test/generated/examples/split_float_5.example.cpp b/nn/runtime/test/generated/examples/split_float_5.example.cpp
index d3fd91b..abf46b5 100644
--- a/nn/runtime/test/generated/examples/split_float_5.example.cpp
+++ b/nn/runtime/test/generated/examples/split_float_5.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: split_float_5.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
-std::vector<MixedTypedExample> examples_relaxed = {
+std::vector<MixedTypedExample>& get_examples_relaxed() {
+static std::vector<MixedTypedExample> examples_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_float16 = {
+std::vector<MixedTypedExample>& get_examples_float16() {
+static std::vector<MixedTypedExample> examples_float16 = {
 // Begin of an example
 {
 .operands = {
@@ -80,7 +87,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}},
@@ -93,7 +100,7 @@
   {},
   // int -> QUANT8_ASYMM map
   {},
-  // int -> QUANT16_ASYMM map
+  // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
   {{0, {1.0f, 2.0f, 5.0f, 6.0f}}, {1, {3.0f, 4.0f, 7.0f, 8.0f}}},
@@ -101,4 +108,6 @@
 },
 }, // End of an example
 };
+return examples_float16;
+};
 
diff --git a/nn/runtime/test/generated/examples/split_int32_1.example.cpp b/nn/runtime/test/generated/examples/split_int32_1.example.cpp
index 67d8a58..9dc5f49 100644
--- a/nn/runtime/test/generated/examples/split_int32_1.example.cpp
+++ b/nn/runtime/test/generated/examples/split_int32_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: split_int32_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
-std::vector<MixedTypedExample> examples_relaxed = {
+std::vector<MixedTypedExample>& get_examples_relaxed() {
+static std::vector<MixedTypedExample> examples_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -67,4 +71,6 @@
 },
 }, // End of an example
 };
+return examples_relaxed;
+};
 
diff --git a/nn/runtime/test/generated/examples/split_int32_2.example.cpp b/nn/runtime/test/generated/examples/split_int32_2.example.cpp
index e6998e7..e824afe 100644
--- a/nn/runtime/test/generated/examples/split_int32_2.example.cpp
+++ b/nn/runtime/test/generated/examples/split_int32_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: split_int32_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
-std::vector<MixedTypedExample> examples_relaxed = {
+std::vector<MixedTypedExample>& get_examples_relaxed() {
+static std::vector<MixedTypedExample> examples_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -67,4 +71,6 @@
 },
 }, // End of an example
 };
+return examples_relaxed;
+};
 
diff --git a/nn/runtime/test/generated/examples/split_int32_3.example.cpp b/nn/runtime/test/generated/examples/split_int32_3.example.cpp
index ba5f88e..ee39678 100644
--- a/nn/runtime/test/generated/examples/split_int32_3.example.cpp
+++ b/nn/runtime/test/generated/examples/split_int32_3.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: split_int32_3.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
-std::vector<MixedTypedExample> examples_relaxed = {
+std::vector<MixedTypedExample>& get_examples_relaxed() {
+static std::vector<MixedTypedExample> examples_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -67,4 +71,6 @@
 },
 }, // End of an example
 };
+return examples_relaxed;
+};
 
diff --git a/nn/runtime/test/generated/examples/split_int32_4.example.cpp b/nn/runtime/test/generated/examples/split_int32_4.example.cpp
index 29a13df..10b2792 100644
--- a/nn/runtime/test/generated/examples/split_int32_4.example.cpp
+++ b/nn/runtime/test/generated/examples/split_int32_4.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: split_int32_4.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
-std::vector<MixedTypedExample> examples_relaxed = {
+std::vector<MixedTypedExample>& get_examples_relaxed() {
+static std::vector<MixedTypedExample> examples_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -67,4 +71,6 @@
 },
 }, // End of an example
 };
+return examples_relaxed;
+};
 
diff --git a/nn/runtime/test/generated/examples/split_quant8_1.example.cpp b/nn/runtime/test/generated/examples/split_quant8_1.example.cpp
index 1cf32e9..bfdd71f 100644
--- a/nn/runtime/test/generated/examples/split_quant8_1.example.cpp
+++ b/nn/runtime/test/generated/examples/split_quant8_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: split_quant8_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
-std::vector<MixedTypedExample> examples_relaxed = {
+std::vector<MixedTypedExample>& get_examples_relaxed() {
+static std::vector<MixedTypedExample> examples_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -67,4 +71,6 @@
 },
 }, // End of an example
 };
+return examples_relaxed;
+};
 
diff --git a/nn/runtime/test/generated/examples/split_quant8_2.example.cpp b/nn/runtime/test/generated/examples/split_quant8_2.example.cpp
index 4b5a2b3..5d2dc99 100644
--- a/nn/runtime/test/generated/examples/split_quant8_2.example.cpp
+++ b/nn/runtime/test/generated/examples/split_quant8_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: split_quant8_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
-std::vector<MixedTypedExample> examples_relaxed = {
+std::vector<MixedTypedExample>& get_examples_relaxed() {
+static std::vector<MixedTypedExample> examples_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -67,4 +71,6 @@
 },
 }, // End of an example
 };
+return examples_relaxed;
+};
 
diff --git a/nn/runtime/test/generated/examples/split_quant8_3.example.cpp b/nn/runtime/test/generated/examples/split_quant8_3.example.cpp
index 2477a1e..ec7a637 100644
--- a/nn/runtime/test/generated/examples/split_quant8_3.example.cpp
+++ b/nn/runtime/test/generated/examples/split_quant8_3.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: split_quant8_3.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/split_quant8_4.example.cpp b/nn/runtime/test/generated/examples/split_quant8_4.example.cpp
index f8d0e25..59bd8c8 100644
--- a/nn/runtime/test/generated/examples/split_quant8_4.example.cpp
+++ b/nn/runtime/test/generated/examples/split_quant8_4.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: split_quant8_4.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/squeeze.example.cpp b/nn/runtime/test/generated/examples/squeeze.example.cpp
index 94f5ca6..c32f879 100644
--- a/nn/runtime/test/generated/examples/squeeze.example.cpp
+++ b/nn/runtime/test/generated/examples/squeeze.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: squeeze.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/argmin_2_quant8.example.cpp b/nn/runtime/test/generated/examples/squeeze_float16.example.cpp
similarity index 64%
copy from nn/runtime/test/generated/examples/argmin_2_quant8.example.cpp
copy to nn/runtime/test/generated/examples/squeeze_float16.example.cpp
index ce68216..4d5bdbb 100644
--- a/nn/runtime/test/generated/examples/argmin_2_quant8.example.cpp
+++ b/nn/runtime/test/generated/examples/squeeze_float16.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
-// Generated file (from: argmin_2_quant8.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+// Generated file (from: squeeze_float16.mod.py). Do not edit
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -11,26 +12,28 @@
   // int -> INT32 map
   {},
   // int -> QUANT8_ASYMM map
-  {{0, {1, 2, 4, 3}}},
+  {},
   // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
-  {},
+  {{0, {1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f}}},
 },
 //Output(s)
 { // See tools/test_generator/include/TestHarness.h:MixedTyped
   // int -> FLOAT32 map
   {},
   // int -> INT32 map
-  {{0, {0, 0}}},
+  {},
   // int -> QUANT8_ASYMM map
   {},
   // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
-  {},
+  {{0, {1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f}}},
 }
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/squeeze_float_1.example.cpp b/nn/runtime/test/generated/examples/squeeze_float_1.example.cpp
index 4c4cc42..e9eb84b 100644
--- a/nn/runtime/test/generated/examples/squeeze_float_1.example.cpp
+++ b/nn/runtime/test/generated/examples/squeeze_float_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: squeeze_float_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/squeeze_float_1_relaxed.example.cpp b/nn/runtime/test/generated/examples/squeeze_float_1_relaxed.example.cpp
index 46f08b6..f468901 100644
--- a/nn/runtime/test/generated/examples/squeeze_float_1_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/squeeze_float_1_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: squeeze_float_1_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/squeeze_quant8_1.example.cpp b/nn/runtime/test/generated/examples/squeeze_quant8_1.example.cpp
index ea41495..add5358 100644
--- a/nn/runtime/test/generated/examples/squeeze_quant8_1.example.cpp
+++ b/nn/runtime/test/generated/examples/squeeze_quant8_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: squeeze_quant8_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/squeeze_relaxed.example.cpp b/nn/runtime/test/generated/examples/squeeze_relaxed.example.cpp
index ee24a72..11c40b9 100644
--- a/nn/runtime/test/generated/examples/squeeze_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/squeeze_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: squeeze_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/strided_slice.example.cpp b/nn/runtime/test/generated/examples/strided_slice.example.cpp
index 33ecb02..b70d0ac 100644
--- a/nn/runtime/test/generated/examples/strided_slice.example.cpp
+++ b/nn/runtime/test/generated/examples/strided_slice.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: strided_slice.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/argmin_2_quant8.example.cpp b/nn/runtime/test/generated/examples/strided_slice_float16.example.cpp
similarity index 67%
copy from nn/runtime/test/generated/examples/argmin_2_quant8.example.cpp
copy to nn/runtime/test/generated/examples/strided_slice_float16.example.cpp
index ce68216..7890e28 100644
--- a/nn/runtime/test/generated/examples/argmin_2_quant8.example.cpp
+++ b/nn/runtime/test/generated/examples/strided_slice_float16.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
-// Generated file (from: argmin_2_quant8.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+// Generated file (from: strided_slice_float16.mod.py). Do not edit
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -11,26 +12,28 @@
   // int -> INT32 map
   {},
   // int -> QUANT8_ASYMM map
-  {{0, {1, 2, 4, 3}}},
+  {},
   // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
-  {},
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}}},
 },
 //Output(s)
 { // See tools/test_generator/include/TestHarness.h:MixedTyped
   // int -> FLOAT32 map
   {},
   // int -> INT32 map
-  {{0, {0, 0}}},
+  {},
   // int -> QUANT8_ASYMM map
   {},
   // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
-  {},
+  {{0, {1.0f, 3.0f}}},
 }
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/strided_slice_float_1.example.cpp b/nn/runtime/test/generated/examples/strided_slice_float_1.example.cpp
index d72d648..2777651 100644
--- a/nn/runtime/test/generated/examples/strided_slice_float_1.example.cpp
+++ b/nn/runtime/test/generated/examples/strided_slice_float_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: strided_slice_float_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/strided_slice_float_10.example.cpp b/nn/runtime/test/generated/examples/strided_slice_float_10.example.cpp
index 4b42baf..f194610 100644
--- a/nn/runtime/test/generated/examples/strided_slice_float_10.example.cpp
+++ b/nn/runtime/test/generated/examples/strided_slice_float_10.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: strided_slice_float_10.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/strided_slice_float_10_relaxed.example.cpp b/nn/runtime/test/generated/examples/strided_slice_float_10_relaxed.example.cpp
index d13c494..b09d193 100644
--- a/nn/runtime/test/generated/examples/strided_slice_float_10_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/strided_slice_float_10_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: strided_slice_float_10_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/strided_slice_float_11.example.cpp b/nn/runtime/test/generated/examples/strided_slice_float_11.example.cpp
index f4e367a..88ef755 100644
--- a/nn/runtime/test/generated/examples/strided_slice_float_11.example.cpp
+++ b/nn/runtime/test/generated/examples/strided_slice_float_11.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: strided_slice_float_11.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/strided_slice_float_11_relaxed.example.cpp b/nn/runtime/test/generated/examples/strided_slice_float_11_relaxed.example.cpp
index 209eb5f..90f9f34 100644
--- a/nn/runtime/test/generated/examples/strided_slice_float_11_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/strided_slice_float_11_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: strided_slice_float_11_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/strided_slice_float_1_relaxed.example.cpp b/nn/runtime/test/generated/examples/strided_slice_float_1_relaxed.example.cpp
index 86a180d..995199d 100644
--- a/nn/runtime/test/generated/examples/strided_slice_float_1_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/strided_slice_float_1_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: strided_slice_float_1_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/strided_slice_float_2.example.cpp b/nn/runtime/test/generated/examples/strided_slice_float_2.example.cpp
index 849a47a..69aec94 100644
--- a/nn/runtime/test/generated/examples/strided_slice_float_2.example.cpp
+++ b/nn/runtime/test/generated/examples/strided_slice_float_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: strided_slice_float_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/strided_slice_float_2_relaxed.example.cpp b/nn/runtime/test/generated/examples/strided_slice_float_2_relaxed.example.cpp
index e0b2a40..00bcb3b 100644
--- a/nn/runtime/test/generated/examples/strided_slice_float_2_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/strided_slice_float_2_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: strided_slice_float_2_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/strided_slice_float_3.example.cpp b/nn/runtime/test/generated/examples/strided_slice_float_3.example.cpp
index a31d564..00b5909 100644
--- a/nn/runtime/test/generated/examples/strided_slice_float_3.example.cpp
+++ b/nn/runtime/test/generated/examples/strided_slice_float_3.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: strided_slice_float_3.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/strided_slice_float_3_relaxed.example.cpp b/nn/runtime/test/generated/examples/strided_slice_float_3_relaxed.example.cpp
index 31b03d2..125c1a8 100644
--- a/nn/runtime/test/generated/examples/strided_slice_float_3_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/strided_slice_float_3_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: strided_slice_float_3_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/strided_slice_float_4.example.cpp b/nn/runtime/test/generated/examples/strided_slice_float_4.example.cpp
index c6f4d6d..26eb2e9 100644
--- a/nn/runtime/test/generated/examples/strided_slice_float_4.example.cpp
+++ b/nn/runtime/test/generated/examples/strided_slice_float_4.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: strided_slice_float_4.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/strided_slice_float_4_relaxed.example.cpp b/nn/runtime/test/generated/examples/strided_slice_float_4_relaxed.example.cpp
index 9c4598d..b63d71c 100644
--- a/nn/runtime/test/generated/examples/strided_slice_float_4_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/strided_slice_float_4_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: strided_slice_float_4_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/strided_slice_float_5.example.cpp b/nn/runtime/test/generated/examples/strided_slice_float_5.example.cpp
index be2d5c2..a1303ed 100644
--- a/nn/runtime/test/generated/examples/strided_slice_float_5.example.cpp
+++ b/nn/runtime/test/generated/examples/strided_slice_float_5.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: strided_slice_float_5.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/strided_slice_float_5_relaxed.example.cpp b/nn/runtime/test/generated/examples/strided_slice_float_5_relaxed.example.cpp
index 6a0abe2..80a3cf5 100644
--- a/nn/runtime/test/generated/examples/strided_slice_float_5_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/strided_slice_float_5_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: strided_slice_float_5_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/strided_slice_float_6.example.cpp b/nn/runtime/test/generated/examples/strided_slice_float_6.example.cpp
index a119a79..92f2b93 100644
--- a/nn/runtime/test/generated/examples/strided_slice_float_6.example.cpp
+++ b/nn/runtime/test/generated/examples/strided_slice_float_6.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: strided_slice_float_6.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/strided_slice_float_6_relaxed.example.cpp b/nn/runtime/test/generated/examples/strided_slice_float_6_relaxed.example.cpp
index 49f107f..14132ed 100644
--- a/nn/runtime/test/generated/examples/strided_slice_float_6_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/strided_slice_float_6_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: strided_slice_float_6_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/strided_slice_float_7.example.cpp b/nn/runtime/test/generated/examples/strided_slice_float_7.example.cpp
index a2b6586..4975cbb 100644
--- a/nn/runtime/test/generated/examples/strided_slice_float_7.example.cpp
+++ b/nn/runtime/test/generated/examples/strided_slice_float_7.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: strided_slice_float_7.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/strided_slice_float_7_relaxed.example.cpp b/nn/runtime/test/generated/examples/strided_slice_float_7_relaxed.example.cpp
index ea8e99c..829b5a4 100644
--- a/nn/runtime/test/generated/examples/strided_slice_float_7_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/strided_slice_float_7_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: strided_slice_float_7_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/strided_slice_float_8.example.cpp b/nn/runtime/test/generated/examples/strided_slice_float_8.example.cpp
index 4d91d78..b8b3b22 100644
--- a/nn/runtime/test/generated/examples/strided_slice_float_8.example.cpp
+++ b/nn/runtime/test/generated/examples/strided_slice_float_8.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: strided_slice_float_8.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/strided_slice_float_8_relaxed.example.cpp b/nn/runtime/test/generated/examples/strided_slice_float_8_relaxed.example.cpp
index 7e3981d..10168a0 100644
--- a/nn/runtime/test/generated/examples/strided_slice_float_8_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/strided_slice_float_8_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: strided_slice_float_8_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/strided_slice_float_9.example.cpp b/nn/runtime/test/generated/examples/strided_slice_float_9.example.cpp
index 5e8974f..9e68f61 100644
--- a/nn/runtime/test/generated/examples/strided_slice_float_9.example.cpp
+++ b/nn/runtime/test/generated/examples/strided_slice_float_9.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: strided_slice_float_9.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/strided_slice_float_9_relaxed.example.cpp b/nn/runtime/test/generated/examples/strided_slice_float_9_relaxed.example.cpp
index 205f122..668e0ab 100644
--- a/nn/runtime/test/generated/examples/strided_slice_float_9_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/strided_slice_float_9_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: strided_slice_float_9_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/strided_slice_qaunt8_10.example.cpp b/nn/runtime/test/generated/examples/strided_slice_qaunt8_10.example.cpp
index 25cce8c..aed6d18 100644
--- a/nn/runtime/test/generated/examples/strided_slice_qaunt8_10.example.cpp
+++ b/nn/runtime/test/generated/examples/strided_slice_qaunt8_10.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: strided_slice_qaunt8_10.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/strided_slice_qaunt8_11.example.cpp b/nn/runtime/test/generated/examples/strided_slice_qaunt8_11.example.cpp
index 894ed1e..b76a4bb 100644
--- a/nn/runtime/test/generated/examples/strided_slice_qaunt8_11.example.cpp
+++ b/nn/runtime/test/generated/examples/strided_slice_qaunt8_11.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: strided_slice_qaunt8_11.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/strided_slice_quant8_1.example.cpp b/nn/runtime/test/generated/examples/strided_slice_quant8_1.example.cpp
index a0d0110..1245861 100644
--- a/nn/runtime/test/generated/examples/strided_slice_quant8_1.example.cpp
+++ b/nn/runtime/test/generated/examples/strided_slice_quant8_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: strided_slice_quant8_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/strided_slice_quant8_2.example.cpp b/nn/runtime/test/generated/examples/strided_slice_quant8_2.example.cpp
index a713a0f..08a0f77 100644
--- a/nn/runtime/test/generated/examples/strided_slice_quant8_2.example.cpp
+++ b/nn/runtime/test/generated/examples/strided_slice_quant8_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: strided_slice_quant8_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/strided_slice_quant8_3.example.cpp b/nn/runtime/test/generated/examples/strided_slice_quant8_3.example.cpp
index 3a2d22d..a1a4493 100644
--- a/nn/runtime/test/generated/examples/strided_slice_quant8_3.example.cpp
+++ b/nn/runtime/test/generated/examples/strided_slice_quant8_3.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: strided_slice_quant8_3.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/strided_slice_quant8_4.example.cpp b/nn/runtime/test/generated/examples/strided_slice_quant8_4.example.cpp
index e2f4a70..af616e7 100644
--- a/nn/runtime/test/generated/examples/strided_slice_quant8_4.example.cpp
+++ b/nn/runtime/test/generated/examples/strided_slice_quant8_4.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: strided_slice_quant8_4.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/strided_slice_quant8_5.example.cpp b/nn/runtime/test/generated/examples/strided_slice_quant8_5.example.cpp
index 12f313f..3a5fee7 100644
--- a/nn/runtime/test/generated/examples/strided_slice_quant8_5.example.cpp
+++ b/nn/runtime/test/generated/examples/strided_slice_quant8_5.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: strided_slice_quant8_5.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/strided_slice_quant8_6.example.cpp b/nn/runtime/test/generated/examples/strided_slice_quant8_6.example.cpp
index 5e07a57..742e389 100644
--- a/nn/runtime/test/generated/examples/strided_slice_quant8_6.example.cpp
+++ b/nn/runtime/test/generated/examples/strided_slice_quant8_6.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: strided_slice_quant8_6.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/strided_slice_quant8_7.example.cpp b/nn/runtime/test/generated/examples/strided_slice_quant8_7.example.cpp
index df2870f..df16621 100644
--- a/nn/runtime/test/generated/examples/strided_slice_quant8_7.example.cpp
+++ b/nn/runtime/test/generated/examples/strided_slice_quant8_7.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: strided_slice_quant8_7.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/strided_slice_quant8_8.example.cpp b/nn/runtime/test/generated/examples/strided_slice_quant8_8.example.cpp
index 0a5654a..11f053c 100644
--- a/nn/runtime/test/generated/examples/strided_slice_quant8_8.example.cpp
+++ b/nn/runtime/test/generated/examples/strided_slice_quant8_8.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: strided_slice_quant8_8.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/strided_slice_quant8_9.example.cpp b/nn/runtime/test/generated/examples/strided_slice_quant8_9.example.cpp
index ba33b70..feb863e 100644
--- a/nn/runtime/test/generated/examples/strided_slice_quant8_9.example.cpp
+++ b/nn/runtime/test/generated/examples/strided_slice_quant8_9.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: strided_slice_quant8_9.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/strided_slice_relaxed.example.cpp b/nn/runtime/test/generated/examples/strided_slice_relaxed.example.cpp
index adcc0ff..008b297 100644
--- a/nn/runtime/test/generated/examples/strided_slice_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/strided_slice_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: strided_slice_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/sub.example.cpp b/nn/runtime/test/generated/examples/sub.example.cpp
index 7d8c0cb..ff3d100 100644
--- a/nn/runtime/test/generated/examples/sub.example.cpp
+++ b/nn/runtime/test/generated/examples/sub.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: sub.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/sub_broadcast_float.example.cpp b/nn/runtime/test/generated/examples/sub_broadcast_float.example.cpp
index b1862bf..bbc3d19 100644
--- a/nn/runtime/test/generated/examples/sub_broadcast_float.example.cpp
+++ b/nn/runtime/test/generated/examples/sub_broadcast_float.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: sub_broadcast_float.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/sub_broadcast_float_relaxed.example.cpp b/nn/runtime/test/generated/examples/sub_broadcast_float_relaxed.example.cpp
index 46c3736..f3b521e 100644
--- a/nn/runtime/test/generated/examples/sub_broadcast_float_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/sub_broadcast_float_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: sub_broadcast_float_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/sub_float16.example.cpp b/nn/runtime/test/generated/examples/sub_float16.example.cpp
index e1fab04..8b258dc 100644
--- a/nn/runtime/test/generated/examples/sub_float16.example.cpp
+++ b/nn/runtime/test/generated/examples/sub_float16.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: sub_float16.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/sub_float16_broadcast.example.cpp b/nn/runtime/test/generated/examples/sub_float16_broadcast.example.cpp
index 7e71fba..01708f7 100644
--- a/nn/runtime/test/generated/examples/sub_float16_broadcast.example.cpp
+++ b/nn/runtime/test/generated/examples/sub_float16_broadcast.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: sub_float16_broadcast.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/sub_quantized.example.cpp b/nn/runtime/test/generated/examples/sub_quantized.example.cpp
index 4b1bdf7..766776a 100644
--- a/nn/runtime/test/generated/examples/sub_quantized.example.cpp
+++ b/nn/runtime/test/generated/examples/sub_quantized.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: sub_quantized.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/sub_quantized_broadcast.example.cpp b/nn/runtime/test/generated/examples/sub_quantized_broadcast.example.cpp
index b1e402e..9e47a11 100644
--- a/nn/runtime/test/generated/examples/sub_quantized_broadcast.example.cpp
+++ b/nn/runtime/test/generated/examples/sub_quantized_broadcast.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: sub_quantized_broadcast.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/sub_quantized_different_scales.example.cpp b/nn/runtime/test/generated/examples/sub_quantized_different_scales.example.cpp
index a8145ee..466d229 100644
--- a/nn/runtime/test/generated/examples/sub_quantized_different_scales.example.cpp
+++ b/nn/runtime/test/generated/examples/sub_quantized_different_scales.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: sub_quantized_different_scales.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
-std::vector<MixedTypedExample> examples_2 = {
+std::vector<MixedTypedExample>& get_examples_2() {
+static std::vector<MixedTypedExample> examples_2 = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,11 @@
 },
 }, // End of an example
 };
+return examples_2;
+};
 
-std::vector<MixedTypedExample> examples_3 = {
+std::vector<MixedTypedExample>& get_examples_3() {
+static std::vector<MixedTypedExample> examples_3 = {
 // Begin of an example
 {
 .operands = {
@@ -101,8 +108,11 @@
 },
 }, // End of an example
 };
+return examples_3;
+};
 
-std::vector<MixedTypedExample> examples_4 = {
+std::vector<MixedTypedExample>& get_examples_4() {
+static std::vector<MixedTypedExample> examples_4 = {
 // Begin of an example
 {
 .operands = {
@@ -135,8 +145,11 @@
 },
 }, // End of an example
 };
+return examples_4;
+};
 
-std::vector<MixedTypedExample> examples_5 = {
+std::vector<MixedTypedExample>& get_examples_5() {
+static std::vector<MixedTypedExample> examples_5 = {
 // Begin of an example
 {
 .operands = {
@@ -169,8 +182,11 @@
 },
 }, // End of an example
 };
+return examples_5;
+};
 
-std::vector<MixedTypedExample> examples_6 = {
+std::vector<MixedTypedExample>& get_examples_6() {
+static std::vector<MixedTypedExample> examples_6 = {
 // Begin of an example
 {
 .operands = {
@@ -203,8 +219,11 @@
 },
 }, // End of an example
 };
+return examples_6;
+};
 
-std::vector<MixedTypedExample> examples_7 = {
+std::vector<MixedTypedExample>& get_examples_7() {
+static std::vector<MixedTypedExample> examples_7 = {
 // Begin of an example
 {
 .operands = {
@@ -237,8 +256,11 @@
 },
 }, // End of an example
 };
+return examples_7;
+};
 
-std::vector<MixedTypedExample> examples_8 = {
+std::vector<MixedTypedExample>& get_examples_8() {
+static std::vector<MixedTypedExample> examples_8 = {
 // Begin of an example
 {
 .operands = {
@@ -271,8 +293,11 @@
 },
 }, // End of an example
 };
+return examples_8;
+};
 
-std::vector<MixedTypedExample> examples_9 = {
+std::vector<MixedTypedExample>& get_examples_9() {
+static std::vector<MixedTypedExample> examples_9 = {
 // Begin of an example
 {
 .operands = {
@@ -305,8 +330,11 @@
 },
 }, // End of an example
 };
+return examples_9;
+};
 
-std::vector<MixedTypedExample> examples_10 = {
+std::vector<MixedTypedExample>& get_examples_10() {
+static std::vector<MixedTypedExample> examples_10 = {
 // Begin of an example
 {
 .operands = {
@@ -339,8 +367,11 @@
 },
 }, // End of an example
 };
+return examples_10;
+};
 
-std::vector<MixedTypedExample> examples_11 = {
+std::vector<MixedTypedExample>& get_examples_11() {
+static std::vector<MixedTypedExample> examples_11 = {
 // Begin of an example
 {
 .operands = {
@@ -373,8 +404,11 @@
 },
 }, // End of an example
 };
+return examples_11;
+};
 
-std::vector<MixedTypedExample> examples_12 = {
+std::vector<MixedTypedExample>& get_examples_12() {
+static std::vector<MixedTypedExample> examples_12 = {
 // Begin of an example
 {
 .operands = {
@@ -407,8 +441,11 @@
 },
 }, // End of an example
 };
+return examples_12;
+};
 
-std::vector<MixedTypedExample> examples_13 = {
+std::vector<MixedTypedExample>& get_examples_13() {
+static std::vector<MixedTypedExample> examples_13 = {
 // Begin of an example
 {
 .operands = {
@@ -441,8 +478,11 @@
 },
 }, // End of an example
 };
+return examples_13;
+};
 
-std::vector<MixedTypedExample> examples_14 = {
+std::vector<MixedTypedExample>& get_examples_14() {
+static std::vector<MixedTypedExample> examples_14 = {
 // Begin of an example
 {
 .operands = {
@@ -475,8 +515,11 @@
 },
 }, // End of an example
 };
+return examples_14;
+};
 
-std::vector<MixedTypedExample> examples_15 = {
+std::vector<MixedTypedExample>& get_examples_15() {
+static std::vector<MixedTypedExample> examples_15 = {
 // Begin of an example
 {
 .operands = {
@@ -509,8 +552,11 @@
 },
 }, // End of an example
 };
+return examples_15;
+};
 
-std::vector<MixedTypedExample> examples_16 = {
+std::vector<MixedTypedExample>& get_examples_16() {
+static std::vector<MixedTypedExample> examples_16 = {
 // Begin of an example
 {
 .operands = {
@@ -543,8 +589,11 @@
 },
 }, // End of an example
 };
+return examples_16;
+};
 
-std::vector<MixedTypedExample> examples_17 = {
+std::vector<MixedTypedExample>& get_examples_17() {
+static std::vector<MixedTypedExample> examples_17 = {
 // Begin of an example
 {
 .operands = {
@@ -577,8 +626,11 @@
 },
 }, // End of an example
 };
+return examples_17;
+};
 
-std::vector<MixedTypedExample> examples_18 = {
+std::vector<MixedTypedExample>& get_examples_18() {
+static std::vector<MixedTypedExample> examples_18 = {
 // Begin of an example
 {
 .operands = {
@@ -611,8 +663,11 @@
 },
 }, // End of an example
 };
+return examples_18;
+};
 
-std::vector<MixedTypedExample> examples_19 = {
+std::vector<MixedTypedExample>& get_examples_19() {
+static std::vector<MixedTypedExample> examples_19 = {
 // Begin of an example
 {
 .operands = {
@@ -645,8 +700,11 @@
 },
 }, // End of an example
 };
+return examples_19;
+};
 
-std::vector<MixedTypedExample> examples_20 = {
+std::vector<MixedTypedExample>& get_examples_20() {
+static std::vector<MixedTypedExample> examples_20 = {
 // Begin of an example
 {
 .operands = {
@@ -679,8 +737,11 @@
 },
 }, // End of an example
 };
+return examples_20;
+};
 
-std::vector<MixedTypedExample> examples_21 = {
+std::vector<MixedTypedExample>& get_examples_21() {
+static std::vector<MixedTypedExample> examples_21 = {
 // Begin of an example
 {
 .operands = {
@@ -713,8 +774,11 @@
 },
 }, // End of an example
 };
+return examples_21;
+};
 
-std::vector<MixedTypedExample> examples_22 = {
+std::vector<MixedTypedExample>& get_examples_22() {
+static std::vector<MixedTypedExample> examples_22 = {
 // Begin of an example
 {
 .operands = {
@@ -747,8 +811,11 @@
 },
 }, // End of an example
 };
+return examples_22;
+};
 
-std::vector<MixedTypedExample> examples_23 = {
+std::vector<MixedTypedExample>& get_examples_23() {
+static std::vector<MixedTypedExample> examples_23 = {
 // Begin of an example
 {
 .operands = {
@@ -781,8 +848,11 @@
 },
 }, // End of an example
 };
+return examples_23;
+};
 
-std::vector<MixedTypedExample> examples_24 = {
+std::vector<MixedTypedExample>& get_examples_24() {
+static std::vector<MixedTypedExample> examples_24 = {
 // Begin of an example
 {
 .operands = {
@@ -815,8 +885,11 @@
 },
 }, // End of an example
 };
+return examples_24;
+};
 
-std::vector<MixedTypedExample> examples_25 = {
+std::vector<MixedTypedExample>& get_examples_25() {
+static std::vector<MixedTypedExample> examples_25 = {
 // Begin of an example
 {
 .operands = {
@@ -849,8 +922,11 @@
 },
 }, // End of an example
 };
+return examples_25;
+};
 
-std::vector<MixedTypedExample> examples_26 = {
+std::vector<MixedTypedExample>& get_examples_26() {
+static std::vector<MixedTypedExample> examples_26 = {
 // Begin of an example
 {
 .operands = {
@@ -883,8 +959,11 @@
 },
 }, // End of an example
 };
+return examples_26;
+};
 
-std::vector<MixedTypedExample> examples_27 = {
+std::vector<MixedTypedExample>& get_examples_27() {
+static std::vector<MixedTypedExample> examples_27 = {
 // Begin of an example
 {
 .operands = {
@@ -917,8 +996,11 @@
 },
 }, // End of an example
 };
+return examples_27;
+};
 
-std::vector<MixedTypedExample> examples_28 = {
+std::vector<MixedTypedExample>& get_examples_28() {
+static std::vector<MixedTypedExample> examples_28 = {
 // Begin of an example
 {
 .operands = {
@@ -951,8 +1033,11 @@
 },
 }, // End of an example
 };
+return examples_28;
+};
 
-std::vector<MixedTypedExample> examples_29 = {
+std::vector<MixedTypedExample>& get_examples_29() {
+static std::vector<MixedTypedExample> examples_29 = {
 // Begin of an example
 {
 .operands = {
@@ -985,8 +1070,11 @@
 },
 }, // End of an example
 };
+return examples_29;
+};
 
-std::vector<MixedTypedExample> examples_30 = {
+std::vector<MixedTypedExample>& get_examples_30() {
+static std::vector<MixedTypedExample> examples_30 = {
 // Begin of an example
 {
 .operands = {
@@ -1019,8 +1107,11 @@
 },
 }, // End of an example
 };
+return examples_30;
+};
 
-std::vector<MixedTypedExample> examples_31 = {
+std::vector<MixedTypedExample>& get_examples_31() {
+static std::vector<MixedTypedExample> examples_31 = {
 // Begin of an example
 {
 .operands = {
@@ -1053,8 +1144,11 @@
 },
 }, // End of an example
 };
+return examples_31;
+};
 
-std::vector<MixedTypedExample> examples_32 = {
+std::vector<MixedTypedExample>& get_examples_32() {
+static std::vector<MixedTypedExample> examples_32 = {
 // Begin of an example
 {
 .operands = {
@@ -1087,8 +1181,11 @@
 },
 }, // End of an example
 };
+return examples_32;
+};
 
-std::vector<MixedTypedExample> examples_33 = {
+std::vector<MixedTypedExample>& get_examples_33() {
+static std::vector<MixedTypedExample> examples_33 = {
 // Begin of an example
 {
 .operands = {
@@ -1121,8 +1218,11 @@
 },
 }, // End of an example
 };
+return examples_33;
+};
 
-std::vector<MixedTypedExample> examples_34 = {
+std::vector<MixedTypedExample>& get_examples_34() {
+static std::vector<MixedTypedExample> examples_34 = {
 // Begin of an example
 {
 .operands = {
@@ -1155,8 +1255,11 @@
 },
 }, // End of an example
 };
+return examples_34;
+};
 
-std::vector<MixedTypedExample> examples_35 = {
+std::vector<MixedTypedExample>& get_examples_35() {
+static std::vector<MixedTypedExample> examples_35 = {
 // Begin of an example
 {
 .operands = {
@@ -1189,8 +1292,11 @@
 },
 }, // End of an example
 };
+return examples_35;
+};
 
-std::vector<MixedTypedExample> examples_36 = {
+std::vector<MixedTypedExample>& get_examples_36() {
+static std::vector<MixedTypedExample> examples_36 = {
 // Begin of an example
 {
 .operands = {
@@ -1223,8 +1329,11 @@
 },
 }, // End of an example
 };
+return examples_36;
+};
 
-std::vector<MixedTypedExample> examples_37 = {
+std::vector<MixedTypedExample>& get_examples_37() {
+static std::vector<MixedTypedExample> examples_37 = {
 // Begin of an example
 {
 .operands = {
@@ -1257,8 +1366,11 @@
 },
 }, // End of an example
 };
+return examples_37;
+};
 
-std::vector<MixedTypedExample> examples_38 = {
+std::vector<MixedTypedExample>& get_examples_38() {
+static std::vector<MixedTypedExample> examples_38 = {
 // Begin of an example
 {
 .operands = {
@@ -1291,8 +1403,11 @@
 },
 }, // End of an example
 };
+return examples_38;
+};
 
-std::vector<MixedTypedExample> examples_39 = {
+std::vector<MixedTypedExample>& get_examples_39() {
+static std::vector<MixedTypedExample> examples_39 = {
 // Begin of an example
 {
 .operands = {
@@ -1325,8 +1440,11 @@
 },
 }, // End of an example
 };
+return examples_39;
+};
 
-std::vector<MixedTypedExample> examples_40 = {
+std::vector<MixedTypedExample>& get_examples_40() {
+static std::vector<MixedTypedExample> examples_40 = {
 // Begin of an example
 {
 .operands = {
@@ -1359,8 +1477,11 @@
 },
 }, // End of an example
 };
+return examples_40;
+};
 
-std::vector<MixedTypedExample> examples_41 = {
+std::vector<MixedTypedExample>& get_examples_41() {
+static std::vector<MixedTypedExample> examples_41 = {
 // Begin of an example
 {
 .operands = {
@@ -1393,8 +1514,11 @@
 },
 }, // End of an example
 };
+return examples_41;
+};
 
-std::vector<MixedTypedExample> examples_42 = {
+std::vector<MixedTypedExample>& get_examples_42() {
+static std::vector<MixedTypedExample> examples_42 = {
 // Begin of an example
 {
 .operands = {
@@ -1427,8 +1551,11 @@
 },
 }, // End of an example
 };
+return examples_42;
+};
 
-std::vector<MixedTypedExample> examples_43 = {
+std::vector<MixedTypedExample>& get_examples_43() {
+static std::vector<MixedTypedExample> examples_43 = {
 // Begin of an example
 {
 .operands = {
@@ -1461,8 +1588,11 @@
 },
 }, // End of an example
 };
+return examples_43;
+};
 
-std::vector<MixedTypedExample> examples_44 = {
+std::vector<MixedTypedExample>& get_examples_44() {
+static std::vector<MixedTypedExample> examples_44 = {
 // Begin of an example
 {
 .operands = {
@@ -1495,8 +1625,11 @@
 },
 }, // End of an example
 };
+return examples_44;
+};
 
-std::vector<MixedTypedExample> examples_45 = {
+std::vector<MixedTypedExample>& get_examples_45() {
+static std::vector<MixedTypedExample> examples_45 = {
 // Begin of an example
 {
 .operands = {
@@ -1529,8 +1662,11 @@
 },
 }, // End of an example
 };
+return examples_45;
+};
 
-std::vector<MixedTypedExample> examples_46 = {
+std::vector<MixedTypedExample>& get_examples_46() {
+static std::vector<MixedTypedExample> examples_46 = {
 // Begin of an example
 {
 .operands = {
@@ -1563,8 +1699,11 @@
 },
 }, // End of an example
 };
+return examples_46;
+};
 
-std::vector<MixedTypedExample> examples_47 = {
+std::vector<MixedTypedExample>& get_examples_47() {
+static std::vector<MixedTypedExample> examples_47 = {
 // Begin of an example
 {
 .operands = {
@@ -1597,8 +1736,11 @@
 },
 }, // End of an example
 };
+return examples_47;
+};
 
-std::vector<MixedTypedExample> examples_48 = {
+std::vector<MixedTypedExample>& get_examples_48() {
+static std::vector<MixedTypedExample> examples_48 = {
 // Begin of an example
 {
 .operands = {
@@ -1631,8 +1773,11 @@
 },
 }, // End of an example
 };
+return examples_48;
+};
 
-std::vector<MixedTypedExample> examples_49 = {
+std::vector<MixedTypedExample>& get_examples_49() {
+static std::vector<MixedTypedExample> examples_49 = {
 // Begin of an example
 {
 .operands = {
@@ -1665,8 +1810,11 @@
 },
 }, // End of an example
 };
+return examples_49;
+};
 
-std::vector<MixedTypedExample> examples_50 = {
+std::vector<MixedTypedExample>& get_examples_50() {
+static std::vector<MixedTypedExample> examples_50 = {
 // Begin of an example
 {
 .operands = {
@@ -1699,8 +1847,11 @@
 },
 }, // End of an example
 };
+return examples_50;
+};
 
-std::vector<MixedTypedExample> examples_51 = {
+std::vector<MixedTypedExample>& get_examples_51() {
+static std::vector<MixedTypedExample> examples_51 = {
 // Begin of an example
 {
 .operands = {
@@ -1733,8 +1884,11 @@
 },
 }, // End of an example
 };
+return examples_51;
+};
 
-std::vector<MixedTypedExample> examples_52 = {
+std::vector<MixedTypedExample>& get_examples_52() {
+static std::vector<MixedTypedExample> examples_52 = {
 // Begin of an example
 {
 .operands = {
@@ -1767,8 +1921,11 @@
 },
 }, // End of an example
 };
+return examples_52;
+};
 
-std::vector<MixedTypedExample> examples_53 = {
+std::vector<MixedTypedExample>& get_examples_53() {
+static std::vector<MixedTypedExample> examples_53 = {
 // Begin of an example
 {
 .operands = {
@@ -1801,8 +1958,11 @@
 },
 }, // End of an example
 };
+return examples_53;
+};
 
-std::vector<MixedTypedExample> examples_54 = {
+std::vector<MixedTypedExample>& get_examples_54() {
+static std::vector<MixedTypedExample> examples_54 = {
 // Begin of an example
 {
 .operands = {
@@ -1835,8 +1995,11 @@
 },
 }, // End of an example
 };
+return examples_54;
+};
 
-std::vector<MixedTypedExample> examples_55 = {
+std::vector<MixedTypedExample>& get_examples_55() {
+static std::vector<MixedTypedExample> examples_55 = {
 // Begin of an example
 {
 .operands = {
@@ -1869,8 +2032,11 @@
 },
 }, // End of an example
 };
+return examples_55;
+};
 
-std::vector<MixedTypedExample> examples_56 = {
+std::vector<MixedTypedExample>& get_examples_56() {
+static std::vector<MixedTypedExample> examples_56 = {
 // Begin of an example
 {
 .operands = {
@@ -1903,8 +2069,11 @@
 },
 }, // End of an example
 };
+return examples_56;
+};
 
-std::vector<MixedTypedExample> examples_57 = {
+std::vector<MixedTypedExample>& get_examples_57() {
+static std::vector<MixedTypedExample> examples_57 = {
 // Begin of an example
 {
 .operands = {
@@ -1937,8 +2106,11 @@
 },
 }, // End of an example
 };
+return examples_57;
+};
 
-std::vector<MixedTypedExample> examples_58 = {
+std::vector<MixedTypedExample>& get_examples_58() {
+static std::vector<MixedTypedExample> examples_58 = {
 // Begin of an example
 {
 .operands = {
@@ -1971,8 +2143,11 @@
 },
 }, // End of an example
 };
+return examples_58;
+};
 
-std::vector<MixedTypedExample> examples_59 = {
+std::vector<MixedTypedExample>& get_examples_59() {
+static std::vector<MixedTypedExample> examples_59 = {
 // Begin of an example
 {
 .operands = {
@@ -2005,8 +2180,11 @@
 },
 }, // End of an example
 };
+return examples_59;
+};
 
-std::vector<MixedTypedExample> examples_60 = {
+std::vector<MixedTypedExample>& get_examples_60() {
+static std::vector<MixedTypedExample> examples_60 = {
 // Begin of an example
 {
 .operands = {
@@ -2039,8 +2217,11 @@
 },
 }, // End of an example
 };
+return examples_60;
+};
 
-std::vector<MixedTypedExample> examples_61 = {
+std::vector<MixedTypedExample>& get_examples_61() {
+static std::vector<MixedTypedExample> examples_61 = {
 // Begin of an example
 {
 .operands = {
@@ -2073,8 +2254,11 @@
 },
 }, // End of an example
 };
+return examples_61;
+};
 
-std::vector<MixedTypedExample> examples_62 = {
+std::vector<MixedTypedExample>& get_examples_62() {
+static std::vector<MixedTypedExample> examples_62 = {
 // Begin of an example
 {
 .operands = {
@@ -2107,8 +2291,11 @@
 },
 }, // End of an example
 };
+return examples_62;
+};
 
-std::vector<MixedTypedExample> examples_63 = {
+std::vector<MixedTypedExample>& get_examples_63() {
+static std::vector<MixedTypedExample> examples_63 = {
 // Begin of an example
 {
 .operands = {
@@ -2141,8 +2328,11 @@
 },
 }, // End of an example
 };
+return examples_63;
+};
 
-std::vector<MixedTypedExample> examples_64 = {
+std::vector<MixedTypedExample>& get_examples_64() {
+static std::vector<MixedTypedExample> examples_64 = {
 // Begin of an example
 {
 .operands = {
@@ -2175,4 +2365,6 @@
 },
 }, // End of an example
 };
+return examples_64;
+};
 
diff --git a/nn/runtime/test/generated/examples/sub_relaxed.example.cpp b/nn/runtime/test/generated/examples/sub_relaxed.example.cpp
index f031bb7..08d0cac 100644
--- a/nn/runtime/test/generated/examples/sub_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/sub_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: sub_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/svdf.example.cpp b/nn/runtime/test/generated/examples/svdf.example.cpp
index 4e51028..6b77ff3 100644
--- a/nn/runtime/test/generated/examples/svdf.example.cpp
+++ b/nn/runtime/test/generated/examples/svdf.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: svdf.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/svdf2.example.cpp b/nn/runtime/test/generated/examples/svdf2.example.cpp
index 1673e04..fbf95fa 100644
--- a/nn/runtime/test/generated/examples/svdf2.example.cpp
+++ b/nn/runtime/test/generated/examples/svdf2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: svdf2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/svdf2_relaxed.example.cpp b/nn/runtime/test/generated/examples/svdf2_relaxed.example.cpp
index 0afbe5c..822f706 100644
--- a/nn/runtime/test/generated/examples/svdf2_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/svdf2_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: svdf2_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/svdf_relaxed.example.cpp b/nn/runtime/test/generated/examples/svdf_relaxed.example.cpp
index 0b4d14b..437738c 100644
--- a/nn/runtime/test/generated/examples/svdf_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/svdf_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: svdf_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/svdf_state.example.cpp b/nn/runtime/test/generated/examples/svdf_state.example.cpp
index 7843f05..b0775b8 100644
--- a/nn/runtime/test/generated/examples/svdf_state.example.cpp
+++ b/nn/runtime/test/generated/examples/svdf_state.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: svdf_state.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/svdf_state_relaxed.example.cpp b/nn/runtime/test/generated/examples/svdf_state_relaxed.example.cpp
index 15559b0..8c1cd0d 100644
--- a/nn/runtime/test/generated/examples/svdf_state_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/svdf_state_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: svdf_state_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/tanh.example.cpp b/nn/runtime/test/generated/examples/tanh.example.cpp
index 28f8610..40c8cf3 100644
--- a/nn/runtime/test/generated/examples/tanh.example.cpp
+++ b/nn/runtime/test/generated/examples/tanh.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: tanh.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/tanh_float16.example.cpp b/nn/runtime/test/generated/examples/tanh_float16.example.cpp
index 81baeff..484f889 100644
--- a/nn/runtime/test/generated/examples/tanh_float16.example.cpp
+++ b/nn/runtime/test/generated/examples/tanh_float16.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: tanh_float16.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/tanh_quantized.example.cpp b/nn/runtime/test/generated/examples/tanh_quantized.example.cpp
index 690beb3..cf5675b 100644
--- a/nn/runtime/test/generated/examples/tanh_quantized.example.cpp
+++ b/nn/runtime/test/generated/examples/tanh_quantized.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: tanh_quantized.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/tanh_relaxed.example.cpp b/nn/runtime/test/generated/examples/tanh_relaxed.example.cpp
index ab240b5..4ade0e6 100644
--- a/nn/runtime/test/generated/examples/tanh_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/tanh_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: tanh_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/tile_1.example.cpp b/nn/runtime/test/generated/examples/tile_1.example.cpp
index 66a5341..97f848b 100644
--- a/nn/runtime/test/generated/examples/tile_1.example.cpp
+++ b/nn/runtime/test/generated/examples/tile_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: tile_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
-std::vector<MixedTypedExample> examples_relaxed = {
+std::vector<MixedTypedExample>& get_examples_relaxed() {
+static std::vector<MixedTypedExample> examples_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_float16 = {
+std::vector<MixedTypedExample>& get_examples_float16() {
+static std::vector<MixedTypedExample> examples_float16 = {
 // Begin of an example
 {
 .operands = {
@@ -101,8 +108,11 @@
 },
 }, // End of an example
 };
+return examples_float16;
+};
 
-std::vector<MixedTypedExample> examples_quant8 = {
+std::vector<MixedTypedExample>& get_examples_quant8() {
+static std::vector<MixedTypedExample> examples_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -135,4 +145,6 @@
 },
 }, // End of an example
 };
+return examples_quant8;
+};
 
diff --git a/nn/runtime/test/generated/examples/tile_2.example.cpp b/nn/runtime/test/generated/examples/tile_2.example.cpp
index a5b3ab9..d1df523 100644
--- a/nn/runtime/test/generated/examples/tile_2.example.cpp
+++ b/nn/runtime/test/generated/examples/tile_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: tile_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
-std::vector<MixedTypedExample> examples_relaxed = {
+std::vector<MixedTypedExample>& get_examples_relaxed() {
+static std::vector<MixedTypedExample> examples_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_float16 = {
+std::vector<MixedTypedExample>& get_examples_float16() {
+static std::vector<MixedTypedExample> examples_float16 = {
 // Begin of an example
 {
 .operands = {
@@ -101,8 +108,11 @@
 },
 }, // End of an example
 };
+return examples_float16;
+};
 
-std::vector<MixedTypedExample> examples_quant8 = {
+std::vector<MixedTypedExample>& get_examples_quant8() {
+static std::vector<MixedTypedExample> examples_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -135,8 +145,11 @@
 },
 }, // End of an example
 };
+return examples_quant8;
+};
 
-std::vector<MixedTypedExample> examples_int32 = {
+std::vector<MixedTypedExample>& get_examples_int32() {
+static std::vector<MixedTypedExample> examples_int32 = {
 // Begin of an example
 {
 .operands = {
@@ -169,4 +182,6 @@
 },
 }, // End of an example
 };
+return examples_int32;
+};
 
diff --git a/nn/runtime/test/generated/examples/tile_3.example.cpp b/nn/runtime/test/generated/examples/tile_3.example.cpp
index e1d873e..1a03028 100644
--- a/nn/runtime/test/generated/examples/tile_3.example.cpp
+++ b/nn/runtime/test/generated/examples/tile_3.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: tile_3.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
-std::vector<MixedTypedExample> examples_relaxed = {
+std::vector<MixedTypedExample>& get_examples_relaxed() {
+static std::vector<MixedTypedExample> examples_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_float16 = {
+std::vector<MixedTypedExample>& get_examples_float16() {
+static std::vector<MixedTypedExample> examples_float16 = {
 // Begin of an example
 {
 .operands = {
@@ -101,8 +108,11 @@
 },
 }, // End of an example
 };
+return examples_float16;
+};
 
-std::vector<MixedTypedExample> examples_quant8 = {
+std::vector<MixedTypedExample>& get_examples_quant8() {
+static std::vector<MixedTypedExample> examples_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -135,8 +145,11 @@
 },
 }, // End of an example
 };
+return examples_quant8;
+};
 
-std::vector<MixedTypedExample> examples_int32 = {
+std::vector<MixedTypedExample>& get_examples_int32() {
+static std::vector<MixedTypedExample> examples_int32 = {
 // Begin of an example
 {
 .operands = {
@@ -169,4 +182,6 @@
 },
 }, // End of an example
 };
+return examples_int32;
+};
 
diff --git a/nn/runtime/test/generated/examples/topk_v2.example.cpp b/nn/runtime/test/generated/examples/topk_v2.example.cpp
index 72fb73f..57821ef 100644
--- a/nn/runtime/test/generated/examples/topk_v2.example.cpp
+++ b/nn/runtime/test/generated/examples/topk_v2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: topk_v2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
-std::vector<MixedTypedExample> examples_relaxed = {
+std::vector<MixedTypedExample>& get_examples_relaxed() {
+static std::vector<MixedTypedExample> examples_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_float16 = {
+std::vector<MixedTypedExample>& get_examples_float16() {
+static std::vector<MixedTypedExample> examples_float16 = {
 // Begin of an example
 {
 .operands = {
@@ -101,8 +108,11 @@
 },
 }, // End of an example
 };
+return examples_float16;
+};
 
-std::vector<MixedTypedExample> examples_2 = {
+std::vector<MixedTypedExample>& get_examples_2() {
+static std::vector<MixedTypedExample> examples_2 = {
 // Begin of an example
 {
 .operands = {
@@ -135,8 +145,11 @@
 },
 }, // End of an example
 };
+return examples_2;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_2() {
+static std::vector<MixedTypedExample> examples_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -169,8 +182,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_float16_2 = {
+std::vector<MixedTypedExample>& get_examples_float16_2() {
+static std::vector<MixedTypedExample> examples_float16_2 = {
 // Begin of an example
 {
 .operands = {
@@ -203,8 +219,11 @@
 },
 }, // End of an example
 };
+return examples_float16_2;
+};
 
-std::vector<MixedTypedExample> examples_3 = {
+std::vector<MixedTypedExample>& get_examples_3() {
+static std::vector<MixedTypedExample> examples_3 = {
 // Begin of an example
 {
 .operands = {
@@ -237,8 +256,11 @@
 },
 }, // End of an example
 };
+return examples_3;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_3 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_3() {
+static std::vector<MixedTypedExample> examples_relaxed_3 = {
 // Begin of an example
 {
 .operands = {
@@ -271,8 +293,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_3;
+};
 
-std::vector<MixedTypedExample> examples_float16_3 = {
+std::vector<MixedTypedExample>& get_examples_float16_3() {
+static std::vector<MixedTypedExample> examples_float16_3 = {
 // Begin of an example
 {
 .operands = {
@@ -305,8 +330,11 @@
 },
 }, // End of an example
 };
+return examples_float16_3;
+};
 
-std::vector<MixedTypedExample> examples_4 = {
+std::vector<MixedTypedExample>& get_examples_4() {
+static std::vector<MixedTypedExample> examples_4 = {
 // Begin of an example
 {
 .operands = {
@@ -339,8 +367,11 @@
 },
 }, // End of an example
 };
+return examples_4;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_4 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_4() {
+static std::vector<MixedTypedExample> examples_relaxed_4 = {
 // Begin of an example
 {
 .operands = {
@@ -373,8 +404,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_4;
+};
 
-std::vector<MixedTypedExample> examples_float16_4 = {
+std::vector<MixedTypedExample>& get_examples_float16_4() {
+static std::vector<MixedTypedExample> examples_float16_4 = {
 // Begin of an example
 {
 .operands = {
@@ -407,8 +441,11 @@
 },
 }, // End of an example
 };
+return examples_float16_4;
+};
 
-std::vector<MixedTypedExample> examples_5 = {
+std::vector<MixedTypedExample>& get_examples_5() {
+static std::vector<MixedTypedExample> examples_5 = {
 // Begin of an example
 {
 .operands = {
@@ -441,8 +478,11 @@
 },
 }, // End of an example
 };
+return examples_5;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_5 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_5() {
+static std::vector<MixedTypedExample> examples_relaxed_5 = {
 // Begin of an example
 {
 .operands = {
@@ -475,8 +515,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_5;
+};
 
-std::vector<MixedTypedExample> examples_float16_5 = {
+std::vector<MixedTypedExample>& get_examples_float16_5() {
+static std::vector<MixedTypedExample> examples_float16_5 = {
 // Begin of an example
 {
 .operands = {
@@ -509,8 +552,11 @@
 },
 }, // End of an example
 };
+return examples_float16_5;
+};
 
-std::vector<MixedTypedExample> examples_6 = {
+std::vector<MixedTypedExample>& get_examples_6() {
+static std::vector<MixedTypedExample> examples_6 = {
 // Begin of an example
 {
 .operands = {
@@ -543,8 +589,11 @@
 },
 }, // End of an example
 };
+return examples_6;
+};
 
-std::vector<MixedTypedExample> examples_relaxed_6 = {
+std::vector<MixedTypedExample>& get_examples_relaxed_6() {
+static std::vector<MixedTypedExample> examples_relaxed_6 = {
 // Begin of an example
 {
 .operands = {
@@ -577,8 +626,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed_6;
+};
 
-std::vector<MixedTypedExample> examples_float16_6 = {
+std::vector<MixedTypedExample>& get_examples_float16_6() {
+static std::vector<MixedTypedExample> examples_float16_6 = {
 // Begin of an example
 {
 .operands = {
@@ -611,4 +663,6 @@
 },
 }, // End of an example
 };
+return examples_float16_6;
+};
 
diff --git a/nn/runtime/test/generated/examples/transpose.example.cpp b/nn/runtime/test/generated/examples/transpose.example.cpp
index dd30653..a88443f 100644
--- a/nn/runtime/test/generated/examples/transpose.example.cpp
+++ b/nn/runtime/test/generated/examples/transpose.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: transpose.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/transpose_conv2d.example.cpp b/nn/runtime/test/generated/examples/transpose_conv2d.example.cpp
index f39c614..8de0db1 100644
--- a/nn/runtime/test/generated/examples/transpose_conv2d.example.cpp
+++ b/nn/runtime/test/generated/examples/transpose_conv2d.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: transpose_conv2d.mod.py). Do not edit
-std::vector<MixedTypedExample> examples_nhwc_none = {
+std::vector<MixedTypedExample>& get_examples_nhwc_none() {
+static std::vector<MixedTypedExample> examples_nhwc_none = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_none;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_none_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nhwc_none_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nhwc_none_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_none_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_none_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nhwc_none_relaxed() {
+static std::vector<MixedTypedExample> examples_nhwc_none_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -101,8 +108,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_none_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_none_relaxed_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nhwc_none_relaxed_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nhwc_none_relaxed_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -135,8 +145,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_none_relaxed_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_none_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_none_quant8() {
+static std::vector<MixedTypedExample> examples_nhwc_none_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -169,8 +182,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_none_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_none_quant8_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nhwc_none_quant8_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nhwc_none_quant8_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -203,8 +219,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_none_quant8_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relu = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relu() {
+static std::vector<MixedTypedExample> examples_nhwc_relu = {
 // Begin of an example
 {
 .operands = {
@@ -237,8 +256,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relu;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relu_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relu_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nhwc_relu_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -271,8 +293,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relu_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relu_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relu_relaxed() {
+static std::vector<MixedTypedExample> examples_nhwc_relu_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -305,8 +330,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relu_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relu_relaxed_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relu_relaxed_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nhwc_relu_relaxed_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -339,8 +367,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relu_relaxed_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relu_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relu_quant8() {
+static std::vector<MixedTypedExample> examples_nhwc_relu_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -373,8 +404,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relu_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relu_quant8_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relu_quant8_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nhwc_relu_quant8_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -407,8 +441,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relu_quant8_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relu1 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relu1() {
+static std::vector<MixedTypedExample> examples_nhwc_relu1 = {
 // Begin of an example
 {
 .operands = {
@@ -441,8 +478,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relu1;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relu1_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relu1_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nhwc_relu1_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -475,8 +515,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relu1_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relu1_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relu1_relaxed() {
+static std::vector<MixedTypedExample> examples_nhwc_relu1_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -509,8 +552,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relu1_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relu1_relaxed_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relu1_relaxed_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nhwc_relu1_relaxed_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -543,8 +589,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relu1_relaxed_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relu1_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relu1_quant8() {
+static std::vector<MixedTypedExample> examples_nhwc_relu1_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -577,8 +626,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relu1_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relu1_quant8_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relu1_quant8_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nhwc_relu1_quant8_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -611,8 +663,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relu1_quant8_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relu6 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relu6() {
+static std::vector<MixedTypedExample> examples_nhwc_relu6 = {
 // Begin of an example
 {
 .operands = {
@@ -645,8 +700,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relu6;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relu6_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relu6_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nhwc_relu6_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -679,8 +737,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relu6_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relu6_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relu6_relaxed() {
+static std::vector<MixedTypedExample> examples_nhwc_relu6_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -713,8 +774,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relu6_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relu6_relaxed_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relu6_relaxed_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nhwc_relu6_relaxed_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -747,8 +811,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relu6_relaxed_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relu6_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relu6_quant8() {
+static std::vector<MixedTypedExample> examples_nhwc_relu6_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -781,8 +848,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relu6_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relu6_quant8_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relu6_quant8_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nhwc_relu6_quant8_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -815,8 +885,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relu6_quant8_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nchw_none = {
+std::vector<MixedTypedExample>& get_examples_nchw_none() {
+static std::vector<MixedTypedExample> examples_nchw_none = {
 // Begin of an example
 {
 .operands = {
@@ -849,8 +922,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_none;
+};
 
-std::vector<MixedTypedExample> examples_nchw_none_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nchw_none_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nchw_none_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -883,8 +959,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_none_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nchw_none_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nchw_none_relaxed() {
+static std::vector<MixedTypedExample> examples_nchw_none_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -917,8 +996,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_none_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nchw_none_relaxed_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nchw_none_relaxed_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nchw_none_relaxed_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -951,8 +1033,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_none_relaxed_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nchw_none_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nchw_none_quant8() {
+static std::vector<MixedTypedExample> examples_nchw_none_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -985,8 +1070,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_none_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nchw_none_quant8_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nchw_none_quant8_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nchw_none_quant8_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -1019,8 +1107,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_none_quant8_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relu = {
+std::vector<MixedTypedExample>& get_examples_nchw_relu() {
+static std::vector<MixedTypedExample> examples_nchw_relu = {
 // Begin of an example
 {
 .operands = {
@@ -1053,8 +1144,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relu;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relu_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nchw_relu_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nchw_relu_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -1087,8 +1181,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relu_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relu_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nchw_relu_relaxed() {
+static std::vector<MixedTypedExample> examples_nchw_relu_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -1121,8 +1218,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relu_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relu_relaxed_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nchw_relu_relaxed_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nchw_relu_relaxed_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -1155,8 +1255,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relu_relaxed_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relu_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nchw_relu_quant8() {
+static std::vector<MixedTypedExample> examples_nchw_relu_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -1189,8 +1292,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relu_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relu_quant8_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nchw_relu_quant8_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nchw_relu_quant8_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -1223,8 +1329,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relu_quant8_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relu1 = {
+std::vector<MixedTypedExample>& get_examples_nchw_relu1() {
+static std::vector<MixedTypedExample> examples_nchw_relu1 = {
 // Begin of an example
 {
 .operands = {
@@ -1257,8 +1366,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relu1;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relu1_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nchw_relu1_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nchw_relu1_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -1291,8 +1403,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relu1_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relu1_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nchw_relu1_relaxed() {
+static std::vector<MixedTypedExample> examples_nchw_relu1_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -1325,8 +1440,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relu1_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relu1_relaxed_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nchw_relu1_relaxed_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nchw_relu1_relaxed_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -1359,8 +1477,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relu1_relaxed_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relu1_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nchw_relu1_quant8() {
+static std::vector<MixedTypedExample> examples_nchw_relu1_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -1393,8 +1514,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relu1_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relu1_quant8_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nchw_relu1_quant8_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nchw_relu1_quant8_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -1427,8 +1551,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relu1_quant8_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relu6 = {
+std::vector<MixedTypedExample>& get_examples_nchw_relu6() {
+static std::vector<MixedTypedExample> examples_nchw_relu6 = {
 // Begin of an example
 {
 .operands = {
@@ -1461,8 +1588,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relu6;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relu6_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nchw_relu6_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nchw_relu6_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -1495,8 +1625,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relu6_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relu6_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nchw_relu6_relaxed() {
+static std::vector<MixedTypedExample> examples_nchw_relu6_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -1529,8 +1662,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relu6_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relu6_relaxed_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nchw_relu6_relaxed_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nchw_relu6_relaxed_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -1563,8 +1699,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relu6_relaxed_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relu6_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nchw_relu6_quant8() {
+static std::vector<MixedTypedExample> examples_nchw_relu6_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -1597,8 +1736,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relu6_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relu6_quant8_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nchw_relu6_quant8_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nchw_relu6_quant8_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -1631,8 +1773,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relu6_quant8_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nhwc = {
+std::vector<MixedTypedExample>& get_examples_nhwc() {
+static std::vector<MixedTypedExample> examples_nhwc = {
 // Begin of an example
 {
 .operands = {
@@ -1665,8 +1810,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nhwc_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nhwc_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -1699,8 +1847,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -1733,8 +1884,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -1767,8 +1921,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_quant8() {
+static std::vector<MixedTypedExample> examples_nhwc_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -1801,8 +1958,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_quant8_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nhwc_quant8_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nhwc_quant8_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -1835,8 +1995,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_quant8_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nchw = {
+std::vector<MixedTypedExample>& get_examples_nchw() {
+static std::vector<MixedTypedExample> examples_nchw = {
 // Begin of an example
 {
 .operands = {
@@ -1869,8 +2032,11 @@
 },
 }, // End of an example
 };
+return examples_nchw;
+};
 
-std::vector<MixedTypedExample> examples_nchw_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nchw_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nchw_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -1903,8 +2069,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -1937,8 +2106,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -1971,8 +2143,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nchw_quant8 = {
+std::vector<MixedTypedExample>& get_examples_nchw_quant8() {
+static std::vector<MixedTypedExample> examples_nchw_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -2005,8 +2180,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_quant8;
+};
 
-std::vector<MixedTypedExample> examples_nchw_quant8_weight_as_input = {
+std::vector<MixedTypedExample>& get_examples_nchw_quant8_weight_as_input() {
+static std::vector<MixedTypedExample> examples_nchw_quant8_weight_as_input = {
 // Begin of an example
 {
 .operands = {
@@ -2039,8 +2217,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_quant8_weight_as_input;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_2() {
+static std::vector<MixedTypedExample> examples_nhwc_2 = {
 // Begin of an example
 {
 .operands = {
@@ -2073,8 +2254,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_weight_as_input_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_weight_as_input_2() {
+static std::vector<MixedTypedExample> examples_nhwc_weight_as_input_2 = {
 // Begin of an example
 {
 .operands = {
@@ -2107,8 +2291,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_weight_as_input_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed_2() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -2141,8 +2328,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed_weight_as_input_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed_weight_as_input_2() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed_weight_as_input_2 = {
 // Begin of an example
 {
 .operands = {
@@ -2175,8 +2365,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed_weight_as_input_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_quant8_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_quant8_2() {
+static std::vector<MixedTypedExample> examples_nhwc_quant8_2 = {
 // Begin of an example
 {
 .operands = {
@@ -2209,8 +2402,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_quant8_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_quant8_weight_as_input_2 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_quant8_weight_as_input_2() {
+static std::vector<MixedTypedExample> examples_nhwc_quant8_weight_as_input_2 = {
 // Begin of an example
 {
 .operands = {
@@ -2243,8 +2439,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_quant8_weight_as_input_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_2() {
+static std::vector<MixedTypedExample> examples_nchw_2 = {
 // Begin of an example
 {
 .operands = {
@@ -2277,8 +2476,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_weight_as_input_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_weight_as_input_2() {
+static std::vector<MixedTypedExample> examples_nchw_weight_as_input_2 = {
 // Begin of an example
 {
 .operands = {
@@ -2311,8 +2513,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_weight_as_input_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed_2() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed_2 = {
 // Begin of an example
 {
 .operands = {
@@ -2345,8 +2550,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed_weight_as_input_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed_weight_as_input_2() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed_weight_as_input_2 = {
 // Begin of an example
 {
 .operands = {
@@ -2379,8 +2587,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed_weight_as_input_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_quant8_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_quant8_2() {
+static std::vector<MixedTypedExample> examples_nchw_quant8_2 = {
 // Begin of an example
 {
 .operands = {
@@ -2413,8 +2624,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_quant8_2;
+};
 
-std::vector<MixedTypedExample> examples_nchw_quant8_weight_as_input_2 = {
+std::vector<MixedTypedExample>& get_examples_nchw_quant8_weight_as_input_2() {
+static std::vector<MixedTypedExample> examples_nchw_quant8_weight_as_input_2 = {
 // Begin of an example
 {
 .operands = {
@@ -2447,8 +2661,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_quant8_weight_as_input_2;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_3 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_3() {
+static std::vector<MixedTypedExample> examples_nhwc_3 = {
 // Begin of an example
 {
 .operands = {
@@ -2481,8 +2698,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_3;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_weight_as_input_3 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_weight_as_input_3() {
+static std::vector<MixedTypedExample> examples_nhwc_weight_as_input_3 = {
 // Begin of an example
 {
 .operands = {
@@ -2515,8 +2735,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_weight_as_input_3;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed_3 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed_3() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed_3 = {
 // Begin of an example
 {
 .operands = {
@@ -2549,8 +2772,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed_3;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed_weight_as_input_3 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed_weight_as_input_3() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed_weight_as_input_3 = {
 // Begin of an example
 {
 .operands = {
@@ -2583,8 +2809,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed_weight_as_input_3;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_quant8_3 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_quant8_3() {
+static std::vector<MixedTypedExample> examples_nhwc_quant8_3 = {
 // Begin of an example
 {
 .operands = {
@@ -2617,8 +2846,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_quant8_3;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_quant8_weight_as_input_3 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_quant8_weight_as_input_3() {
+static std::vector<MixedTypedExample> examples_nhwc_quant8_weight_as_input_3 = {
 // Begin of an example
 {
 .operands = {
@@ -2651,8 +2883,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_quant8_weight_as_input_3;
+};
 
-std::vector<MixedTypedExample> examples_nchw_3 = {
+std::vector<MixedTypedExample>& get_examples_nchw_3() {
+static std::vector<MixedTypedExample> examples_nchw_3 = {
 // Begin of an example
 {
 .operands = {
@@ -2685,8 +2920,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_3;
+};
 
-std::vector<MixedTypedExample> examples_nchw_weight_as_input_3 = {
+std::vector<MixedTypedExample>& get_examples_nchw_weight_as_input_3() {
+static std::vector<MixedTypedExample> examples_nchw_weight_as_input_3 = {
 // Begin of an example
 {
 .operands = {
@@ -2719,8 +2957,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_weight_as_input_3;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed_3 = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed_3() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed_3 = {
 // Begin of an example
 {
 .operands = {
@@ -2753,8 +2994,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed_3;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed_weight_as_input_3 = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed_weight_as_input_3() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed_weight_as_input_3 = {
 // Begin of an example
 {
 .operands = {
@@ -2787,8 +3031,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed_weight_as_input_3;
+};
 
-std::vector<MixedTypedExample> examples_nchw_quant8_3 = {
+std::vector<MixedTypedExample>& get_examples_nchw_quant8_3() {
+static std::vector<MixedTypedExample> examples_nchw_quant8_3 = {
 // Begin of an example
 {
 .operands = {
@@ -2821,8 +3068,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_quant8_3;
+};
 
-std::vector<MixedTypedExample> examples_nchw_quant8_weight_as_input_3 = {
+std::vector<MixedTypedExample>& get_examples_nchw_quant8_weight_as_input_3() {
+static std::vector<MixedTypedExample> examples_nchw_quant8_weight_as_input_3 = {
 // Begin of an example
 {
 .operands = {
@@ -2855,8 +3105,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_quant8_weight_as_input_3;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_4 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_4() {
+static std::vector<MixedTypedExample> examples_nhwc_4 = {
 // Begin of an example
 {
 .operands = {
@@ -2889,8 +3142,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_4;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_weight_as_input_4 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_weight_as_input_4() {
+static std::vector<MixedTypedExample> examples_nhwc_weight_as_input_4 = {
 // Begin of an example
 {
 .operands = {
@@ -2923,8 +3179,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_weight_as_input_4;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed_4 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed_4() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed_4 = {
 // Begin of an example
 {
 .operands = {
@@ -2957,8 +3216,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed_4;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_relaxed_weight_as_input_4 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_relaxed_weight_as_input_4() {
+static std::vector<MixedTypedExample> examples_nhwc_relaxed_weight_as_input_4 = {
 // Begin of an example
 {
 .operands = {
@@ -2991,8 +3253,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_relaxed_weight_as_input_4;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_quant8_4 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_quant8_4() {
+static std::vector<MixedTypedExample> examples_nhwc_quant8_4 = {
 // Begin of an example
 {
 .operands = {
@@ -3025,8 +3290,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_quant8_4;
+};
 
-std::vector<MixedTypedExample> examples_nhwc_quant8_weight_as_input_4 = {
+std::vector<MixedTypedExample>& get_examples_nhwc_quant8_weight_as_input_4() {
+static std::vector<MixedTypedExample> examples_nhwc_quant8_weight_as_input_4 = {
 // Begin of an example
 {
 .operands = {
@@ -3059,8 +3327,11 @@
 },
 }, // End of an example
 };
+return examples_nhwc_quant8_weight_as_input_4;
+};
 
-std::vector<MixedTypedExample> examples_nchw_4 = {
+std::vector<MixedTypedExample>& get_examples_nchw_4() {
+static std::vector<MixedTypedExample> examples_nchw_4 = {
 // Begin of an example
 {
 .operands = {
@@ -3093,8 +3364,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_4;
+};
 
-std::vector<MixedTypedExample> examples_nchw_weight_as_input_4 = {
+std::vector<MixedTypedExample>& get_examples_nchw_weight_as_input_4() {
+static std::vector<MixedTypedExample> examples_nchw_weight_as_input_4 = {
 // Begin of an example
 {
 .operands = {
@@ -3127,8 +3401,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_weight_as_input_4;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed_4 = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed_4() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed_4 = {
 // Begin of an example
 {
 .operands = {
@@ -3161,8 +3438,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed_4;
+};
 
-std::vector<MixedTypedExample> examples_nchw_relaxed_weight_as_input_4 = {
+std::vector<MixedTypedExample>& get_examples_nchw_relaxed_weight_as_input_4() {
+static std::vector<MixedTypedExample> examples_nchw_relaxed_weight_as_input_4 = {
 // Begin of an example
 {
 .operands = {
@@ -3195,8 +3475,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_relaxed_weight_as_input_4;
+};
 
-std::vector<MixedTypedExample> examples_nchw_quant8_4 = {
+std::vector<MixedTypedExample>& get_examples_nchw_quant8_4() {
+static std::vector<MixedTypedExample> examples_nchw_quant8_4 = {
 // Begin of an example
 {
 .operands = {
@@ -3229,8 +3512,11 @@
 },
 }, // End of an example
 };
+return examples_nchw_quant8_4;
+};
 
-std::vector<MixedTypedExample> examples_nchw_quant8_weight_as_input_4 = {
+std::vector<MixedTypedExample>& get_examples_nchw_quant8_weight_as_input_4() {
+static std::vector<MixedTypedExample> examples_nchw_quant8_weight_as_input_4 = {
 // Begin of an example
 {
 .operands = {
@@ -3263,4 +3549,6 @@
 },
 }, // End of an example
 };
+return examples_nchw_quant8_weight_as_input_4;
+};
 
diff --git a/nn/runtime/test/generated/examples/argmin_2_quant8.example.cpp b/nn/runtime/test/generated/examples/transpose_float16.example.cpp
similarity index 68%
rename from nn/runtime/test/generated/examples/argmin_2_quant8.example.cpp
rename to nn/runtime/test/generated/examples/transpose_float16.example.cpp
index ce68216..05ff734 100644
--- a/nn/runtime/test/generated/examples/argmin_2_quant8.example.cpp
+++ b/nn/runtime/test/generated/examples/transpose_float16.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
-// Generated file (from: argmin_2_quant8.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+// Generated file (from: transpose_float16.mod.py). Do not edit
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -11,26 +12,28 @@
   // int -> INT32 map
   {},
   // int -> QUANT8_ASYMM map
-  {{0, {1, 2, 4, 3}}},
+  {},
   // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
-  {},
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f}}},
 },
 //Output(s)
 { // See tools/test_generator/include/TestHarness.h:MixedTyped
   // int -> FLOAT32 map
   {},
   // int -> INT32 map
-  {{0, {0, 0}}},
+  {},
   // int -> QUANT8_ASYMM map
   {},
   // int -> QUANT16_SYMM map
   {},
   // int -> FLOAT16 map
-  {},
+  {{0, {1.0f, 3.0f, 2.0f, 4.0f}}},
 }
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/transpose_float_1.example.cpp b/nn/runtime/test/generated/examples/transpose_float_1.example.cpp
index 21972b6..7a75394 100644
--- a/nn/runtime/test/generated/examples/transpose_float_1.example.cpp
+++ b/nn/runtime/test/generated/examples/transpose_float_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: transpose_float_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/transpose_float_1_relaxed.example.cpp b/nn/runtime/test/generated/examples/transpose_float_1_relaxed.example.cpp
index 2427545..1c1078c 100644
--- a/nn/runtime/test/generated/examples/transpose_float_1_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/transpose_float_1_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: transpose_float_1_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/transpose_quant8_1.example.cpp b/nn/runtime/test/generated/examples/transpose_quant8_1.example.cpp
index ec03779..9653f63 100644
--- a/nn/runtime/test/generated/examples/transpose_quant8_1.example.cpp
+++ b/nn/runtime/test/generated/examples/transpose_quant8_1.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: transpose_quant8_1.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/transpose_relaxed.example.cpp b/nn/runtime/test/generated/examples/transpose_relaxed.example.cpp
index 159c5fa..9f7dcff 100644
--- a/nn/runtime/test/generated/examples/transpose_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/transpose_relaxed.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: transpose_relaxed.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,4 +34,6 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
diff --git a/nn/runtime/test/generated/examples/transpose_v1_2.example.cpp b/nn/runtime/test/generated/examples/transpose_v1_2.example.cpp
index cf91530..fa4abc0 100644
--- a/nn/runtime/test/generated/examples/transpose_v1_2.example.cpp
+++ b/nn/runtime/test/generated/examples/transpose_v1_2.example.cpp
@@ -1,6 +1,7 @@
 // clang-format off
 // Generated file (from: transpose_v1_2.mod.py). Do not edit
-std::vector<MixedTypedExample> examples = {
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
 // Begin of an example
 {
 .operands = {
@@ -33,8 +34,11 @@
 },
 }, // End of an example
 };
+return examples;
+};
 
-std::vector<MixedTypedExample> examples_relaxed = {
+std::vector<MixedTypedExample>& get_examples_relaxed() {
+static std::vector<MixedTypedExample> examples_relaxed = {
 // Begin of an example
 {
 .operands = {
@@ -67,8 +71,11 @@
 },
 }, // End of an example
 };
+return examples_relaxed;
+};
 
-std::vector<MixedTypedExample> examples_quant8 = {
+std::vector<MixedTypedExample>& get_examples_quant8() {
+static std::vector<MixedTypedExample> examples_quant8 = {
 // Begin of an example
 {
 .operands = {
@@ -101,4 +108,6 @@
 },
 }, // End of an example
 };
+return examples_quant8;
+};
 
diff --git a/nn/runtime/test/generated/models/abs.model.cpp b/nn/runtime/test/generated/models/abs.model.cpp
new file mode 100644
index 0000000..8aa5748
--- /dev/null
+++ b/nn/runtime/test/generated/models/abs.model.cpp
@@ -0,0 +1,61 @@
+// clang-format off
+// Generated file (from: abs.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 3, 4, 5});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type0);
+  auto output0 = model->addOperand(&type0);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_ABS, {input0}, {output0});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output0});
+  assert(model->isValid());
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 3, 4, 5});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type0);
+  auto output0 = model->addOperand(&type0);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_ABS, {input0}, {output0});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output0});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16(Model *model) {
+  OperandType type1(Type::TENSOR_FLOAT16, {1, 2, 3, 4, 5});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type1);
+  auto output0 = model->addOperand(&type1);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_ABS, {input0}, {output0});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output0});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/models/argmax_1.model.cpp b/nn/runtime/test/generated/models/argmax_1.model.cpp
new file mode 100644
index 0000000..b658028
--- /dev/null
+++ b/nn/runtime/test/generated/models/argmax_1.model.cpp
@@ -0,0 +1,124 @@
+// clang-format off
+// Generated file (from: argmax_1.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type0);
+  auto axis = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t axis_init[] = {1};
+  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_ARGMAX, {input0, axis}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output});
+  assert(model->isValid());
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type0);
+  auto axis = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t axis_init[] = {1};
+  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_ARGMAX, {input0, axis}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {2});
+  OperandType type3(Type::TENSOR_FLOAT16, {2, 2});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type3);
+  auto axis = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t axis_init[] = {1};
+  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_ARGMAX, {input0, axis}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_int32(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {2});
+  OperandType type4(Type::TENSOR_INT32, {2, 2});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type4);
+  auto axis = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t axis_init[] = {1};
+  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_ARGMAX, {input0, axis}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_int32(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_quant8(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {2});
+  OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2}, 1.0f, 0);
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type5);
+  auto axis = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t axis_init[] = {1};
+  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_ARGMAX, {input0, axis}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_quant8(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/models/argmax_1_float.model.cpp b/nn/runtime/test/generated/models/argmax_1_float.model.cpp
deleted file mode 100644
index c13b36d..0000000
--- a/nn/runtime/test/generated/models/argmax_1_float.model.cpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// clang-format off
-// Generated file (from: argmax_1_float.mod.py). Do not edit
-void CreateModel(Model *model) {
-  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
-  OperandType type1(Type::INT32, {});
-  OperandType type2(Type::TENSOR_INT32, {2});
-  // Phase 1, operands
-  auto input0 = model->addOperand(&type0);
-  auto axis = model->addOperand(&type1);
-  auto output = model->addOperand(&type2);
-  // Phase 2, operations
-  static int32_t axis_init[] = {1};
-  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_ARGMAX, {input0, axis}, {output});
-  // Phase 3, inputs and outputs
-  model->identifyInputsAndOutputs(
-    {input0},
-    {output});
-  assert(model->isValid());
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/nn/runtime/test/generated/models/argmax_1_float_relaxed.model.cpp b/nn/runtime/test/generated/models/argmax_1_float_relaxed.model.cpp
deleted file mode 100644
index cbdbc36..0000000
--- a/nn/runtime/test/generated/models/argmax_1_float_relaxed.model.cpp
+++ /dev/null
@@ -1,28 +0,0 @@
-// clang-format off
-// Generated file (from: argmax_1_float_relaxed.mod.py). Do not edit
-void CreateModel(Model *model) {
-  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
-  OperandType type1(Type::INT32, {});
-  OperandType type2(Type::TENSOR_INT32, {2});
-  // Phase 1, operands
-  auto input0 = model->addOperand(&type0);
-  auto axis = model->addOperand(&type1);
-  auto output = model->addOperand(&type2);
-  // Phase 2, operations
-  static int32_t axis_init[] = {1};
-  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_ARGMAX, {input0, axis}, {output});
-  // Phase 3, inputs and outputs
-  model->identifyInputsAndOutputs(
-    {input0},
-    {output});
-  // Phase 4: set relaxed execution
-  model->relaxComputationFloat32toFloat16(true);
-  assert(model->isValid());
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/nn/runtime/test/generated/models/argmax_1_int32.model.cpp b/nn/runtime/test/generated/models/argmax_1_int32.model.cpp
deleted file mode 100644
index 4125f1c..0000000
--- a/nn/runtime/test/generated/models/argmax_1_int32.model.cpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// clang-format off
-// Generated file (from: argmax_1_int32.mod.py). Do not edit
-void CreateModel(Model *model) {
-  OperandType type0(Type::TENSOR_INT32, {2, 2});
-  OperandType type1(Type::INT32, {});
-  OperandType type2(Type::TENSOR_INT32, {2});
-  // Phase 1, operands
-  auto input0 = model->addOperand(&type0);
-  auto axis = model->addOperand(&type1);
-  auto output = model->addOperand(&type2);
-  // Phase 2, operations
-  static int32_t axis_init[] = {1};
-  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_ARGMAX, {input0, axis}, {output});
-  // Phase 3, inputs and outputs
-  model->identifyInputsAndOutputs(
-    {input0},
-    {output});
-  assert(model->isValid());
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/nn/runtime/test/generated/models/argmax_1_quant8.model.cpp b/nn/runtime/test/generated/models/argmax_1_quant8.model.cpp
deleted file mode 100644
index de432ad..0000000
--- a/nn/runtime/test/generated/models/argmax_1_quant8.model.cpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// clang-format off
-// Generated file (from: argmax_1_quant8.mod.py). Do not edit
-void CreateModel(Model *model) {
-  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {2, 2}, 1.0f, 0);
-  OperandType type1(Type::INT32, {});
-  OperandType type2(Type::TENSOR_INT32, {2});
-  // Phase 1, operands
-  auto input0 = model->addOperand(&type0);
-  auto axis = model->addOperand(&type1);
-  auto output = model->addOperand(&type2);
-  // Phase 2, operations
-  static int32_t axis_init[] = {1};
-  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_ARGMAX, {input0, axis}, {output});
-  // Phase 3, inputs and outputs
-  model->identifyInputsAndOutputs(
-    {input0},
-    {output});
-  assert(model->isValid());
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/nn/runtime/test/generated/models/argmax_2.model.cpp b/nn/runtime/test/generated/models/argmax_2.model.cpp
new file mode 100644
index 0000000..0d14e55
--- /dev/null
+++ b/nn/runtime/test/generated/models/argmax_2.model.cpp
@@ -0,0 +1,124 @@
+// clang-format off
+// Generated file (from: argmax_2.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type0);
+  auto axis = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t axis_init[] = {0};
+  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_ARGMAX, {input0, axis}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output});
+  assert(model->isValid());
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type0);
+  auto axis = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t axis_init[] = {0};
+  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_ARGMAX, {input0, axis}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {2});
+  OperandType type3(Type::TENSOR_FLOAT16, {2, 2});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type3);
+  auto axis = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t axis_init[] = {0};
+  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_ARGMAX, {input0, axis}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_int32(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {2});
+  OperandType type4(Type::TENSOR_INT32, {2, 2});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type4);
+  auto axis = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t axis_init[] = {0};
+  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_ARGMAX, {input0, axis}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_int32(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_quant8(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {2});
+  OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2}, 1.0f, 0);
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type5);
+  auto axis = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t axis_init[] = {0};
+  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_ARGMAX, {input0, axis}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_quant8(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/models/argmax_2_float.model.cpp b/nn/runtime/test/generated/models/argmax_2_float.model.cpp
deleted file mode 100644
index 086eb55..0000000
--- a/nn/runtime/test/generated/models/argmax_2_float.model.cpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// clang-format off
-// Generated file (from: argmax_2_float.mod.py). Do not edit
-void CreateModel(Model *model) {
-  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
-  OperandType type1(Type::INT32, {});
-  OperandType type2(Type::TENSOR_INT32, {2});
-  // Phase 1, operands
-  auto input0 = model->addOperand(&type0);
-  auto axis = model->addOperand(&type1);
-  auto output = model->addOperand(&type2);
-  // Phase 2, operations
-  static int32_t axis_init[] = {0};
-  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_ARGMAX, {input0, axis}, {output});
-  // Phase 3, inputs and outputs
-  model->identifyInputsAndOutputs(
-    {input0},
-    {output});
-  assert(model->isValid());
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/nn/runtime/test/generated/models/argmax_2_float_relaxed.model.cpp b/nn/runtime/test/generated/models/argmax_2_float_relaxed.model.cpp
deleted file mode 100644
index 0fdf792..0000000
--- a/nn/runtime/test/generated/models/argmax_2_float_relaxed.model.cpp
+++ /dev/null
@@ -1,28 +0,0 @@
-// clang-format off
-// Generated file (from: argmax_2_float_relaxed.mod.py). Do not edit
-void CreateModel(Model *model) {
-  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
-  OperandType type1(Type::INT32, {});
-  OperandType type2(Type::TENSOR_INT32, {2});
-  // Phase 1, operands
-  auto input0 = model->addOperand(&type0);
-  auto axis = model->addOperand(&type1);
-  auto output = model->addOperand(&type2);
-  // Phase 2, operations
-  static int32_t axis_init[] = {0};
-  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_ARGMAX, {input0, axis}, {output});
-  // Phase 3, inputs and outputs
-  model->identifyInputsAndOutputs(
-    {input0},
-    {output});
-  // Phase 4: set relaxed execution
-  model->relaxComputationFloat32toFloat16(true);
-  assert(model->isValid());
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/nn/runtime/test/generated/models/argmax_2_int32.model.cpp b/nn/runtime/test/generated/models/argmax_2_int32.model.cpp
deleted file mode 100644
index cd9bc09..0000000
--- a/nn/runtime/test/generated/models/argmax_2_int32.model.cpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// clang-format off
-// Generated file (from: argmax_2_int32.mod.py). Do not edit
-void CreateModel(Model *model) {
-  OperandType type0(Type::TENSOR_INT32, {2, 2});
-  OperandType type1(Type::INT32, {});
-  OperandType type2(Type::TENSOR_INT32, {2});
-  // Phase 1, operands
-  auto input0 = model->addOperand(&type0);
-  auto axis = model->addOperand(&type1);
-  auto output = model->addOperand(&type2);
-  // Phase 2, operations
-  static int32_t axis_init[] = {0};
-  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_ARGMAX, {input0, axis}, {output});
-  // Phase 3, inputs and outputs
-  model->identifyInputsAndOutputs(
-    {input0},
-    {output});
-  assert(model->isValid());
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/nn/runtime/test/generated/models/argmax_2_quant8.model.cpp b/nn/runtime/test/generated/models/argmax_2_quant8.model.cpp
deleted file mode 100644
index 092183b..0000000
--- a/nn/runtime/test/generated/models/argmax_2_quant8.model.cpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// clang-format off
-// Generated file (from: argmax_2_quant8.mod.py). Do not edit
-void CreateModel(Model *model) {
-  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {2, 2}, 1.0f, 0);
-  OperandType type1(Type::INT32, {});
-  OperandType type2(Type::TENSOR_INT32, {2});
-  // Phase 1, operands
-  auto input0 = model->addOperand(&type0);
-  auto axis = model->addOperand(&type1);
-  auto output = model->addOperand(&type2);
-  // Phase 2, operations
-  static int32_t axis_init[] = {0};
-  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_ARGMAX, {input0, axis}, {output});
-  // Phase 3, inputs and outputs
-  model->identifyInputsAndOutputs(
-    {input0},
-    {output});
-  assert(model->isValid());
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/nn/runtime/test/generated/models/argmax_3.model.cpp b/nn/runtime/test/generated/models/argmax_3.model.cpp
new file mode 100644
index 0000000..5e30d83
--- /dev/null
+++ b/nn/runtime/test/generated/models/argmax_3.model.cpp
@@ -0,0 +1,124 @@
+// clang-format off
+// Generated file (from: argmax_3.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type0);
+  auto axis = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t axis_init[] = {-1};
+  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_ARGMAX, {input0, axis}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output});
+  assert(model->isValid());
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type0);
+  auto axis = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t axis_init[] = {-1};
+  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_ARGMAX, {input0, axis}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {2});
+  OperandType type3(Type::TENSOR_FLOAT16, {2, 2});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type3);
+  auto axis = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t axis_init[] = {-1};
+  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_ARGMAX, {input0, axis}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_int32(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {2});
+  OperandType type4(Type::TENSOR_INT32, {2, 2});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type4);
+  auto axis = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t axis_init[] = {-1};
+  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_ARGMAX, {input0, axis}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_int32(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_quant8(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {2});
+  OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2}, 1.0f, 0);
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type5);
+  auto axis = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t axis_init[] = {-1};
+  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_ARGMAX, {input0, axis}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_quant8(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/models/argmax_3_float.model.cpp b/nn/runtime/test/generated/models/argmax_3_float.model.cpp
deleted file mode 100644
index 48c2a8b..0000000
--- a/nn/runtime/test/generated/models/argmax_3_float.model.cpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// clang-format off
-// Generated file (from: argmax_3_float.mod.py). Do not edit
-void CreateModel(Model *model) {
-  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
-  OperandType type1(Type::INT32, {});
-  OperandType type2(Type::TENSOR_INT32, {2});
-  // Phase 1, operands
-  auto input0 = model->addOperand(&type0);
-  auto axis = model->addOperand(&type1);
-  auto output = model->addOperand(&type2);
-  // Phase 2, operations
-  static int32_t axis_init[] = {-1};
-  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_ARGMAX, {input0, axis}, {output});
-  // Phase 3, inputs and outputs
-  model->identifyInputsAndOutputs(
-    {input0},
-    {output});
-  assert(model->isValid());
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/nn/runtime/test/generated/models/argmin_1.model.cpp b/nn/runtime/test/generated/models/argmin_1.model.cpp
new file mode 100644
index 0000000..ae863ce
--- /dev/null
+++ b/nn/runtime/test/generated/models/argmin_1.model.cpp
@@ -0,0 +1,124 @@
+// clang-format off
+// Generated file (from: argmin_1.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type0);
+  auto axis = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t axis_init[] = {1};
+  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_ARGMIN, {input0, axis}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output});
+  assert(model->isValid());
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type0);
+  auto axis = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t axis_init[] = {1};
+  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_ARGMIN, {input0, axis}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {2});
+  OperandType type3(Type::TENSOR_FLOAT16, {2, 2});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type3);
+  auto axis = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t axis_init[] = {1};
+  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_ARGMIN, {input0, axis}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_int32(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {2});
+  OperandType type4(Type::TENSOR_INT32, {2, 2});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type4);
+  auto axis = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t axis_init[] = {1};
+  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_ARGMIN, {input0, axis}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_int32(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_quant8(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {2});
+  OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2}, 1.0f, 0);
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type5);
+  auto axis = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t axis_init[] = {1};
+  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_ARGMIN, {input0, axis}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_quant8(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/models/argmin_1_float.model.cpp b/nn/runtime/test/generated/models/argmin_1_float.model.cpp
deleted file mode 100644
index 5a8463f..0000000
--- a/nn/runtime/test/generated/models/argmin_1_float.model.cpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// clang-format off
-// Generated file (from: argmin_1_float.mod.py). Do not edit
-void CreateModel(Model *model) {
-  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
-  OperandType type1(Type::INT32, {});
-  OperandType type2(Type::TENSOR_INT32, {2});
-  // Phase 1, operands
-  auto input0 = model->addOperand(&type0);
-  auto axis = model->addOperand(&type1);
-  auto output = model->addOperand(&type2);
-  // Phase 2, operations
-  static int32_t axis_init[] = {1};
-  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_ARGMIN, {input0, axis}, {output});
-  // Phase 3, inputs and outputs
-  model->identifyInputsAndOutputs(
-    {input0},
-    {output});
-  assert(model->isValid());
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/nn/runtime/test/generated/models/argmin_1_float_relaxed.model.cpp b/nn/runtime/test/generated/models/argmin_1_float_relaxed.model.cpp
deleted file mode 100644
index ce9b2ae..0000000
--- a/nn/runtime/test/generated/models/argmin_1_float_relaxed.model.cpp
+++ /dev/null
@@ -1,28 +0,0 @@
-// clang-format off
-// Generated file (from: argmin_1_float_relaxed.mod.py). Do not edit
-void CreateModel(Model *model) {
-  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
-  OperandType type1(Type::INT32, {});
-  OperandType type2(Type::TENSOR_INT32, {2});
-  // Phase 1, operands
-  auto input0 = model->addOperand(&type0);
-  auto axis = model->addOperand(&type1);
-  auto output = model->addOperand(&type2);
-  // Phase 2, operations
-  static int32_t axis_init[] = {1};
-  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_ARGMIN, {input0, axis}, {output});
-  // Phase 3, inputs and outputs
-  model->identifyInputsAndOutputs(
-    {input0},
-    {output});
-  // Phase 4: set relaxed execution
-  model->relaxComputationFloat32toFloat16(true);
-  assert(model->isValid());
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/nn/runtime/test/generated/models/argmin_1_int32.model.cpp b/nn/runtime/test/generated/models/argmin_1_int32.model.cpp
deleted file mode 100644
index 146c3d9..0000000
--- a/nn/runtime/test/generated/models/argmin_1_int32.model.cpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// clang-format off
-// Generated file (from: argmin_1_int32.mod.py). Do not edit
-void CreateModel(Model *model) {
-  OperandType type0(Type::TENSOR_INT32, {2, 2});
-  OperandType type1(Type::INT32, {});
-  OperandType type2(Type::TENSOR_INT32, {2});
-  // Phase 1, operands
-  auto input0 = model->addOperand(&type0);
-  auto axis = model->addOperand(&type1);
-  auto output = model->addOperand(&type2);
-  // Phase 2, operations
-  static int32_t axis_init[] = {1};
-  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_ARGMIN, {input0, axis}, {output});
-  // Phase 3, inputs and outputs
-  model->identifyInputsAndOutputs(
-    {input0},
-    {output});
-  assert(model->isValid());
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/nn/runtime/test/generated/models/argmin_1_quant8.model.cpp b/nn/runtime/test/generated/models/argmin_1_quant8.model.cpp
deleted file mode 100644
index aaf7180..0000000
--- a/nn/runtime/test/generated/models/argmin_1_quant8.model.cpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// clang-format off
-// Generated file (from: argmin_1_quant8.mod.py). Do not edit
-void CreateModel(Model *model) {
-  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {2, 2}, 1.0f, 0);
-  OperandType type1(Type::INT32, {});
-  OperandType type2(Type::TENSOR_INT32, {2});
-  // Phase 1, operands
-  auto input0 = model->addOperand(&type0);
-  auto axis = model->addOperand(&type1);
-  auto output = model->addOperand(&type2);
-  // Phase 2, operations
-  static int32_t axis_init[] = {1};
-  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_ARGMIN, {input0, axis}, {output});
-  // Phase 3, inputs and outputs
-  model->identifyInputsAndOutputs(
-    {input0},
-    {output});
-  assert(model->isValid());
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/nn/runtime/test/generated/models/argmin_2.model.cpp b/nn/runtime/test/generated/models/argmin_2.model.cpp
new file mode 100644
index 0000000..7be0582
--- /dev/null
+++ b/nn/runtime/test/generated/models/argmin_2.model.cpp
@@ -0,0 +1,124 @@
+// clang-format off
+// Generated file (from: argmin_2.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type0);
+  auto axis = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t axis_init[] = {0};
+  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_ARGMIN, {input0, axis}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output});
+  assert(model->isValid());
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type0);
+  auto axis = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t axis_init[] = {0};
+  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_ARGMIN, {input0, axis}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {2});
+  OperandType type3(Type::TENSOR_FLOAT16, {2, 2});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type3);
+  auto axis = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t axis_init[] = {0};
+  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_ARGMIN, {input0, axis}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_int32(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {2});
+  OperandType type4(Type::TENSOR_INT32, {2, 2});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type4);
+  auto axis = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t axis_init[] = {0};
+  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_ARGMIN, {input0, axis}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_int32(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_quant8(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {2});
+  OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2}, 1.0f, 0);
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type5);
+  auto axis = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t axis_init[] = {0};
+  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_ARGMIN, {input0, axis}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_quant8(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/models/argmin_2_float.model.cpp b/nn/runtime/test/generated/models/argmin_2_float.model.cpp
deleted file mode 100644
index 213dfdd..0000000
--- a/nn/runtime/test/generated/models/argmin_2_float.model.cpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// clang-format off
-// Generated file (from: argmin_2_float.mod.py). Do not edit
-void CreateModel(Model *model) {
-  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
-  OperandType type1(Type::INT32, {});
-  OperandType type2(Type::TENSOR_INT32, {2});
-  // Phase 1, operands
-  auto input0 = model->addOperand(&type0);
-  auto axis = model->addOperand(&type1);
-  auto output = model->addOperand(&type2);
-  // Phase 2, operations
-  static int32_t axis_init[] = {0};
-  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_ARGMIN, {input0, axis}, {output});
-  // Phase 3, inputs and outputs
-  model->identifyInputsAndOutputs(
-    {input0},
-    {output});
-  assert(model->isValid());
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/nn/runtime/test/generated/models/argmin_2_float_relaxed.model.cpp b/nn/runtime/test/generated/models/argmin_2_float_relaxed.model.cpp
deleted file mode 100644
index 3274a70..0000000
--- a/nn/runtime/test/generated/models/argmin_2_float_relaxed.model.cpp
+++ /dev/null
@@ -1,28 +0,0 @@
-// clang-format off
-// Generated file (from: argmin_2_float_relaxed.mod.py). Do not edit
-void CreateModel(Model *model) {
-  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
-  OperandType type1(Type::INT32, {});
-  OperandType type2(Type::TENSOR_INT32, {2});
-  // Phase 1, operands
-  auto input0 = model->addOperand(&type0);
-  auto axis = model->addOperand(&type1);
-  auto output = model->addOperand(&type2);
-  // Phase 2, operations
-  static int32_t axis_init[] = {0};
-  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_ARGMIN, {input0, axis}, {output});
-  // Phase 3, inputs and outputs
-  model->identifyInputsAndOutputs(
-    {input0},
-    {output});
-  // Phase 4: set relaxed execution
-  model->relaxComputationFloat32toFloat16(true);
-  assert(model->isValid());
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/nn/runtime/test/generated/models/argmin_2_int32.model.cpp b/nn/runtime/test/generated/models/argmin_2_int32.model.cpp
deleted file mode 100644
index af43892..0000000
--- a/nn/runtime/test/generated/models/argmin_2_int32.model.cpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// clang-format off
-// Generated file (from: argmin_2_int32.mod.py). Do not edit
-void CreateModel(Model *model) {
-  OperandType type0(Type::TENSOR_INT32, {2, 2});
-  OperandType type1(Type::INT32, {});
-  OperandType type2(Type::TENSOR_INT32, {2});
-  // Phase 1, operands
-  auto input0 = model->addOperand(&type0);
-  auto axis = model->addOperand(&type1);
-  auto output = model->addOperand(&type2);
-  // Phase 2, operations
-  static int32_t axis_init[] = {0};
-  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_ARGMIN, {input0, axis}, {output});
-  // Phase 3, inputs and outputs
-  model->identifyInputsAndOutputs(
-    {input0},
-    {output});
-  assert(model->isValid());
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/nn/runtime/test/generated/models/argmin_2_quant8.model.cpp b/nn/runtime/test/generated/models/argmin_2_quant8.model.cpp
deleted file mode 100644
index 16804f4..0000000
--- a/nn/runtime/test/generated/models/argmin_2_quant8.model.cpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// clang-format off
-// Generated file (from: argmin_2_quant8.mod.py). Do not edit
-void CreateModel(Model *model) {
-  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {2, 2}, 1.0f, 0);
-  OperandType type1(Type::INT32, {});
-  OperandType type2(Type::TENSOR_INT32, {2});
-  // Phase 1, operands
-  auto input0 = model->addOperand(&type0);
-  auto axis = model->addOperand(&type1);
-  auto output = model->addOperand(&type2);
-  // Phase 2, operations
-  static int32_t axis_init[] = {0};
-  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_ARGMIN, {input0, axis}, {output});
-  // Phase 3, inputs and outputs
-  model->identifyInputsAndOutputs(
-    {input0},
-    {output});
-  assert(model->isValid());
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/nn/runtime/test/generated/models/argmin_3.model.cpp b/nn/runtime/test/generated/models/argmin_3.model.cpp
new file mode 100644
index 0000000..101f3c8
--- /dev/null
+++ b/nn/runtime/test/generated/models/argmin_3.model.cpp
@@ -0,0 +1,124 @@
+// clang-format off
+// Generated file (from: argmin_3.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type0);
+  auto axis = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t axis_init[] = {-1};
+  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_ARGMIN, {input0, axis}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output});
+  assert(model->isValid());
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type0);
+  auto axis = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t axis_init[] = {-1};
+  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_ARGMIN, {input0, axis}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {2});
+  OperandType type3(Type::TENSOR_FLOAT16, {2, 2});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type3);
+  auto axis = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t axis_init[] = {-1};
+  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_ARGMIN, {input0, axis}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_int32(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {2});
+  OperandType type4(Type::TENSOR_INT32, {2, 2});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type4);
+  auto axis = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t axis_init[] = {-1};
+  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_ARGMIN, {input0, axis}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_int32(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_quant8(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {2});
+  OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2}, 1.0f, 0);
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type5);
+  auto axis = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t axis_init[] = {-1};
+  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_ARGMIN, {input0, axis}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_quant8(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/models/argmin_3_float.model.cpp b/nn/runtime/test/generated/models/argmin_3_float.model.cpp
deleted file mode 100644
index 86231d8..0000000
--- a/nn/runtime/test/generated/models/argmin_3_float.model.cpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// clang-format off
-// Generated file (from: argmin_3_float.mod.py). Do not edit
-void CreateModel(Model *model) {
-  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
-  OperandType type1(Type::INT32, {});
-  OperandType type2(Type::TENSOR_INT32, {2});
-  // Phase 1, operands
-  auto input0 = model->addOperand(&type0);
-  auto axis = model->addOperand(&type1);
-  auto output = model->addOperand(&type2);
-  // Phase 2, operations
-  static int32_t axis_init[] = {-1};
-  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_ARGMIN, {input0, axis}, {output});
-  // Phase 3, inputs and outputs
-  model->identifyInputsAndOutputs(
-    {input0},
-    {output});
-  assert(model->isValid());
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/nn/runtime/test/generated/models/batch_to_space_v1_2.model.cpp b/nn/runtime/test/generated/models/batch_to_space_v1_2.model.cpp
index cbdbee9..cb1615b 100644
--- a/nn/runtime/test/generated/models/batch_to_space_v1_2.model.cpp
+++ b/nn/runtime/test/generated/models/batch_to_space_v1_2.model.cpp
@@ -58,11 +58,11 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_nhwc_quant8(Model *model) {
+void CreateModel_nhwc_float16(Model *model) {
   OperandType type0(Type::BOOL, {});
   OperandType type3(Type::TENSOR_INT32, {2});
-  OperandType type6(Type::TENSOR_QUANT8_ASYMM, {4, 1, 1, 2}, 0.1f, 0);
-  OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.1f, 0);
+  OperandType type6(Type::TENSOR_FLOAT16, {4, 1, 1, 2});
+  OperandType type7(Type::TENSOR_FLOAT16, {1, 2, 2, 2});
   // Phase 1, operands
   auto op1 = model->addOperand(&type6);
   auto param = model->addOperand(&type3);
@@ -81,6 +81,34 @@
   assert(model->isValid());
 }
 
+inline bool is_ignored_nhwc_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nhwc_quant8(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type3(Type::TENSOR_INT32, {2});
+  OperandType type8(Type::TENSOR_QUANT8_ASYMM, {4, 1, 1, 2}, 0.1f, 0);
+  OperandType type9(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.1f, 0);
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type8);
+  auto param = model->addOperand(&type3);
+  auto layout = model->addOperand(&type0);
+  auto op4 = model->addOperand(&type9);
+  // Phase 2, operations
+  static int32_t param_init[] = {2, 2};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 2);
+  static bool layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op1, param, layout}, {op4});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1},
+    {op4});
+  assert(model->isValid());
+}
+
 inline bool is_ignored_nhwc_quant8(int i) {
   static std::set<int> ignore = {};
   return ignore.find(i) != ignore.end();
@@ -88,11 +116,11 @@
 
 void CreateModel_nchw(Model *model) {
   OperandType type0(Type::BOOL, {});
+  OperandType type10(Type::TENSOR_FLOAT32, {4, 2, 1, 1});
   OperandType type2(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
   OperandType type3(Type::TENSOR_INT32, {2});
-  OperandType type8(Type::TENSOR_FLOAT32, {4, 2, 1, 1});
   // Phase 1, operands
-  auto op1 = model->addOperand(&type8);
+  auto op1 = model->addOperand(&type10);
   auto param = model->addOperand(&type3);
   auto layout = model->addOperand(&type0);
   auto op4 = model->addOperand(&type2);
@@ -116,11 +144,11 @@
 
 void CreateModel_nchw_relaxed(Model *model) {
   OperandType type0(Type::BOOL, {});
+  OperandType type10(Type::TENSOR_FLOAT32, {4, 2, 1, 1});
   OperandType type2(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
   OperandType type3(Type::TENSOR_INT32, {2});
-  OperandType type8(Type::TENSOR_FLOAT32, {4, 2, 1, 1});
   // Phase 1, operands
-  auto op1 = model->addOperand(&type8);
+  auto op1 = model->addOperand(&type10);
   auto param = model->addOperand(&type3);
   auto layout = model->addOperand(&type0);
   auto op4 = model->addOperand(&type2);
@@ -144,13 +172,13 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_nchw_quant8(Model *model) {
+void CreateModel_nchw_float16(Model *model) {
   OperandType type0(Type::BOOL, {});
+  OperandType type11(Type::TENSOR_FLOAT16, {4, 2, 1, 1});
   OperandType type3(Type::TENSOR_INT32, {2});
-  OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.1f, 0);
-  OperandType type9(Type::TENSOR_QUANT8_ASYMM, {4, 2, 1, 1}, 0.1f, 0);
+  OperandType type7(Type::TENSOR_FLOAT16, {1, 2, 2, 2});
   // Phase 1, operands
-  auto op1 = model->addOperand(&type9);
+  auto op1 = model->addOperand(&type11);
   auto param = model->addOperand(&type3);
   auto layout = model->addOperand(&type0);
   auto op4 = model->addOperand(&type7);
@@ -167,6 +195,34 @@
   assert(model->isValid());
 }
 
+inline bool is_ignored_nchw_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nchw_quant8(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type12(Type::TENSOR_QUANT8_ASYMM, {4, 2, 1, 1}, 0.1f, 0);
+  OperandType type3(Type::TENSOR_INT32, {2});
+  OperandType type9(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.1f, 0);
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type12);
+  auto param = model->addOperand(&type3);
+  auto layout = model->addOperand(&type0);
+  auto op4 = model->addOperand(&type9);
+  // Phase 2, operations
+  static int32_t param_init[] = {2, 2};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 2);
+  static bool layout_init[] = {true};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op1, param, layout}, {op4});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1},
+    {op4});
+  assert(model->isValid());
+}
+
 inline bool is_ignored_nchw_quant8(int i) {
   static std::set<int> ignore = {};
   return ignore.find(i) != ignore.end();
@@ -230,16 +286,44 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_nhwc_quant8_2(Model *model) {
+void CreateModel_nhwc_float16_2(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type10(Type::TENSOR_QUANT8_ASYMM, {4, 2, 2, 1}, 0.5f, 128);
-  OperandType type11(Type::TENSOR_QUANT8_ASYMM, {1, 4, 4, 1}, 0.5f, 128);
+  OperandType type13(Type::TENSOR_FLOAT16, {4, 2, 2, 1});
+  OperandType type14(Type::TENSOR_FLOAT16, {1, 4, 4, 1});
   OperandType type3(Type::TENSOR_INT32, {2});
   // Phase 1, operands
-  auto op11 = model->addOperand(&type10);
+  auto op11 = model->addOperand(&type13);
   auto param1 = model->addOperand(&type3);
   auto layout = model->addOperand(&type0);
-  auto op41 = model->addOperand(&type11);
+  auto op41 = model->addOperand(&type14);
+  // Phase 2, operations
+  static int32_t param1_init[] = {2, 2};
+  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
+  static bool layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op11, param1, layout}, {op41});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op11},
+    {op41});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nhwc_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nhwc_quant8_2(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type15(Type::TENSOR_QUANT8_ASYMM, {4, 2, 2, 1}, 0.5f, 128);
+  OperandType type16(Type::TENSOR_QUANT8_ASYMM, {1, 4, 4, 1}, 0.5f, 128);
+  OperandType type3(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto op11 = model->addOperand(&type15);
+  auto param1 = model->addOperand(&type3);
+  auto layout = model->addOperand(&type0);
+  auto op41 = model->addOperand(&type16);
   // Phase 2, operations
   static int32_t param1_init[] = {2, 2};
   model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
@@ -260,14 +344,14 @@
 
 void CreateModel_nchw_2(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type12(Type::TENSOR_FLOAT32, {4, 1, 2, 2});
-  OperandType type13(Type::TENSOR_FLOAT32, {1, 1, 4, 4});
+  OperandType type17(Type::TENSOR_FLOAT32, {4, 1, 2, 2});
+  OperandType type18(Type::TENSOR_FLOAT32, {1, 1, 4, 4});
   OperandType type3(Type::TENSOR_INT32, {2});
   // Phase 1, operands
-  auto op11 = model->addOperand(&type12);
+  auto op11 = model->addOperand(&type17);
   auto param1 = model->addOperand(&type3);
   auto layout = model->addOperand(&type0);
-  auto op41 = model->addOperand(&type13);
+  auto op41 = model->addOperand(&type18);
   // Phase 2, operations
   static int32_t param1_init[] = {2, 2};
   model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
@@ -288,14 +372,14 @@
 
 void CreateModel_nchw_relaxed_2(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type12(Type::TENSOR_FLOAT32, {4, 1, 2, 2});
-  OperandType type13(Type::TENSOR_FLOAT32, {1, 1, 4, 4});
+  OperandType type17(Type::TENSOR_FLOAT32, {4, 1, 2, 2});
+  OperandType type18(Type::TENSOR_FLOAT32, {1, 1, 4, 4});
   OperandType type3(Type::TENSOR_INT32, {2});
   // Phase 1, operands
-  auto op11 = model->addOperand(&type12);
+  auto op11 = model->addOperand(&type17);
   auto param1 = model->addOperand(&type3);
   auto layout = model->addOperand(&type0);
-  auto op41 = model->addOperand(&type13);
+  auto op41 = model->addOperand(&type18);
   // Phase 2, operations
   static int32_t param1_init[] = {2, 2};
   model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
@@ -316,16 +400,44 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_nchw_quant8_2(Model *model) {
+void CreateModel_nchw_float16_2(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type14(Type::TENSOR_QUANT8_ASYMM, {4, 1, 2, 2}, 0.5f, 128);
-  OperandType type15(Type::TENSOR_QUANT8_ASYMM, {1, 1, 4, 4}, 0.5f, 128);
+  OperandType type19(Type::TENSOR_FLOAT16, {4, 1, 2, 2});
+  OperandType type20(Type::TENSOR_FLOAT16, {1, 1, 4, 4});
   OperandType type3(Type::TENSOR_INT32, {2});
   // Phase 1, operands
-  auto op11 = model->addOperand(&type14);
+  auto op11 = model->addOperand(&type19);
   auto param1 = model->addOperand(&type3);
   auto layout = model->addOperand(&type0);
-  auto op41 = model->addOperand(&type15);
+  auto op41 = model->addOperand(&type20);
+  // Phase 2, operations
+  static int32_t param1_init[] = {2, 2};
+  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
+  static bool layout_init[] = {true};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op11, param1, layout}, {op41});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op11},
+    {op41});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nchw_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nchw_quant8_2(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type21(Type::TENSOR_QUANT8_ASYMM, {4, 1, 2, 2}, 0.5f, 128);
+  OperandType type22(Type::TENSOR_QUANT8_ASYMM, {1, 1, 4, 4}, 0.5f, 128);
+  OperandType type3(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto op11 = model->addOperand(&type21);
+  auto param1 = model->addOperand(&type3);
+  auto layout = model->addOperand(&type0);
+  auto op41 = model->addOperand(&type22);
   // Phase 2, operations
   static int32_t param1_init[] = {2, 2};
   model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
diff --git a/nn/runtime/test/generated/models/concat_float16_1.model.cpp b/nn/runtime/test/generated/models/concat_float16_1.model.cpp
new file mode 100644
index 0000000..ea11bf6
--- /dev/null
+++ b/nn/runtime/test/generated/models/concat_float16_1.model.cpp
@@ -0,0 +1,27 @@
+// clang-format off
+// Generated file (from: concat_float16_1.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT16, {2, 3});
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_FLOAT16, {4, 3});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type0);
+  auto axis0 = model->addOperand(&type1);
+  auto result = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t axis0_init[] = {0};
+  model->setOperandValue(axis0, axis0_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_CONCATENATION, {op1, op2, axis0}, {result});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1, op2},
+    {result});
+  assert(model->isValid());
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/models/concat_float16_2.model.cpp b/nn/runtime/test/generated/models/concat_float16_2.model.cpp
new file mode 100644
index 0000000..be5d0d8
--- /dev/null
+++ b/nn/runtime/test/generated/models/concat_float16_2.model.cpp
@@ -0,0 +1,28 @@
+// clang-format off
+// Generated file (from: concat_float16_2.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT16, {52, 230});
+  OperandType type1(Type::TENSOR_FLOAT16, {40, 230});
+  OperandType type2(Type::INT32, {});
+  OperandType type3(Type::TENSOR_FLOAT16, {92, 230});
+  // Phase 1, operands
+  auto input1 = model->addOperand(&type0);
+  auto input2 = model->addOperand(&type1);
+  auto axis0 = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t axis0_init[] = {0};
+  model->setOperandValue(axis0, axis0_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_CONCATENATION, {input1, input2, axis0}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input1, input2},
+    {output});
+  assert(model->isValid());
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/models/concat_float16_3.model.cpp b/nn/runtime/test/generated/models/concat_float16_3.model.cpp
new file mode 100644
index 0000000..8556ad6
--- /dev/null
+++ b/nn/runtime/test/generated/models/concat_float16_3.model.cpp
@@ -0,0 +1,28 @@
+// clang-format off
+// Generated file (from: concat_float16_3.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT16, {212, 60});
+  OperandType type1(Type::TENSOR_FLOAT16, {212, 30});
+  OperandType type2(Type::INT32, {});
+  OperandType type3(Type::TENSOR_FLOAT16, {212, 90});
+  // Phase 1, operands
+  auto input1 = model->addOperand(&type0);
+  auto input2 = model->addOperand(&type1);
+  auto axis1 = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t axis1_init[] = {1};
+  model->setOperandValue(axis1, axis1_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_CONCATENATION, {input1, input2, axis1}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input1, input2},
+    {output});
+  assert(model->isValid());
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/models/concat_mixed_quant.model.cpp b/nn/runtime/test/generated/models/concat_mixed_quant.model.cpp
new file mode 100644
index 0000000..0eac8da
--- /dev/null
+++ b/nn/runtime/test/generated/models/concat_mixed_quant.model.cpp
@@ -0,0 +1,62 @@
+// clang-format off
+// Generated file (from: concat_mixed_quant.mod.py). Do not edit
+void CreateModel_quant8(Model *model) {
+  OperandType type2(Type::INT32, {});
+  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {2, 1, 2}, 0.084f, 127);
+  OperandType type4(Type::TENSOR_QUANT8_ASYMM, {2, 1, 2}, 0.05f, 0);
+  OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 1, 2}, 0.089f, 123);
+  OperandType type6(Type::TENSOR_QUANT8_ASYMM, {2, 1, 2}, 0.029f, 0);
+  OperandType type7(Type::TENSOR_QUANT8_ASYMM, {2, 1, 8}, 0.1f, 127);
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type3);
+  auto input1 = model->addOperand(&type4);
+  auto input2 = model->addOperand(&type5);
+  auto input3 = model->addOperand(&type6);
+  auto param = model->addOperand(&type2);
+  auto output0 = model->addOperand(&type7);
+  // Phase 2, operations
+  static int32_t param_init[] = {2};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_CONCATENATION, {input0, input1, input2, input3, param}, {output0});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0, input1, input2, input3},
+    {output0});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_quant8(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_quant8_2(Model *model) {
+  OperandType type2(Type::INT32, {});
+  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {2, 1, 2}, 0.084f, 127);
+  OperandType type4(Type::TENSOR_QUANT8_ASYMM, {2, 1, 2}, 0.05f, 0);
+  OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 1, 2}, 0.089f, 123);
+  OperandType type6(Type::TENSOR_QUANT8_ASYMM, {2, 1, 2}, 0.029f, 0);
+  OperandType type8(Type::TENSOR_QUANT8_ASYMM, {2, 1, 8}, 0.0078125f, 127);
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type3);
+  auto input1 = model->addOperand(&type4);
+  auto input2 = model->addOperand(&type5);
+  auto input3 = model->addOperand(&type6);
+  auto param = model->addOperand(&type2);
+  auto output0 = model->addOperand(&type8);
+  // Phase 2, operations
+  static int32_t param_init[] = {2};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_CONCATENATION, {input0, input1, input2, input3, param}, {output0});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0, input1, input2, input3},
+    {output0});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_quant8_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/models/depth_to_space_v1_2.model.cpp b/nn/runtime/test/generated/models/depth_to_space_v1_2.model.cpp
index 5e8aa29..7c0f2b1 100644
--- a/nn/runtime/test/generated/models/depth_to_space_v1_2.model.cpp
+++ b/nn/runtime/test/generated/models/depth_to_space_v1_2.model.cpp
@@ -58,11 +58,11 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_nhwc_quant8(Model *model) {
+void CreateModel_nhwc_float16(Model *model) {
   OperandType type0(Type::BOOL, {});
   OperandType type3(Type::INT32, {});
-  OperandType type8(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 8}, 0.1f, 0);
-  OperandType type9(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.1f, 0);
+  OperandType type8(Type::TENSOR_FLOAT16, {1, 1, 1, 8});
+  OperandType type9(Type::TENSOR_FLOAT16, {1, 2, 2, 2});
   // Phase 1, operands
   auto op1 = model->addOperand(&type8);
   auto param = model->addOperand(&type3);
@@ -81,6 +81,34 @@
   assert(model->isValid());
 }
 
+inline bool is_ignored_nhwc_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nhwc_quant8(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type10(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 8}, 0.1f, 0);
+  OperandType type11(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.1f, 0);
+  OperandType type3(Type::INT32, {});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type10);
+  auto param = model->addOperand(&type3);
+  auto layout = model->addOperand(&type0);
+  auto op4 = model->addOperand(&type11);
+  // Phase 2, operations
+  static int32_t param_init[] = {2};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static bool layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_DEPTH_TO_SPACE, {op1, param, layout}, {op4});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1},
+    {op4});
+  assert(model->isValid());
+}
+
 inline bool is_ignored_nhwc_quant8(int i) {
   static std::set<int> ignore = {};
   return ignore.find(i) != ignore.end();
@@ -88,11 +116,11 @@
 
 void CreateModel_nchw(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type10(Type::TENSOR_FLOAT32, {1, 8, 1, 1});
+  OperandType type12(Type::TENSOR_FLOAT32, {1, 8, 1, 1});
   OperandType type2(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
   OperandType type3(Type::INT32, {});
   // Phase 1, operands
-  auto op1 = model->addOperand(&type10);
+  auto op1 = model->addOperand(&type12);
   auto param = model->addOperand(&type3);
   auto layout = model->addOperand(&type0);
   auto op4 = model->addOperand(&type2);
@@ -116,11 +144,11 @@
 
 void CreateModel_nchw_relaxed(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type10(Type::TENSOR_FLOAT32, {1, 8, 1, 1});
+  OperandType type12(Type::TENSOR_FLOAT32, {1, 8, 1, 1});
   OperandType type2(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
   OperandType type3(Type::INT32, {});
   // Phase 1, operands
-  auto op1 = model->addOperand(&type10);
+  auto op1 = model->addOperand(&type12);
   auto param = model->addOperand(&type3);
   auto layout = model->addOperand(&type0);
   auto op4 = model->addOperand(&type2);
@@ -144,13 +172,13 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_nchw_quant8(Model *model) {
+void CreateModel_nchw_float16(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type11(Type::TENSOR_QUANT8_ASYMM, {1, 8, 1, 1}, 0.1f, 0);
+  OperandType type13(Type::TENSOR_FLOAT16, {1, 8, 1, 1});
   OperandType type3(Type::INT32, {});
-  OperandType type9(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.1f, 0);
+  OperandType type9(Type::TENSOR_FLOAT16, {1, 2, 2, 2});
   // Phase 1, operands
-  auto op1 = model->addOperand(&type11);
+  auto op1 = model->addOperand(&type13);
   auto param = model->addOperand(&type3);
   auto layout = model->addOperand(&type0);
   auto op4 = model->addOperand(&type9);
@@ -167,6 +195,34 @@
   assert(model->isValid());
 }
 
+inline bool is_ignored_nchw_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nchw_quant8(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type11(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.1f, 0);
+  OperandType type14(Type::TENSOR_QUANT8_ASYMM, {1, 8, 1, 1}, 0.1f, 0);
+  OperandType type3(Type::INT32, {});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type14);
+  auto param = model->addOperand(&type3);
+  auto layout = model->addOperand(&type0);
+  auto op4 = model->addOperand(&type11);
+  // Phase 2, operations
+  static int32_t param_init[] = {2};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static bool layout_init[] = {true};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_DEPTH_TO_SPACE, {op1, param, layout}, {op4});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1},
+    {op4});
+  assert(model->isValid());
+}
+
 inline bool is_ignored_nchw_quant8(int i) {
   static std::set<int> ignore = {};
   return ignore.find(i) != ignore.end();
@@ -230,16 +286,44 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_nhwc_quant8_2(Model *model) {
+void CreateModel_nhwc_float16_2(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type12(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.5f, 128);
-  OperandType type13(Type::TENSOR_QUANT8_ASYMM, {1, 4, 4, 1}, 0.5f, 128);
+  OperandType type15(Type::TENSOR_FLOAT16, {1, 2, 2, 4});
+  OperandType type16(Type::TENSOR_FLOAT16, {1, 4, 4, 1});
   OperandType type3(Type::INT32, {});
   // Phase 1, operands
-  auto op11 = model->addOperand(&type12);
+  auto op11 = model->addOperand(&type15);
   auto param1 = model->addOperand(&type3);
   auto layout = model->addOperand(&type0);
-  auto op41 = model->addOperand(&type13);
+  auto op41 = model->addOperand(&type16);
+  // Phase 2, operations
+  static int32_t param1_init[] = {2};
+  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
+  static bool layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_DEPTH_TO_SPACE, {op11, param1, layout}, {op41});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op11},
+    {op41});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nhwc_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nhwc_quant8_2(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type17(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.5f, 128);
+  OperandType type18(Type::TENSOR_QUANT8_ASYMM, {1, 4, 4, 1}, 0.5f, 128);
+  OperandType type3(Type::INT32, {});
+  // Phase 1, operands
+  auto op11 = model->addOperand(&type17);
+  auto param1 = model->addOperand(&type3);
+  auto layout = model->addOperand(&type0);
+  auto op41 = model->addOperand(&type18);
   // Phase 2, operations
   static int32_t param1_init[] = {2};
   model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
@@ -260,14 +344,14 @@
 
 void CreateModel_nchw_2(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type14(Type::TENSOR_FLOAT32, {1, 4, 2, 2});
-  OperandType type15(Type::TENSOR_FLOAT32, {1, 1, 4, 4});
+  OperandType type19(Type::TENSOR_FLOAT32, {1, 4, 2, 2});
+  OperandType type20(Type::TENSOR_FLOAT32, {1, 1, 4, 4});
   OperandType type3(Type::INT32, {});
   // Phase 1, operands
-  auto op11 = model->addOperand(&type14);
+  auto op11 = model->addOperand(&type19);
   auto param1 = model->addOperand(&type3);
   auto layout = model->addOperand(&type0);
-  auto op41 = model->addOperand(&type15);
+  auto op41 = model->addOperand(&type20);
   // Phase 2, operations
   static int32_t param1_init[] = {2};
   model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
@@ -288,14 +372,14 @@
 
 void CreateModel_nchw_relaxed_2(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type14(Type::TENSOR_FLOAT32, {1, 4, 2, 2});
-  OperandType type15(Type::TENSOR_FLOAT32, {1, 1, 4, 4});
+  OperandType type19(Type::TENSOR_FLOAT32, {1, 4, 2, 2});
+  OperandType type20(Type::TENSOR_FLOAT32, {1, 1, 4, 4});
   OperandType type3(Type::INT32, {});
   // Phase 1, operands
-  auto op11 = model->addOperand(&type14);
+  auto op11 = model->addOperand(&type19);
   auto param1 = model->addOperand(&type3);
   auto layout = model->addOperand(&type0);
-  auto op41 = model->addOperand(&type15);
+  auto op41 = model->addOperand(&type20);
   // Phase 2, operations
   static int32_t param1_init[] = {2};
   model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
@@ -316,16 +400,44 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_nchw_quant8_2(Model *model) {
+void CreateModel_nchw_float16_2(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type16(Type::TENSOR_QUANT8_ASYMM, {1, 4, 2, 2}, 0.5f, 128);
-  OperandType type17(Type::TENSOR_QUANT8_ASYMM, {1, 1, 4, 4}, 0.5f, 128);
+  OperandType type21(Type::TENSOR_FLOAT16, {1, 4, 2, 2});
+  OperandType type22(Type::TENSOR_FLOAT16, {1, 1, 4, 4});
   OperandType type3(Type::INT32, {});
   // Phase 1, operands
-  auto op11 = model->addOperand(&type16);
+  auto op11 = model->addOperand(&type21);
   auto param1 = model->addOperand(&type3);
   auto layout = model->addOperand(&type0);
-  auto op41 = model->addOperand(&type17);
+  auto op41 = model->addOperand(&type22);
+  // Phase 2, operations
+  static int32_t param1_init[] = {2};
+  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
+  static bool layout_init[] = {true};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_DEPTH_TO_SPACE, {op11, param1, layout}, {op41});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op11},
+    {op41});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nchw_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nchw_quant8_2(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type23(Type::TENSOR_QUANT8_ASYMM, {1, 4, 2, 2}, 0.5f, 128);
+  OperandType type24(Type::TENSOR_QUANT8_ASYMM, {1, 1, 4, 4}, 0.5f, 128);
+  OperandType type3(Type::INT32, {});
+  // Phase 1, operands
+  auto op11 = model->addOperand(&type23);
+  auto param1 = model->addOperand(&type3);
+  auto layout = model->addOperand(&type0);
+  auto op41 = model->addOperand(&type24);
   // Phase 2, operations
   static int32_t param1_init[] = {2};
   model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
@@ -402,16 +514,44 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_nhwc_quant8_3(Model *model) {
+void CreateModel_nhwc_float16_3(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type18(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 8}, 1.0f, 0);
-  OperandType type19(Type::TENSOR_QUANT8_ASYMM, {1, 4, 4, 2}, 1.0f, 0);
+  OperandType type25(Type::TENSOR_FLOAT16, {1, 2, 2, 8});
+  OperandType type26(Type::TENSOR_FLOAT16, {1, 4, 4, 2});
   OperandType type3(Type::INT32, {});
   // Phase 1, operands
-  auto op12 = model->addOperand(&type18);
+  auto op12 = model->addOperand(&type25);
   auto param2 = model->addOperand(&type3);
   auto layout = model->addOperand(&type0);
-  auto op42 = model->addOperand(&type19);
+  auto op42 = model->addOperand(&type26);
+  // Phase 2, operations
+  static int32_t param2_init[] = {2};
+  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
+  static bool layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_DEPTH_TO_SPACE, {op12, param2, layout}, {op42});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op12},
+    {op42});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nhwc_float16_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nhwc_quant8_3(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type27(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 8}, 1.0f, 0);
+  OperandType type28(Type::TENSOR_QUANT8_ASYMM, {1, 4, 4, 2}, 1.0f, 0);
+  OperandType type3(Type::INT32, {});
+  // Phase 1, operands
+  auto op12 = model->addOperand(&type27);
+  auto param2 = model->addOperand(&type3);
+  auto layout = model->addOperand(&type0);
+  auto op42 = model->addOperand(&type28);
   // Phase 2, operations
   static int32_t param2_init[] = {2};
   model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
@@ -432,14 +572,14 @@
 
 void CreateModel_nchw_3(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type20(Type::TENSOR_FLOAT32, {1, 8, 2, 2});
-  OperandType type21(Type::TENSOR_FLOAT32, {1, 2, 4, 4});
+  OperandType type29(Type::TENSOR_FLOAT32, {1, 8, 2, 2});
   OperandType type3(Type::INT32, {});
+  OperandType type30(Type::TENSOR_FLOAT32, {1, 2, 4, 4});
   // Phase 1, operands
-  auto op12 = model->addOperand(&type20);
+  auto op12 = model->addOperand(&type29);
   auto param2 = model->addOperand(&type3);
   auto layout = model->addOperand(&type0);
-  auto op42 = model->addOperand(&type21);
+  auto op42 = model->addOperand(&type30);
   // Phase 2, operations
   static int32_t param2_init[] = {2};
   model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
@@ -460,14 +600,14 @@
 
 void CreateModel_nchw_relaxed_3(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type20(Type::TENSOR_FLOAT32, {1, 8, 2, 2});
-  OperandType type21(Type::TENSOR_FLOAT32, {1, 2, 4, 4});
+  OperandType type29(Type::TENSOR_FLOAT32, {1, 8, 2, 2});
   OperandType type3(Type::INT32, {});
+  OperandType type30(Type::TENSOR_FLOAT32, {1, 2, 4, 4});
   // Phase 1, operands
-  auto op12 = model->addOperand(&type20);
+  auto op12 = model->addOperand(&type29);
   auto param2 = model->addOperand(&type3);
   auto layout = model->addOperand(&type0);
-  auto op42 = model->addOperand(&type21);
+  auto op42 = model->addOperand(&type30);
   // Phase 2, operations
   static int32_t param2_init[] = {2};
   model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
@@ -488,16 +628,44 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_nchw_quant8_3(Model *model) {
+void CreateModel_nchw_float16_3(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type22(Type::TENSOR_QUANT8_ASYMM, {1, 8, 2, 2}, 1.0f, 0);
-  OperandType type23(Type::TENSOR_QUANT8_ASYMM, {1, 2, 4, 4}, 1.0f, 0);
   OperandType type3(Type::INT32, {});
+  OperandType type31(Type::TENSOR_FLOAT16, {1, 8, 2, 2});
+  OperandType type32(Type::TENSOR_FLOAT16, {1, 2, 4, 4});
   // Phase 1, operands
-  auto op12 = model->addOperand(&type22);
+  auto op12 = model->addOperand(&type31);
   auto param2 = model->addOperand(&type3);
   auto layout = model->addOperand(&type0);
-  auto op42 = model->addOperand(&type23);
+  auto op42 = model->addOperand(&type32);
+  // Phase 2, operations
+  static int32_t param2_init[] = {2};
+  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
+  static bool layout_init[] = {true};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_DEPTH_TO_SPACE, {op12, param2, layout}, {op42});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op12},
+    {op42});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nchw_float16_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nchw_quant8_3(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type3(Type::INT32, {});
+  OperandType type33(Type::TENSOR_QUANT8_ASYMM, {1, 8, 2, 2}, 1.0f, 0);
+  OperandType type34(Type::TENSOR_QUANT8_ASYMM, {1, 2, 4, 4}, 1.0f, 0);
+  // Phase 1, operands
+  auto op12 = model->addOperand(&type33);
+  auto param2 = model->addOperand(&type3);
+  auto layout = model->addOperand(&type0);
+  auto op42 = model->addOperand(&type34);
   // Phase 2, operations
   static int32_t param2_init[] = {2};
   model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
diff --git a/nn/runtime/test/generated/models/depthwise_conv2d_v1_2.model.cpp b/nn/runtime/test/generated/models/depthwise_conv2d_v1_2.model.cpp
index c5c282c..8245f51 100644
--- a/nn/runtime/test/generated/models/depthwise_conv2d_v1_2.model.cpp
+++ b/nn/runtime/test/generated/models/depthwise_conv2d_v1_2.model.cpp
@@ -114,12 +114,11 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_nhwc_quant8(Model *model) {
+void CreateModel_nhwc_float16(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type11(Type::TENSOR_QUANT8_ASYMM, {1, 3, 3, 2}, 0.5f, 0);
-  OperandType type12(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.01f, 0);
-  OperandType type13(Type::TENSOR_INT32, {4}, 0.005f, 0);
-  OperandType type14(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.1f, 0);
+  OperandType type11(Type::TENSOR_FLOAT16, {1, 3, 3, 2});
+  OperandType type12(Type::TENSOR_FLOAT16, {1, 2, 2, 4});
+  OperandType type13(Type::TENSOR_FLOAT16, {4});
   OperandType type4(Type::INT32, {});
   // Phase 1, operands
   auto op1 = model->addOperand(&type11);
@@ -134,7 +133,64 @@
   auto param6 = model->addOperand(&type4);
   auto param7 = model->addOperand(&type4);
   auto layout = model->addOperand(&type0);
-  auto op4 = model->addOperand(&type14);
+  auto op4 = model->addOperand(&type12);
+  // Phase 2, operations
+  static _Float16 op2_init[] = {0.25f, 0.0f, 0.20000000298023224f, 0.0f, 0.25f, 0.0f, 0.0f, 0.30000001192092896f, 0.25f, 0.0f, 0.0f, 0.0f, 0.25f, 0.10000000149011612f, 0.0f, 0.0f};
+  model->setOperandValue(op2, op2_init, sizeof(_Float16) * 16);
+  static _Float16 op3_init[] = {1.0f, 2.0f, 3.0f, 4.0f};
+  model->setOperandValue(op3, op3_init, sizeof(_Float16) * 4);
+  static int32_t param_init[] = {0};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static int32_t param1_init[] = {0};
+  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
+  static int32_t param2_init[] = {0};
+  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
+  static int32_t param3_init[] = {0};
+  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
+  static int32_t param4_init[] = {1};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
+  static int32_t param5_init[] = {1};
+  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+  static int32_t param6_init[] = {2};
+  model->setOperandValue(param6, param6_init, sizeof(int32_t) * 1);
+  static int32_t param7_init[] = {0};
+  model->setOperandValue(param7, param7_init, sizeof(int32_t) * 1);
+  static bool layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op1, op2, op3, param, param1, param2, param3, param4, param5, param6, param7, layout}, {op4});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1},
+    {op4});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nhwc_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nhwc_quant8(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type14(Type::TENSOR_QUANT8_ASYMM, {1, 3, 3, 2}, 0.5f, 0);
+  OperandType type15(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.01f, 0);
+  OperandType type16(Type::TENSOR_INT32, {4}, 0.005f, 0);
+  OperandType type17(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.1f, 0);
+  OperandType type4(Type::INT32, {});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type14);
+  auto op2 = model->addOperand(&type15);
+  auto op3 = model->addOperand(&type16);
+  auto param = model->addOperand(&type4);
+  auto param1 = model->addOperand(&type4);
+  auto param2 = model->addOperand(&type4);
+  auto param3 = model->addOperand(&type4);
+  auto param4 = model->addOperand(&type4);
+  auto param5 = model->addOperand(&type4);
+  auto param6 = model->addOperand(&type4);
+  auto param7 = model->addOperand(&type4);
+  auto layout = model->addOperand(&type0);
+  auto op4 = model->addOperand(&type17);
   // Phase 2, operations
   static uint8_t op2_init[] = {25, 0, 20, 0, 25, 0, 0, 30, 25, 0, 0, 0, 25, 10, 0, 0};
   model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 16);
@@ -277,12 +333,11 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_nhwc_weight_as_input_quant8(Model *model) {
+void CreateModel_nhwc_weight_as_input_float16(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type11(Type::TENSOR_QUANT8_ASYMM, {1, 3, 3, 2}, 0.5f, 0);
-  OperandType type12(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.01f, 0);
-  OperandType type13(Type::TENSOR_INT32, {4}, 0.005f, 0);
-  OperandType type14(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.1f, 0);
+  OperandType type11(Type::TENSOR_FLOAT16, {1, 3, 3, 2});
+  OperandType type12(Type::TENSOR_FLOAT16, {1, 2, 2, 4});
+  OperandType type13(Type::TENSOR_FLOAT16, {4});
   OperandType type4(Type::INT32, {});
   // Phase 1, operands
   auto op1 = model->addOperand(&type11);
@@ -297,7 +352,60 @@
   auto param6 = model->addOperand(&type4);
   auto param7 = model->addOperand(&type4);
   auto layout = model->addOperand(&type0);
-  auto op4 = model->addOperand(&type14);
+  auto op4 = model->addOperand(&type12);
+  // Phase 2, operations
+  static int32_t param_init[] = {0};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static int32_t param1_init[] = {0};
+  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
+  static int32_t param2_init[] = {0};
+  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
+  static int32_t param3_init[] = {0};
+  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
+  static int32_t param4_init[] = {1};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
+  static int32_t param5_init[] = {1};
+  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+  static int32_t param6_init[] = {2};
+  model->setOperandValue(param6, param6_init, sizeof(int32_t) * 1);
+  static int32_t param7_init[] = {0};
+  model->setOperandValue(param7, param7_init, sizeof(int32_t) * 1);
+  static bool layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op1, op2, op3, param, param1, param2, param3, param4, param5, param6, param7, layout}, {op4});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1, op2, op3},
+    {op4});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nhwc_weight_as_input_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nhwc_weight_as_input_quant8(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type14(Type::TENSOR_QUANT8_ASYMM, {1, 3, 3, 2}, 0.5f, 0);
+  OperandType type15(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.01f, 0);
+  OperandType type16(Type::TENSOR_INT32, {4}, 0.005f, 0);
+  OperandType type17(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.1f, 0);
+  OperandType type4(Type::INT32, {});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type14);
+  auto op2 = model->addOperand(&type15);
+  auto op3 = model->addOperand(&type16);
+  auto param = model->addOperand(&type4);
+  auto param1 = model->addOperand(&type4);
+  auto param2 = model->addOperand(&type4);
+  auto param3 = model->addOperand(&type4);
+  auto param4 = model->addOperand(&type4);
+  auto param5 = model->addOperand(&type4);
+  auto param6 = model->addOperand(&type4);
+  auto param7 = model->addOperand(&type4);
+  auto layout = model->addOperand(&type0);
+  auto op4 = model->addOperand(&type17);
   // Phase 2, operations
   static int32_t param_init[] = {0};
   model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
@@ -332,13 +440,13 @@
 
 void CreateModel_nchw(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type15(Type::TENSOR_FLOAT32, {1, 2, 3, 3});
-  OperandType type16(Type::TENSOR_FLOAT32, {1, 4, 2, 2});
+  OperandType type18(Type::TENSOR_FLOAT32, {1, 2, 3, 3});
+  OperandType type19(Type::TENSOR_FLOAT32, {1, 4, 2, 2});
   OperandType type2(Type::TENSOR_FLOAT32, {1, 2, 2, 4});
   OperandType type3(Type::TENSOR_FLOAT32, {4});
   OperandType type4(Type::INT32, {});
   // Phase 1, operands
-  auto op1 = model->addOperand(&type15);
+  auto op1 = model->addOperand(&type18);
   auto op2 = model->addOperand(&type2);
   auto op3 = model->addOperand(&type3);
   auto param = model->addOperand(&type4);
@@ -350,7 +458,7 @@
   auto param6 = model->addOperand(&type4);
   auto param7 = model->addOperand(&type4);
   auto layout = model->addOperand(&type0);
-  auto op4 = model->addOperand(&type16);
+  auto op4 = model->addOperand(&type19);
   // Phase 2, operations
   static float op2_init[] = {0.25f, 0.0f, 0.2f, 0.0f, 0.25f, 0.0f, 0.0f, 0.3f, 0.25f, 0.0f, 0.0f, 0.0f, 0.25f, 0.1f, 0.0f, 0.0f};
   model->setOperandValue(op2, op2_init, sizeof(float) * 16);
@@ -389,13 +497,13 @@
 
 void CreateModel_nchw_relaxed(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type15(Type::TENSOR_FLOAT32, {1, 2, 3, 3});
-  OperandType type16(Type::TENSOR_FLOAT32, {1, 4, 2, 2});
+  OperandType type18(Type::TENSOR_FLOAT32, {1, 2, 3, 3});
+  OperandType type19(Type::TENSOR_FLOAT32, {1, 4, 2, 2});
   OperandType type2(Type::TENSOR_FLOAT32, {1, 2, 2, 4});
   OperandType type3(Type::TENSOR_FLOAT32, {4});
   OperandType type4(Type::INT32, {});
   // Phase 1, operands
-  auto op1 = model->addOperand(&type15);
+  auto op1 = model->addOperand(&type18);
   auto op2 = model->addOperand(&type2);
   auto op3 = model->addOperand(&type3);
   auto param = model->addOperand(&type4);
@@ -407,7 +515,7 @@
   auto param6 = model->addOperand(&type4);
   auto param7 = model->addOperand(&type4);
   auto layout = model->addOperand(&type0);
-  auto op4 = model->addOperand(&type16);
+  auto op4 = model->addOperand(&type19);
   // Phase 2, operations
   static float op2_init[] = {0.25f, 0.0f, 0.2f, 0.0f, 0.25f, 0.0f, 0.0f, 0.3f, 0.25f, 0.0f, 0.0f, 0.0f, 0.25f, 0.1f, 0.0f, 0.0f};
   model->setOperandValue(op2, op2_init, sizeof(float) * 16);
@@ -446,15 +554,15 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_nchw_quant8(Model *model) {
+void CreateModel_nchw_float16(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type12(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.01f, 0);
-  OperandType type13(Type::TENSOR_INT32, {4}, 0.005f, 0);
-  OperandType type17(Type::TENSOR_QUANT8_ASYMM, {1, 2, 3, 3}, 0.5f, 0);
-  OperandType type18(Type::TENSOR_QUANT8_ASYMM, {1, 4, 2, 2}, 0.1f, 0);
+  OperandType type12(Type::TENSOR_FLOAT16, {1, 2, 2, 4});
+  OperandType type13(Type::TENSOR_FLOAT16, {4});
+  OperandType type20(Type::TENSOR_FLOAT16, {1, 2, 3, 3});
+  OperandType type21(Type::TENSOR_FLOAT16, {1, 4, 2, 2});
   OperandType type4(Type::INT32, {});
   // Phase 1, operands
-  auto op1 = model->addOperand(&type17);
+  auto op1 = model->addOperand(&type20);
   auto op2 = model->addOperand(&type12);
   auto op3 = model->addOperand(&type13);
   auto param = model->addOperand(&type4);
@@ -466,7 +574,64 @@
   auto param6 = model->addOperand(&type4);
   auto param7 = model->addOperand(&type4);
   auto layout = model->addOperand(&type0);
-  auto op4 = model->addOperand(&type18);
+  auto op4 = model->addOperand(&type21);
+  // Phase 2, operations
+  static _Float16 op2_init[] = {0.25f, 0.0f, 0.20000000298023224f, 0.0f, 0.25f, 0.0f, 0.0f, 0.30000001192092896f, 0.25f, 0.0f, 0.0f, 0.0f, 0.25f, 0.10000000149011612f, 0.0f, 0.0f};
+  model->setOperandValue(op2, op2_init, sizeof(_Float16) * 16);
+  static _Float16 op3_init[] = {1.0f, 2.0f, 3.0f, 4.0f};
+  model->setOperandValue(op3, op3_init, sizeof(_Float16) * 4);
+  static int32_t param_init[] = {0};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static int32_t param1_init[] = {0};
+  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
+  static int32_t param2_init[] = {0};
+  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
+  static int32_t param3_init[] = {0};
+  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
+  static int32_t param4_init[] = {1};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
+  static int32_t param5_init[] = {1};
+  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+  static int32_t param6_init[] = {2};
+  model->setOperandValue(param6, param6_init, sizeof(int32_t) * 1);
+  static int32_t param7_init[] = {0};
+  model->setOperandValue(param7, param7_init, sizeof(int32_t) * 1);
+  static bool layout_init[] = {true};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op1, op2, op3, param, param1, param2, param3, param4, param5, param6, param7, layout}, {op4});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1},
+    {op4});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nchw_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nchw_quant8(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type15(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.01f, 0);
+  OperandType type16(Type::TENSOR_INT32, {4}, 0.005f, 0);
+  OperandType type22(Type::TENSOR_QUANT8_ASYMM, {1, 2, 3, 3}, 0.5f, 0);
+  OperandType type23(Type::TENSOR_QUANT8_ASYMM, {1, 4, 2, 2}, 0.1f, 0);
+  OperandType type4(Type::INT32, {});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type22);
+  auto op2 = model->addOperand(&type15);
+  auto op3 = model->addOperand(&type16);
+  auto param = model->addOperand(&type4);
+  auto param1 = model->addOperand(&type4);
+  auto param2 = model->addOperand(&type4);
+  auto param3 = model->addOperand(&type4);
+  auto param4 = model->addOperand(&type4);
+  auto param5 = model->addOperand(&type4);
+  auto param6 = model->addOperand(&type4);
+  auto param7 = model->addOperand(&type4);
+  auto layout = model->addOperand(&type0);
+  auto op4 = model->addOperand(&type23);
   // Phase 2, operations
   static uint8_t op2_init[] = {25, 0, 20, 0, 25, 0, 0, 30, 25, 0, 0, 0, 25, 10, 0, 0};
   model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 16);
@@ -505,13 +670,13 @@
 
 void CreateModel_nchw_weight_as_input(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type15(Type::TENSOR_FLOAT32, {1, 2, 3, 3});
-  OperandType type16(Type::TENSOR_FLOAT32, {1, 4, 2, 2});
+  OperandType type18(Type::TENSOR_FLOAT32, {1, 2, 3, 3});
+  OperandType type19(Type::TENSOR_FLOAT32, {1, 4, 2, 2});
   OperandType type2(Type::TENSOR_FLOAT32, {1, 2, 2, 4});
   OperandType type3(Type::TENSOR_FLOAT32, {4});
   OperandType type4(Type::INT32, {});
   // Phase 1, operands
-  auto op1 = model->addOperand(&type15);
+  auto op1 = model->addOperand(&type18);
   auto op2 = model->addOperand(&type2);
   auto op3 = model->addOperand(&type3);
   auto param = model->addOperand(&type4);
@@ -523,7 +688,7 @@
   auto param6 = model->addOperand(&type4);
   auto param7 = model->addOperand(&type4);
   auto layout = model->addOperand(&type0);
-  auto op4 = model->addOperand(&type16);
+  auto op4 = model->addOperand(&type19);
   // Phase 2, operations
   static int32_t param_init[] = {0};
   model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
@@ -558,13 +723,13 @@
 
 void CreateModel_nchw_weight_as_input_relaxed(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type15(Type::TENSOR_FLOAT32, {1, 2, 3, 3});
-  OperandType type16(Type::TENSOR_FLOAT32, {1, 4, 2, 2});
+  OperandType type18(Type::TENSOR_FLOAT32, {1, 2, 3, 3});
+  OperandType type19(Type::TENSOR_FLOAT32, {1, 4, 2, 2});
   OperandType type2(Type::TENSOR_FLOAT32, {1, 2, 2, 4});
   OperandType type3(Type::TENSOR_FLOAT32, {4});
   OperandType type4(Type::INT32, {});
   // Phase 1, operands
-  auto op1 = model->addOperand(&type15);
+  auto op1 = model->addOperand(&type18);
   auto op2 = model->addOperand(&type2);
   auto op3 = model->addOperand(&type3);
   auto param = model->addOperand(&type4);
@@ -576,7 +741,7 @@
   auto param6 = model->addOperand(&type4);
   auto param7 = model->addOperand(&type4);
   auto layout = model->addOperand(&type0);
-  auto op4 = model->addOperand(&type16);
+  auto op4 = model->addOperand(&type19);
   // Phase 2, operations
   static int32_t param_init[] = {0};
   model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
@@ -611,15 +776,15 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_nchw_weight_as_input_quant8(Model *model) {
+void CreateModel_nchw_weight_as_input_float16(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type12(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.01f, 0);
-  OperandType type13(Type::TENSOR_INT32, {4}, 0.005f, 0);
-  OperandType type17(Type::TENSOR_QUANT8_ASYMM, {1, 2, 3, 3}, 0.5f, 0);
-  OperandType type18(Type::TENSOR_QUANT8_ASYMM, {1, 4, 2, 2}, 0.1f, 0);
+  OperandType type12(Type::TENSOR_FLOAT16, {1, 2, 2, 4});
+  OperandType type13(Type::TENSOR_FLOAT16, {4});
+  OperandType type20(Type::TENSOR_FLOAT16, {1, 2, 3, 3});
+  OperandType type21(Type::TENSOR_FLOAT16, {1, 4, 2, 2});
   OperandType type4(Type::INT32, {});
   // Phase 1, operands
-  auto op1 = model->addOperand(&type17);
+  auto op1 = model->addOperand(&type20);
   auto op2 = model->addOperand(&type12);
   auto op3 = model->addOperand(&type13);
   auto param = model->addOperand(&type4);
@@ -631,7 +796,60 @@
   auto param6 = model->addOperand(&type4);
   auto param7 = model->addOperand(&type4);
   auto layout = model->addOperand(&type0);
-  auto op4 = model->addOperand(&type18);
+  auto op4 = model->addOperand(&type21);
+  // Phase 2, operations
+  static int32_t param_init[] = {0};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static int32_t param1_init[] = {0};
+  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
+  static int32_t param2_init[] = {0};
+  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
+  static int32_t param3_init[] = {0};
+  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
+  static int32_t param4_init[] = {1};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
+  static int32_t param5_init[] = {1};
+  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+  static int32_t param6_init[] = {2};
+  model->setOperandValue(param6, param6_init, sizeof(int32_t) * 1);
+  static int32_t param7_init[] = {0};
+  model->setOperandValue(param7, param7_init, sizeof(int32_t) * 1);
+  static bool layout_init[] = {true};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op1, op2, op3, param, param1, param2, param3, param4, param5, param6, param7, layout}, {op4});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1, op2, op3},
+    {op4});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nchw_weight_as_input_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nchw_weight_as_input_quant8(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type15(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.01f, 0);
+  OperandType type16(Type::TENSOR_INT32, {4}, 0.005f, 0);
+  OperandType type22(Type::TENSOR_QUANT8_ASYMM, {1, 2, 3, 3}, 0.5f, 0);
+  OperandType type23(Type::TENSOR_QUANT8_ASYMM, {1, 4, 2, 2}, 0.1f, 0);
+  OperandType type4(Type::INT32, {});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type22);
+  auto op2 = model->addOperand(&type15);
+  auto op3 = model->addOperand(&type16);
+  auto param = model->addOperand(&type4);
+  auto param1 = model->addOperand(&type4);
+  auto param2 = model->addOperand(&type4);
+  auto param3 = model->addOperand(&type4);
+  auto param4 = model->addOperand(&type4);
+  auto param5 = model->addOperand(&type4);
+  auto param6 = model->addOperand(&type4);
+  auto param7 = model->addOperand(&type4);
+  auto layout = model->addOperand(&type0);
+  auto op4 = model->addOperand(&type23);
   // Phase 2, operations
   static int32_t param_init[] = {0};
   model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
@@ -762,24 +980,72 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_nhwc_quant8_2(Model *model) {
+void CreateModel_nhwc_float16_2(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type19(Type::TENSOR_QUANT8_ASYMM, {1, 3, 2, 2}, 0.5f, 128);
-  OperandType type20(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.5f, 128);
-  OperandType type21(Type::TENSOR_INT32, {4}, 0.25f, 0);
-  OperandType type22(Type::TENSOR_QUANT8_ASYMM, {1, 2, 1, 4}, 1.0f, 100);
+  OperandType type12(Type::TENSOR_FLOAT16, {1, 2, 2, 4});
+  OperandType type13(Type::TENSOR_FLOAT16, {4});
+  OperandType type24(Type::TENSOR_FLOAT16, {1, 3, 2, 2});
+  OperandType type25(Type::TENSOR_FLOAT16, {1, 2, 1, 4});
   OperandType type4(Type::INT32, {});
   // Phase 1, operands
-  auto op11 = model->addOperand(&type19);
-  auto op21 = model->addOperand(&type20);
-  auto op31 = model->addOperand(&type21);
+  auto op11 = model->addOperand(&type24);
+  auto op21 = model->addOperand(&type12);
+  auto op31 = model->addOperand(&type13);
   auto param8 = model->addOperand(&type4);
   auto param9 = model->addOperand(&type4);
   auto param10 = model->addOperand(&type4);
   auto param11 = model->addOperand(&type4);
   auto param12 = model->addOperand(&type4);
   auto layout = model->addOperand(&type0);
-  auto op41 = model->addOperand(&type22);
+  auto op41 = model->addOperand(&type25);
+  // Phase 2, operations
+  static _Float16 op21_init[] = {1.0f, 2.0f, 3.0f, 4.0f, -9.0f, 10.0f, -11.0f, 12.0f, 5.0f, 6.0f, 7.0f, 8.0f, 13.0f, -14.0f, 15.0f, -16.0f};
+  model->setOperandValue(op21, op21_init, sizeof(_Float16) * 16);
+  static _Float16 op31_init[] = {1.0f, 2.0f, 3.0f, 4.0f};
+  model->setOperandValue(op31, op31_init, sizeof(_Float16) * 4);
+  static int32_t param8_init[] = {2};
+  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
+  static int32_t param9_init[] = {1};
+  model->setOperandValue(param9, param9_init, sizeof(int32_t) * 1);
+  static int32_t param10_init[] = {1};
+  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
+  static int32_t param11_init[] = {2};
+  model->setOperandValue(param11, param11_init, sizeof(int32_t) * 1);
+  static int32_t param12_init[] = {0};
+  model->setOperandValue(param12, param12_init, sizeof(int32_t) * 1);
+  static bool layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op11, op21, op31, param8, param9, param10, param11, param12, layout}, {op41});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op11},
+    {op41});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nhwc_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nhwc_quant8_2(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type26(Type::TENSOR_QUANT8_ASYMM, {1, 3, 2, 2}, 0.5f, 128);
+  OperandType type27(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.5f, 128);
+  OperandType type28(Type::TENSOR_INT32, {4}, 0.25f, 0);
+  OperandType type29(Type::TENSOR_QUANT8_ASYMM, {1, 2, 1, 4}, 1.0f, 100);
+  OperandType type4(Type::INT32, {});
+  // Phase 1, operands
+  auto op11 = model->addOperand(&type26);
+  auto op21 = model->addOperand(&type27);
+  auto op31 = model->addOperand(&type28);
+  auto param8 = model->addOperand(&type4);
+  auto param9 = model->addOperand(&type4);
+  auto param10 = model->addOperand(&type4);
+  auto param11 = model->addOperand(&type4);
+  auto param12 = model->addOperand(&type4);
+  auto layout = model->addOperand(&type0);
+  auto op41 = model->addOperand(&type29);
   // Phase 2, operations
   static uint8_t op21_init[] = {130, 132, 134, 136, 110, 148, 106, 152, 138, 140, 142, 144, 154, 100, 158, 96};
   model->setOperandValue(op21, op21_init, sizeof(uint8_t) * 16);
@@ -900,24 +1166,68 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_nhwc_weight_as_input_quant8_2(Model *model) {
+void CreateModel_nhwc_weight_as_input_float16_2(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type19(Type::TENSOR_QUANT8_ASYMM, {1, 3, 2, 2}, 0.5f, 128);
-  OperandType type20(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.5f, 128);
-  OperandType type21(Type::TENSOR_INT32, {4}, 0.25f, 0);
-  OperandType type22(Type::TENSOR_QUANT8_ASYMM, {1, 2, 1, 4}, 1.0f, 100);
+  OperandType type12(Type::TENSOR_FLOAT16, {1, 2, 2, 4});
+  OperandType type13(Type::TENSOR_FLOAT16, {4});
+  OperandType type24(Type::TENSOR_FLOAT16, {1, 3, 2, 2});
+  OperandType type25(Type::TENSOR_FLOAT16, {1, 2, 1, 4});
   OperandType type4(Type::INT32, {});
   // Phase 1, operands
-  auto op11 = model->addOperand(&type19);
-  auto op21 = model->addOperand(&type20);
-  auto op31 = model->addOperand(&type21);
+  auto op11 = model->addOperand(&type24);
+  auto op21 = model->addOperand(&type12);
+  auto op31 = model->addOperand(&type13);
   auto param8 = model->addOperand(&type4);
   auto param9 = model->addOperand(&type4);
   auto param10 = model->addOperand(&type4);
   auto param11 = model->addOperand(&type4);
   auto param12 = model->addOperand(&type4);
   auto layout = model->addOperand(&type0);
-  auto op41 = model->addOperand(&type22);
+  auto op41 = model->addOperand(&type25);
+  // Phase 2, operations
+  static int32_t param8_init[] = {2};
+  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
+  static int32_t param9_init[] = {1};
+  model->setOperandValue(param9, param9_init, sizeof(int32_t) * 1);
+  static int32_t param10_init[] = {1};
+  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
+  static int32_t param11_init[] = {2};
+  model->setOperandValue(param11, param11_init, sizeof(int32_t) * 1);
+  static int32_t param12_init[] = {0};
+  model->setOperandValue(param12, param12_init, sizeof(int32_t) * 1);
+  static bool layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op11, op21, op31, param8, param9, param10, param11, param12, layout}, {op41});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op11, op21, op31},
+    {op41});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nhwc_weight_as_input_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nhwc_weight_as_input_quant8_2(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type26(Type::TENSOR_QUANT8_ASYMM, {1, 3, 2, 2}, 0.5f, 128);
+  OperandType type27(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.5f, 128);
+  OperandType type28(Type::TENSOR_INT32, {4}, 0.25f, 0);
+  OperandType type29(Type::TENSOR_QUANT8_ASYMM, {1, 2, 1, 4}, 1.0f, 100);
+  OperandType type4(Type::INT32, {});
+  // Phase 1, operands
+  auto op11 = model->addOperand(&type26);
+  auto op21 = model->addOperand(&type27);
+  auto op31 = model->addOperand(&type28);
+  auto param8 = model->addOperand(&type4);
+  auto param9 = model->addOperand(&type4);
+  auto param10 = model->addOperand(&type4);
+  auto param11 = model->addOperand(&type4);
+  auto param12 = model->addOperand(&type4);
+  auto layout = model->addOperand(&type0);
+  auto op41 = model->addOperand(&type29);
   // Phase 2, operations
   static int32_t param8_init[] = {2};
   model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
@@ -947,12 +1257,12 @@
 void CreateModel_nchw_2(Model *model) {
   OperandType type0(Type::BOOL, {});
   OperandType type2(Type::TENSOR_FLOAT32, {1, 2, 2, 4});
-  OperandType type23(Type::TENSOR_FLOAT32, {1, 2, 3, 2});
-  OperandType type24(Type::TENSOR_FLOAT32, {1, 4, 2, 1});
   OperandType type3(Type::TENSOR_FLOAT32, {4});
+  OperandType type30(Type::TENSOR_FLOAT32, {1, 2, 3, 2});
+  OperandType type31(Type::TENSOR_FLOAT32, {1, 4, 2, 1});
   OperandType type4(Type::INT32, {});
   // Phase 1, operands
-  auto op11 = model->addOperand(&type23);
+  auto op11 = model->addOperand(&type30);
   auto op21 = model->addOperand(&type2);
   auto op31 = model->addOperand(&type3);
   auto param8 = model->addOperand(&type4);
@@ -961,7 +1271,7 @@
   auto param11 = model->addOperand(&type4);
   auto param12 = model->addOperand(&type4);
   auto layout = model->addOperand(&type0);
-  auto op41 = model->addOperand(&type24);
+  auto op41 = model->addOperand(&type31);
   // Phase 2, operations
   static float op21_init[] = {1.0f, 2.0f, 3.0f, 4.0f, -9.0f, 10.0f, -11.0f, 12.0f, 5.0f, 6.0f, 7.0f, 8.0f, 13.0f, -14.0f, 15.0f, -16.0f};
   model->setOperandValue(op21, op21_init, sizeof(float) * 16);
@@ -995,12 +1305,12 @@
 void CreateModel_nchw_relaxed_2(Model *model) {
   OperandType type0(Type::BOOL, {});
   OperandType type2(Type::TENSOR_FLOAT32, {1, 2, 2, 4});
-  OperandType type23(Type::TENSOR_FLOAT32, {1, 2, 3, 2});
-  OperandType type24(Type::TENSOR_FLOAT32, {1, 4, 2, 1});
   OperandType type3(Type::TENSOR_FLOAT32, {4});
+  OperandType type30(Type::TENSOR_FLOAT32, {1, 2, 3, 2});
+  OperandType type31(Type::TENSOR_FLOAT32, {1, 4, 2, 1});
   OperandType type4(Type::INT32, {});
   // Phase 1, operands
-  auto op11 = model->addOperand(&type23);
+  auto op11 = model->addOperand(&type30);
   auto op21 = model->addOperand(&type2);
   auto op31 = model->addOperand(&type3);
   auto param8 = model->addOperand(&type4);
@@ -1009,7 +1319,7 @@
   auto param11 = model->addOperand(&type4);
   auto param12 = model->addOperand(&type4);
   auto layout = model->addOperand(&type0);
-  auto op41 = model->addOperand(&type24);
+  auto op41 = model->addOperand(&type31);
   // Phase 2, operations
   static float op21_init[] = {1.0f, 2.0f, 3.0f, 4.0f, -9.0f, 10.0f, -11.0f, 12.0f, 5.0f, 6.0f, 7.0f, 8.0f, 13.0f, -14.0f, 15.0f, -16.0f};
   model->setOperandValue(op21, op21_init, sizeof(float) * 16);
@@ -1042,24 +1352,72 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_nchw_quant8_2(Model *model) {
+void CreateModel_nchw_float16_2(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type20(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.5f, 128);
-  OperandType type21(Type::TENSOR_INT32, {4}, 0.25f, 0);
-  OperandType type25(Type::TENSOR_QUANT8_ASYMM, {1, 2, 3, 2}, 0.5f, 128);
-  OperandType type26(Type::TENSOR_QUANT8_ASYMM, {1, 4, 2, 1}, 1.0f, 100);
+  OperandType type12(Type::TENSOR_FLOAT16, {1, 2, 2, 4});
+  OperandType type13(Type::TENSOR_FLOAT16, {4});
+  OperandType type32(Type::TENSOR_FLOAT16, {1, 2, 3, 2});
+  OperandType type33(Type::TENSOR_FLOAT16, {1, 4, 2, 1});
   OperandType type4(Type::INT32, {});
   // Phase 1, operands
-  auto op11 = model->addOperand(&type25);
-  auto op21 = model->addOperand(&type20);
-  auto op31 = model->addOperand(&type21);
+  auto op11 = model->addOperand(&type32);
+  auto op21 = model->addOperand(&type12);
+  auto op31 = model->addOperand(&type13);
   auto param8 = model->addOperand(&type4);
   auto param9 = model->addOperand(&type4);
   auto param10 = model->addOperand(&type4);
   auto param11 = model->addOperand(&type4);
   auto param12 = model->addOperand(&type4);
   auto layout = model->addOperand(&type0);
-  auto op41 = model->addOperand(&type26);
+  auto op41 = model->addOperand(&type33);
+  // Phase 2, operations
+  static _Float16 op21_init[] = {1.0f, 2.0f, 3.0f, 4.0f, -9.0f, 10.0f, -11.0f, 12.0f, 5.0f, 6.0f, 7.0f, 8.0f, 13.0f, -14.0f, 15.0f, -16.0f};
+  model->setOperandValue(op21, op21_init, sizeof(_Float16) * 16);
+  static _Float16 op31_init[] = {1.0f, 2.0f, 3.0f, 4.0f};
+  model->setOperandValue(op31, op31_init, sizeof(_Float16) * 4);
+  static int32_t param8_init[] = {2};
+  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
+  static int32_t param9_init[] = {1};
+  model->setOperandValue(param9, param9_init, sizeof(int32_t) * 1);
+  static int32_t param10_init[] = {1};
+  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
+  static int32_t param11_init[] = {2};
+  model->setOperandValue(param11, param11_init, sizeof(int32_t) * 1);
+  static int32_t param12_init[] = {0};
+  model->setOperandValue(param12, param12_init, sizeof(int32_t) * 1);
+  static bool layout_init[] = {true};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op11, op21, op31, param8, param9, param10, param11, param12, layout}, {op41});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op11},
+    {op41});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nchw_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nchw_quant8_2(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type27(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.5f, 128);
+  OperandType type28(Type::TENSOR_INT32, {4}, 0.25f, 0);
+  OperandType type34(Type::TENSOR_QUANT8_ASYMM, {1, 2, 3, 2}, 0.5f, 128);
+  OperandType type35(Type::TENSOR_QUANT8_ASYMM, {1, 4, 2, 1}, 1.0f, 100);
+  OperandType type4(Type::INT32, {});
+  // Phase 1, operands
+  auto op11 = model->addOperand(&type34);
+  auto op21 = model->addOperand(&type27);
+  auto op31 = model->addOperand(&type28);
+  auto param8 = model->addOperand(&type4);
+  auto param9 = model->addOperand(&type4);
+  auto param10 = model->addOperand(&type4);
+  auto param11 = model->addOperand(&type4);
+  auto param12 = model->addOperand(&type4);
+  auto layout = model->addOperand(&type0);
+  auto op41 = model->addOperand(&type35);
   // Phase 2, operations
   static uint8_t op21_init[] = {130, 132, 134, 136, 110, 148, 106, 152, 138, 140, 142, 144, 154, 100, 158, 96};
   model->setOperandValue(op21, op21_init, sizeof(uint8_t) * 16);
@@ -1093,12 +1451,12 @@
 void CreateModel_nchw_weight_as_input_2(Model *model) {
   OperandType type0(Type::BOOL, {});
   OperandType type2(Type::TENSOR_FLOAT32, {1, 2, 2, 4});
-  OperandType type23(Type::TENSOR_FLOAT32, {1, 2, 3, 2});
-  OperandType type24(Type::TENSOR_FLOAT32, {1, 4, 2, 1});
   OperandType type3(Type::TENSOR_FLOAT32, {4});
+  OperandType type30(Type::TENSOR_FLOAT32, {1, 2, 3, 2});
+  OperandType type31(Type::TENSOR_FLOAT32, {1, 4, 2, 1});
   OperandType type4(Type::INT32, {});
   // Phase 1, operands
-  auto op11 = model->addOperand(&type23);
+  auto op11 = model->addOperand(&type30);
   auto op21 = model->addOperand(&type2);
   auto op31 = model->addOperand(&type3);
   auto param8 = model->addOperand(&type4);
@@ -1107,7 +1465,7 @@
   auto param11 = model->addOperand(&type4);
   auto param12 = model->addOperand(&type4);
   auto layout = model->addOperand(&type0);
-  auto op41 = model->addOperand(&type24);
+  auto op41 = model->addOperand(&type31);
   // Phase 2, operations
   static int32_t param8_init[] = {2};
   model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
@@ -1137,12 +1495,12 @@
 void CreateModel_nchw_weight_as_input_relaxed_2(Model *model) {
   OperandType type0(Type::BOOL, {});
   OperandType type2(Type::TENSOR_FLOAT32, {1, 2, 2, 4});
-  OperandType type23(Type::TENSOR_FLOAT32, {1, 2, 3, 2});
-  OperandType type24(Type::TENSOR_FLOAT32, {1, 4, 2, 1});
   OperandType type3(Type::TENSOR_FLOAT32, {4});
+  OperandType type30(Type::TENSOR_FLOAT32, {1, 2, 3, 2});
+  OperandType type31(Type::TENSOR_FLOAT32, {1, 4, 2, 1});
   OperandType type4(Type::INT32, {});
   // Phase 1, operands
-  auto op11 = model->addOperand(&type23);
+  auto op11 = model->addOperand(&type30);
   auto op21 = model->addOperand(&type2);
   auto op31 = model->addOperand(&type3);
   auto param8 = model->addOperand(&type4);
@@ -1151,7 +1509,7 @@
   auto param11 = model->addOperand(&type4);
   auto param12 = model->addOperand(&type4);
   auto layout = model->addOperand(&type0);
-  auto op41 = model->addOperand(&type24);
+  auto op41 = model->addOperand(&type31);
   // Phase 2, operations
   static int32_t param8_init[] = {2};
   model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
@@ -1180,24 +1538,68 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_nchw_weight_as_input_quant8_2(Model *model) {
+void CreateModel_nchw_weight_as_input_float16_2(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type20(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.5f, 128);
-  OperandType type21(Type::TENSOR_INT32, {4}, 0.25f, 0);
-  OperandType type25(Type::TENSOR_QUANT8_ASYMM, {1, 2, 3, 2}, 0.5f, 128);
-  OperandType type26(Type::TENSOR_QUANT8_ASYMM, {1, 4, 2, 1}, 1.0f, 100);
+  OperandType type12(Type::TENSOR_FLOAT16, {1, 2, 2, 4});
+  OperandType type13(Type::TENSOR_FLOAT16, {4});
+  OperandType type32(Type::TENSOR_FLOAT16, {1, 2, 3, 2});
+  OperandType type33(Type::TENSOR_FLOAT16, {1, 4, 2, 1});
   OperandType type4(Type::INT32, {});
   // Phase 1, operands
-  auto op11 = model->addOperand(&type25);
-  auto op21 = model->addOperand(&type20);
-  auto op31 = model->addOperand(&type21);
+  auto op11 = model->addOperand(&type32);
+  auto op21 = model->addOperand(&type12);
+  auto op31 = model->addOperand(&type13);
   auto param8 = model->addOperand(&type4);
   auto param9 = model->addOperand(&type4);
   auto param10 = model->addOperand(&type4);
   auto param11 = model->addOperand(&type4);
   auto param12 = model->addOperand(&type4);
   auto layout = model->addOperand(&type0);
-  auto op41 = model->addOperand(&type26);
+  auto op41 = model->addOperand(&type33);
+  // Phase 2, operations
+  static int32_t param8_init[] = {2};
+  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
+  static int32_t param9_init[] = {1};
+  model->setOperandValue(param9, param9_init, sizeof(int32_t) * 1);
+  static int32_t param10_init[] = {1};
+  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
+  static int32_t param11_init[] = {2};
+  model->setOperandValue(param11, param11_init, sizeof(int32_t) * 1);
+  static int32_t param12_init[] = {0};
+  model->setOperandValue(param12, param12_init, sizeof(int32_t) * 1);
+  static bool layout_init[] = {true};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op11, op21, op31, param8, param9, param10, param11, param12, layout}, {op41});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op11, op21, op31},
+    {op41});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nchw_weight_as_input_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nchw_weight_as_input_quant8_2(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type27(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.5f, 128);
+  OperandType type28(Type::TENSOR_INT32, {4}, 0.25f, 0);
+  OperandType type34(Type::TENSOR_QUANT8_ASYMM, {1, 2, 3, 2}, 0.5f, 128);
+  OperandType type35(Type::TENSOR_QUANT8_ASYMM, {1, 4, 2, 1}, 1.0f, 100);
+  OperandType type4(Type::INT32, {});
+  // Phase 1, operands
+  auto op11 = model->addOperand(&type34);
+  auto op21 = model->addOperand(&type27);
+  auto op31 = model->addOperand(&type28);
+  auto param8 = model->addOperand(&type4);
+  auto param9 = model->addOperand(&type4);
+  auto param10 = model->addOperand(&type4);
+  auto param11 = model->addOperand(&type4);
+  auto param12 = model->addOperand(&type4);
+  auto layout = model->addOperand(&type0);
+  auto op41 = model->addOperand(&type35);
   // Phase 2, operations
   static int32_t param8_init[] = {2};
   model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
@@ -1338,17 +1740,16 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_large_nhwc_quant8(Model *model) {
+void CreateModel_large_nhwc_float16(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type27(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 100);
-  OperandType type28(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.125f, 128);
-  OperandType type29(Type::TENSOR_INT32, {2}, 0.0625f, 0);
-  OperandType type30(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 2}, 2.0f, 128);
+  OperandType type36(Type::TENSOR_FLOAT16, {1, 2, 2, 2});
+  OperandType type37(Type::TENSOR_FLOAT16, {2});
+  OperandType type38(Type::TENSOR_FLOAT16, {1, 1, 1, 2});
   OperandType type4(Type::INT32, {});
   // Phase 1, operands
-  auto op12 = model->addOperand(&type27);
-  auto op22 = model->addOperand(&type28);
-  auto op32 = model->addOperand(&type29);
+  auto op12 = model->addOperand(&type36);
+  auto op22 = model->addOperand(&type36);
+  auto op32 = model->addOperand(&type37);
   auto param13 = model->addOperand(&type4);
   auto param14 = model->addOperand(&type4);
   auto param15 = model->addOperand(&type4);
@@ -1358,7 +1759,64 @@
   auto param19 = model->addOperand(&type4);
   auto param20 = model->addOperand(&type4);
   auto layout = model->addOperand(&type0);
-  auto op42 = model->addOperand(&type30);
+  auto op42 = model->addOperand(&type38);
+  // Phase 2, operations
+  static _Float16 op22_init[] = {0.25f, 0.0f, 0.25f, 1.0f, 0.25f, 0.0f, 0.25f, 1.0f};
+  model->setOperandValue(op22, op22_init, sizeof(_Float16) * 8);
+  static _Float16 op32_init[] = {100.0f, 200.0f};
+  model->setOperandValue(op32, op32_init, sizeof(_Float16) * 2);
+  static int32_t param13_init[] = {0};
+  model->setOperandValue(param13, param13_init, sizeof(int32_t) * 1);
+  static int32_t param14_init[] = {0};
+  model->setOperandValue(param14, param14_init, sizeof(int32_t) * 1);
+  static int32_t param15_init[] = {0};
+  model->setOperandValue(param15, param15_init, sizeof(int32_t) * 1);
+  static int32_t param16_init[] = {0};
+  model->setOperandValue(param16, param16_init, sizeof(int32_t) * 1);
+  static int32_t param17_init[] = {1};
+  model->setOperandValue(param17, param17_init, sizeof(int32_t) * 1);
+  static int32_t param18_init[] = {1};
+  model->setOperandValue(param18, param18_init, sizeof(int32_t) * 1);
+  static int32_t param19_init[] = {1};
+  model->setOperandValue(param19, param19_init, sizeof(int32_t) * 1);
+  static int32_t param20_init[] = {0};
+  model->setOperandValue(param20, param20_init, sizeof(int32_t) * 1);
+  static bool layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op12, op22, op32, param13, param14, param15, param16, param17, param18, param19, param20, layout}, {op42});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op12},
+    {op42});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_large_nhwc_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_large_nhwc_quant8(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type39(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 100);
+  OperandType type4(Type::INT32, {});
+  OperandType type40(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.125f, 128);
+  OperandType type41(Type::TENSOR_INT32, {2}, 0.0625f, 0);
+  OperandType type42(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 2}, 2.0f, 128);
+  // Phase 1, operands
+  auto op12 = model->addOperand(&type39);
+  auto op22 = model->addOperand(&type40);
+  auto op32 = model->addOperand(&type41);
+  auto param13 = model->addOperand(&type4);
+  auto param14 = model->addOperand(&type4);
+  auto param15 = model->addOperand(&type4);
+  auto param16 = model->addOperand(&type4);
+  auto param17 = model->addOperand(&type4);
+  auto param18 = model->addOperand(&type4);
+  auto param19 = model->addOperand(&type4);
+  auto param20 = model->addOperand(&type4);
+  auto layout = model->addOperand(&type0);
+  auto op42 = model->addOperand(&type42);
   // Phase 2, operations
   static uint8_t op22_init[] = {130, 128, 130, 136, 130, 128, 130, 136};
   model->setOperandValue(op22, op22_init, sizeof(uint8_t) * 8);
@@ -1501,17 +1959,16 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_large_nhwc_weight_as_input_quant8(Model *model) {
+void CreateModel_large_nhwc_weight_as_input_float16(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type27(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 100);
-  OperandType type28(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.125f, 128);
-  OperandType type29(Type::TENSOR_INT32, {2}, 0.0625f, 0);
-  OperandType type30(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 2}, 2.0f, 128);
+  OperandType type36(Type::TENSOR_FLOAT16, {1, 2, 2, 2});
+  OperandType type37(Type::TENSOR_FLOAT16, {2});
+  OperandType type38(Type::TENSOR_FLOAT16, {1, 1, 1, 2});
   OperandType type4(Type::INT32, {});
   // Phase 1, operands
-  auto op12 = model->addOperand(&type27);
-  auto op22 = model->addOperand(&type28);
-  auto op32 = model->addOperand(&type29);
+  auto op12 = model->addOperand(&type36);
+  auto op22 = model->addOperand(&type36);
+  auto op32 = model->addOperand(&type37);
   auto param13 = model->addOperand(&type4);
   auto param14 = model->addOperand(&type4);
   auto param15 = model->addOperand(&type4);
@@ -1521,7 +1978,60 @@
   auto param19 = model->addOperand(&type4);
   auto param20 = model->addOperand(&type4);
   auto layout = model->addOperand(&type0);
-  auto op42 = model->addOperand(&type30);
+  auto op42 = model->addOperand(&type38);
+  // Phase 2, operations
+  static int32_t param13_init[] = {0};
+  model->setOperandValue(param13, param13_init, sizeof(int32_t) * 1);
+  static int32_t param14_init[] = {0};
+  model->setOperandValue(param14, param14_init, sizeof(int32_t) * 1);
+  static int32_t param15_init[] = {0};
+  model->setOperandValue(param15, param15_init, sizeof(int32_t) * 1);
+  static int32_t param16_init[] = {0};
+  model->setOperandValue(param16, param16_init, sizeof(int32_t) * 1);
+  static int32_t param17_init[] = {1};
+  model->setOperandValue(param17, param17_init, sizeof(int32_t) * 1);
+  static int32_t param18_init[] = {1};
+  model->setOperandValue(param18, param18_init, sizeof(int32_t) * 1);
+  static int32_t param19_init[] = {1};
+  model->setOperandValue(param19, param19_init, sizeof(int32_t) * 1);
+  static int32_t param20_init[] = {0};
+  model->setOperandValue(param20, param20_init, sizeof(int32_t) * 1);
+  static bool layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op12, op22, op32, param13, param14, param15, param16, param17, param18, param19, param20, layout}, {op42});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op12, op22, op32},
+    {op42});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_large_nhwc_weight_as_input_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_large_nhwc_weight_as_input_quant8(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type39(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 100);
+  OperandType type4(Type::INT32, {});
+  OperandType type40(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.125f, 128);
+  OperandType type41(Type::TENSOR_INT32, {2}, 0.0625f, 0);
+  OperandType type42(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 2}, 2.0f, 128);
+  // Phase 1, operands
+  auto op12 = model->addOperand(&type39);
+  auto op22 = model->addOperand(&type40);
+  auto op32 = model->addOperand(&type41);
+  auto param13 = model->addOperand(&type4);
+  auto param14 = model->addOperand(&type4);
+  auto param15 = model->addOperand(&type4);
+  auto param16 = model->addOperand(&type4);
+  auto param17 = model->addOperand(&type4);
+  auto param18 = model->addOperand(&type4);
+  auto param19 = model->addOperand(&type4);
+  auto param20 = model->addOperand(&type4);
+  auto layout = model->addOperand(&type0);
+  auto op42 = model->addOperand(&type42);
   // Phase 2, operations
   static int32_t param13_init[] = {0};
   model->setOperandValue(param13, param13_init, sizeof(int32_t) * 1);
@@ -1556,8 +2066,8 @@
 
 void CreateModel_large_nchw(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type31(Type::TENSOR_FLOAT32, {1, 2, 1, 1});
   OperandType type4(Type::INT32, {});
+  OperandType type43(Type::TENSOR_FLOAT32, {1, 2, 1, 1});
   OperandType type7(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
   OperandType type8(Type::TENSOR_FLOAT32, {2});
   // Phase 1, operands
@@ -1573,7 +2083,7 @@
   auto param19 = model->addOperand(&type4);
   auto param20 = model->addOperand(&type4);
   auto layout = model->addOperand(&type0);
-  auto op42 = model->addOperand(&type31);
+  auto op42 = model->addOperand(&type43);
   // Phase 2, operations
   static float op22_init[] = {0.25f, 0.0f, 0.25f, 1.0f, 0.25f, 0.0f, 0.25f, 1.0f};
   model->setOperandValue(op22, op22_init, sizeof(float) * 8);
@@ -1612,8 +2122,8 @@
 
 void CreateModel_large_nchw_relaxed(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type31(Type::TENSOR_FLOAT32, {1, 2, 1, 1});
   OperandType type4(Type::INT32, {});
+  OperandType type43(Type::TENSOR_FLOAT32, {1, 2, 1, 1});
   OperandType type7(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
   OperandType type8(Type::TENSOR_FLOAT32, {2});
   // Phase 1, operands
@@ -1629,7 +2139,7 @@
   auto param19 = model->addOperand(&type4);
   auto param20 = model->addOperand(&type4);
   auto layout = model->addOperand(&type0);
-  auto op42 = model->addOperand(&type31);
+  auto op42 = model->addOperand(&type43);
   // Phase 2, operations
   static float op22_init[] = {0.25f, 0.0f, 0.25f, 1.0f, 0.25f, 0.0f, 0.25f, 1.0f};
   model->setOperandValue(op22, op22_init, sizeof(float) * 8);
@@ -1668,17 +2178,16 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_large_nchw_quant8(Model *model) {
+void CreateModel_large_nchw_float16(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type27(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 100);
-  OperandType type28(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.125f, 128);
-  OperandType type29(Type::TENSOR_INT32, {2}, 0.0625f, 0);
-  OperandType type32(Type::TENSOR_QUANT8_ASYMM, {1, 2, 1, 1}, 2.0f, 128);
+  OperandType type36(Type::TENSOR_FLOAT16, {1, 2, 2, 2});
+  OperandType type37(Type::TENSOR_FLOAT16, {2});
   OperandType type4(Type::INT32, {});
+  OperandType type44(Type::TENSOR_FLOAT16, {1, 2, 1, 1});
   // Phase 1, operands
-  auto op12 = model->addOperand(&type27);
-  auto op22 = model->addOperand(&type28);
-  auto op32 = model->addOperand(&type29);
+  auto op12 = model->addOperand(&type36);
+  auto op22 = model->addOperand(&type36);
+  auto op32 = model->addOperand(&type37);
   auto param13 = model->addOperand(&type4);
   auto param14 = model->addOperand(&type4);
   auto param15 = model->addOperand(&type4);
@@ -1688,7 +2197,64 @@
   auto param19 = model->addOperand(&type4);
   auto param20 = model->addOperand(&type4);
   auto layout = model->addOperand(&type0);
-  auto op42 = model->addOperand(&type32);
+  auto op42 = model->addOperand(&type44);
+  // Phase 2, operations
+  static _Float16 op22_init[] = {0.25f, 0.0f, 0.25f, 1.0f, 0.25f, 0.0f, 0.25f, 1.0f};
+  model->setOperandValue(op22, op22_init, sizeof(_Float16) * 8);
+  static _Float16 op32_init[] = {100.0f, 200.0f};
+  model->setOperandValue(op32, op32_init, sizeof(_Float16) * 2);
+  static int32_t param13_init[] = {0};
+  model->setOperandValue(param13, param13_init, sizeof(int32_t) * 1);
+  static int32_t param14_init[] = {0};
+  model->setOperandValue(param14, param14_init, sizeof(int32_t) * 1);
+  static int32_t param15_init[] = {0};
+  model->setOperandValue(param15, param15_init, sizeof(int32_t) * 1);
+  static int32_t param16_init[] = {0};
+  model->setOperandValue(param16, param16_init, sizeof(int32_t) * 1);
+  static int32_t param17_init[] = {1};
+  model->setOperandValue(param17, param17_init, sizeof(int32_t) * 1);
+  static int32_t param18_init[] = {1};
+  model->setOperandValue(param18, param18_init, sizeof(int32_t) * 1);
+  static int32_t param19_init[] = {1};
+  model->setOperandValue(param19, param19_init, sizeof(int32_t) * 1);
+  static int32_t param20_init[] = {0};
+  model->setOperandValue(param20, param20_init, sizeof(int32_t) * 1);
+  static bool layout_init[] = {true};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op12, op22, op32, param13, param14, param15, param16, param17, param18, param19, param20, layout}, {op42});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op12},
+    {op42});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_large_nchw_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_large_nchw_quant8(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type39(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 100);
+  OperandType type4(Type::INT32, {});
+  OperandType type40(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.125f, 128);
+  OperandType type41(Type::TENSOR_INT32, {2}, 0.0625f, 0);
+  OperandType type45(Type::TENSOR_QUANT8_ASYMM, {1, 2, 1, 1}, 2.0f, 128);
+  // Phase 1, operands
+  auto op12 = model->addOperand(&type39);
+  auto op22 = model->addOperand(&type40);
+  auto op32 = model->addOperand(&type41);
+  auto param13 = model->addOperand(&type4);
+  auto param14 = model->addOperand(&type4);
+  auto param15 = model->addOperand(&type4);
+  auto param16 = model->addOperand(&type4);
+  auto param17 = model->addOperand(&type4);
+  auto param18 = model->addOperand(&type4);
+  auto param19 = model->addOperand(&type4);
+  auto param20 = model->addOperand(&type4);
+  auto layout = model->addOperand(&type0);
+  auto op42 = model->addOperand(&type45);
   // Phase 2, operations
   static uint8_t op22_init[] = {130, 128, 130, 136, 130, 128, 130, 136};
   model->setOperandValue(op22, op22_init, sizeof(uint8_t) * 8);
@@ -1727,8 +2293,8 @@
 
 void CreateModel_large_nchw_weight_as_input(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type31(Type::TENSOR_FLOAT32, {1, 2, 1, 1});
   OperandType type4(Type::INT32, {});
+  OperandType type43(Type::TENSOR_FLOAT32, {1, 2, 1, 1});
   OperandType type7(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
   OperandType type8(Type::TENSOR_FLOAT32, {2});
   // Phase 1, operands
@@ -1744,7 +2310,7 @@
   auto param19 = model->addOperand(&type4);
   auto param20 = model->addOperand(&type4);
   auto layout = model->addOperand(&type0);
-  auto op42 = model->addOperand(&type31);
+  auto op42 = model->addOperand(&type43);
   // Phase 2, operations
   static int32_t param13_init[] = {0};
   model->setOperandValue(param13, param13_init, sizeof(int32_t) * 1);
@@ -1779,8 +2345,8 @@
 
 void CreateModel_large_nchw_weight_as_input_relaxed(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type31(Type::TENSOR_FLOAT32, {1, 2, 1, 1});
   OperandType type4(Type::INT32, {});
+  OperandType type43(Type::TENSOR_FLOAT32, {1, 2, 1, 1});
   OperandType type7(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
   OperandType type8(Type::TENSOR_FLOAT32, {2});
   // Phase 1, operands
@@ -1796,7 +2362,7 @@
   auto param19 = model->addOperand(&type4);
   auto param20 = model->addOperand(&type4);
   auto layout = model->addOperand(&type0);
-  auto op42 = model->addOperand(&type31);
+  auto op42 = model->addOperand(&type43);
   // Phase 2, operations
   static int32_t param13_init[] = {0};
   model->setOperandValue(param13, param13_init, sizeof(int32_t) * 1);
@@ -1831,17 +2397,16 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_large_nchw_weight_as_input_quant8(Model *model) {
+void CreateModel_large_nchw_weight_as_input_float16(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type27(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 100);
-  OperandType type28(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.125f, 128);
-  OperandType type29(Type::TENSOR_INT32, {2}, 0.0625f, 0);
-  OperandType type32(Type::TENSOR_QUANT8_ASYMM, {1, 2, 1, 1}, 2.0f, 128);
+  OperandType type36(Type::TENSOR_FLOAT16, {1, 2, 2, 2});
+  OperandType type37(Type::TENSOR_FLOAT16, {2});
   OperandType type4(Type::INT32, {});
+  OperandType type44(Type::TENSOR_FLOAT16, {1, 2, 1, 1});
   // Phase 1, operands
-  auto op12 = model->addOperand(&type27);
-  auto op22 = model->addOperand(&type28);
-  auto op32 = model->addOperand(&type29);
+  auto op12 = model->addOperand(&type36);
+  auto op22 = model->addOperand(&type36);
+  auto op32 = model->addOperand(&type37);
   auto param13 = model->addOperand(&type4);
   auto param14 = model->addOperand(&type4);
   auto param15 = model->addOperand(&type4);
@@ -1851,7 +2416,60 @@
   auto param19 = model->addOperand(&type4);
   auto param20 = model->addOperand(&type4);
   auto layout = model->addOperand(&type0);
-  auto op42 = model->addOperand(&type32);
+  auto op42 = model->addOperand(&type44);
+  // Phase 2, operations
+  static int32_t param13_init[] = {0};
+  model->setOperandValue(param13, param13_init, sizeof(int32_t) * 1);
+  static int32_t param14_init[] = {0};
+  model->setOperandValue(param14, param14_init, sizeof(int32_t) * 1);
+  static int32_t param15_init[] = {0};
+  model->setOperandValue(param15, param15_init, sizeof(int32_t) * 1);
+  static int32_t param16_init[] = {0};
+  model->setOperandValue(param16, param16_init, sizeof(int32_t) * 1);
+  static int32_t param17_init[] = {1};
+  model->setOperandValue(param17, param17_init, sizeof(int32_t) * 1);
+  static int32_t param18_init[] = {1};
+  model->setOperandValue(param18, param18_init, sizeof(int32_t) * 1);
+  static int32_t param19_init[] = {1};
+  model->setOperandValue(param19, param19_init, sizeof(int32_t) * 1);
+  static int32_t param20_init[] = {0};
+  model->setOperandValue(param20, param20_init, sizeof(int32_t) * 1);
+  static bool layout_init[] = {true};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op12, op22, op32, param13, param14, param15, param16, param17, param18, param19, param20, layout}, {op42});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op12, op22, op32},
+    {op42});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_large_nchw_weight_as_input_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_large_nchw_weight_as_input_quant8(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type39(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 100);
+  OperandType type4(Type::INT32, {});
+  OperandType type40(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.125f, 128);
+  OperandType type41(Type::TENSOR_INT32, {2}, 0.0625f, 0);
+  OperandType type45(Type::TENSOR_QUANT8_ASYMM, {1, 2, 1, 1}, 2.0f, 128);
+  // Phase 1, operands
+  auto op12 = model->addOperand(&type39);
+  auto op22 = model->addOperand(&type40);
+  auto op32 = model->addOperand(&type41);
+  auto param13 = model->addOperand(&type4);
+  auto param14 = model->addOperand(&type4);
+  auto param15 = model->addOperand(&type4);
+  auto param16 = model->addOperand(&type4);
+  auto param17 = model->addOperand(&type4);
+  auto param18 = model->addOperand(&type4);
+  auto param19 = model->addOperand(&type4);
+  auto param20 = model->addOperand(&type4);
+  auto layout = model->addOperand(&type0);
+  auto op42 = model->addOperand(&type45);
   // Phase 2, operations
   static int32_t param13_init[] = {0};
   model->setOperandValue(param13, param13_init, sizeof(int32_t) * 1);
@@ -1998,17 +2616,16 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_large_nhwc_quant8_2(Model *model) {
+void CreateModel_large_nhwc_float16_2(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type20(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.5f, 128);
-  OperandType type33(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.25f, 0);
-  OperandType type34(Type::TENSOR_INT32, {4}, 0.125f, 0);
-  OperandType type35(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 4}, 50.0f, 0);
+  OperandType type12(Type::TENSOR_FLOAT16, {1, 2, 2, 4});
+  OperandType type13(Type::TENSOR_FLOAT16, {4});
   OperandType type4(Type::INT32, {});
+  OperandType type46(Type::TENSOR_FLOAT16, {1, 1, 1, 4});
   // Phase 1, operands
-  auto op13 = model->addOperand(&type20);
-  auto op23 = model->addOperand(&type33);
-  auto op33 = model->addOperand(&type34);
+  auto op13 = model->addOperand(&type12);
+  auto op23 = model->addOperand(&type12);
+  auto op33 = model->addOperand(&type13);
   auto param21 = model->addOperand(&type4);
   auto param22 = model->addOperand(&type4);
   auto param23 = model->addOperand(&type4);
@@ -2018,7 +2635,64 @@
   auto param27 = model->addOperand(&type4);
   auto param28 = model->addOperand(&type4);
   auto layout = model->addOperand(&type0);
-  auto op43 = model->addOperand(&type35);
+  auto op43 = model->addOperand(&type46);
+  // Phase 2, operations
+  static _Float16 op23_init[] = {0.25f, 0.0f, 10.0f, 50.0f, 0.25f, 1.0f, 20.0f, 50.0f, 0.25f, 0.0f, 30.0f, 50.0f, 0.25f, 1.0f, 40.0f, 50.0f};
+  model->setOperandValue(op23, op23_init, sizeof(_Float16) * 16);
+  static _Float16 op33_init[] = {6000.0f, 7000.0f, 8000.0f, 9000.0f};
+  model->setOperandValue(op33, op33_init, sizeof(_Float16) * 4);
+  static int32_t param21_init[] = {0};
+  model->setOperandValue(param21, param21_init, sizeof(int32_t) * 1);
+  static int32_t param22_init[] = {0};
+  model->setOperandValue(param22, param22_init, sizeof(int32_t) * 1);
+  static int32_t param23_init[] = {0};
+  model->setOperandValue(param23, param23_init, sizeof(int32_t) * 1);
+  static int32_t param24_init[] = {0};
+  model->setOperandValue(param24, param24_init, sizeof(int32_t) * 1);
+  static int32_t param25_init[] = {1};
+  model->setOperandValue(param25, param25_init, sizeof(int32_t) * 1);
+  static int32_t param26_init[] = {1};
+  model->setOperandValue(param26, param26_init, sizeof(int32_t) * 1);
+  static int32_t param27_init[] = {1};
+  model->setOperandValue(param27, param27_init, sizeof(int32_t) * 1);
+  static int32_t param28_init[] = {0};
+  model->setOperandValue(param28, param28_init, sizeof(int32_t) * 1);
+  static bool layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op13, op23, op33, param21, param22, param23, param24, param25, param26, param27, param28, layout}, {op43});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op13},
+    {op43});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_large_nhwc_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_large_nhwc_quant8_2(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type27(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.5f, 128);
+  OperandType type4(Type::INT32, {});
+  OperandType type47(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.25f, 0);
+  OperandType type48(Type::TENSOR_INT32, {4}, 0.125f, 0);
+  OperandType type49(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 4}, 50.0f, 0);
+  // Phase 1, operands
+  auto op13 = model->addOperand(&type27);
+  auto op23 = model->addOperand(&type47);
+  auto op33 = model->addOperand(&type48);
+  auto param21 = model->addOperand(&type4);
+  auto param22 = model->addOperand(&type4);
+  auto param23 = model->addOperand(&type4);
+  auto param24 = model->addOperand(&type4);
+  auto param25 = model->addOperand(&type4);
+  auto param26 = model->addOperand(&type4);
+  auto param27 = model->addOperand(&type4);
+  auto param28 = model->addOperand(&type4);
+  auto layout = model->addOperand(&type0);
+  auto op43 = model->addOperand(&type49);
   // Phase 2, operations
   static uint8_t op23_init[] = {1, 0, 40, 200, 1, 4, 80, 200, 1, 0, 120, 200, 1, 4, 160, 200};
   model->setOperandValue(op23, op23_init, sizeof(uint8_t) * 16);
@@ -2161,17 +2835,16 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_large_nhwc_weight_as_input_quant8_2(Model *model) {
+void CreateModel_large_nhwc_weight_as_input_float16_2(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type20(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.5f, 128);
-  OperandType type33(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.25f, 0);
-  OperandType type34(Type::TENSOR_INT32, {4}, 0.125f, 0);
-  OperandType type35(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 4}, 50.0f, 0);
+  OperandType type12(Type::TENSOR_FLOAT16, {1, 2, 2, 4});
+  OperandType type13(Type::TENSOR_FLOAT16, {4});
   OperandType type4(Type::INT32, {});
+  OperandType type46(Type::TENSOR_FLOAT16, {1, 1, 1, 4});
   // Phase 1, operands
-  auto op13 = model->addOperand(&type20);
-  auto op23 = model->addOperand(&type33);
-  auto op33 = model->addOperand(&type34);
+  auto op13 = model->addOperand(&type12);
+  auto op23 = model->addOperand(&type12);
+  auto op33 = model->addOperand(&type13);
   auto param21 = model->addOperand(&type4);
   auto param22 = model->addOperand(&type4);
   auto param23 = model->addOperand(&type4);
@@ -2181,7 +2854,60 @@
   auto param27 = model->addOperand(&type4);
   auto param28 = model->addOperand(&type4);
   auto layout = model->addOperand(&type0);
-  auto op43 = model->addOperand(&type35);
+  auto op43 = model->addOperand(&type46);
+  // Phase 2, operations
+  static int32_t param21_init[] = {0};
+  model->setOperandValue(param21, param21_init, sizeof(int32_t) * 1);
+  static int32_t param22_init[] = {0};
+  model->setOperandValue(param22, param22_init, sizeof(int32_t) * 1);
+  static int32_t param23_init[] = {0};
+  model->setOperandValue(param23, param23_init, sizeof(int32_t) * 1);
+  static int32_t param24_init[] = {0};
+  model->setOperandValue(param24, param24_init, sizeof(int32_t) * 1);
+  static int32_t param25_init[] = {1};
+  model->setOperandValue(param25, param25_init, sizeof(int32_t) * 1);
+  static int32_t param26_init[] = {1};
+  model->setOperandValue(param26, param26_init, sizeof(int32_t) * 1);
+  static int32_t param27_init[] = {1};
+  model->setOperandValue(param27, param27_init, sizeof(int32_t) * 1);
+  static int32_t param28_init[] = {0};
+  model->setOperandValue(param28, param28_init, sizeof(int32_t) * 1);
+  static bool layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op13, op23, op33, param21, param22, param23, param24, param25, param26, param27, param28, layout}, {op43});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op13, op23, op33},
+    {op43});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_large_nhwc_weight_as_input_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_large_nhwc_weight_as_input_quant8_2(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type27(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.5f, 128);
+  OperandType type4(Type::INT32, {});
+  OperandType type47(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.25f, 0);
+  OperandType type48(Type::TENSOR_INT32, {4}, 0.125f, 0);
+  OperandType type49(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 4}, 50.0f, 0);
+  // Phase 1, operands
+  auto op13 = model->addOperand(&type27);
+  auto op23 = model->addOperand(&type47);
+  auto op33 = model->addOperand(&type48);
+  auto param21 = model->addOperand(&type4);
+  auto param22 = model->addOperand(&type4);
+  auto param23 = model->addOperand(&type4);
+  auto param24 = model->addOperand(&type4);
+  auto param25 = model->addOperand(&type4);
+  auto param26 = model->addOperand(&type4);
+  auto param27 = model->addOperand(&type4);
+  auto param28 = model->addOperand(&type4);
+  auto layout = model->addOperand(&type0);
+  auto op43 = model->addOperand(&type49);
   // Phase 2, operations
   static int32_t param21_init[] = {0};
   model->setOperandValue(param21, param21_init, sizeof(int32_t) * 1);
@@ -2216,13 +2942,13 @@
 
 void CreateModel_large_nchw_2(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type16(Type::TENSOR_FLOAT32, {1, 4, 2, 2});
+  OperandType type19(Type::TENSOR_FLOAT32, {1, 4, 2, 2});
   OperandType type2(Type::TENSOR_FLOAT32, {1, 2, 2, 4});
   OperandType type3(Type::TENSOR_FLOAT32, {4});
-  OperandType type36(Type::TENSOR_FLOAT32, {1, 4, 1, 1});
   OperandType type4(Type::INT32, {});
+  OperandType type50(Type::TENSOR_FLOAT32, {1, 4, 1, 1});
   // Phase 1, operands
-  auto op13 = model->addOperand(&type16);
+  auto op13 = model->addOperand(&type19);
   auto op23 = model->addOperand(&type2);
   auto op33 = model->addOperand(&type3);
   auto param21 = model->addOperand(&type4);
@@ -2234,7 +2960,7 @@
   auto param27 = model->addOperand(&type4);
   auto param28 = model->addOperand(&type4);
   auto layout = model->addOperand(&type0);
-  auto op43 = model->addOperand(&type36);
+  auto op43 = model->addOperand(&type50);
   // Phase 2, operations
   static float op23_init[] = {0.25f, 0.0f, 10.0f, 50.0f, 0.25f, 1.0f, 20.0f, 50.0f, 0.25f, 0.0f, 30.0f, 50.0f, 0.25f, 1.0f, 40.0f, 50.0f};
   model->setOperandValue(op23, op23_init, sizeof(float) * 16);
@@ -2273,13 +2999,13 @@
 
 void CreateModel_large_nchw_relaxed_2(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type16(Type::TENSOR_FLOAT32, {1, 4, 2, 2});
+  OperandType type19(Type::TENSOR_FLOAT32, {1, 4, 2, 2});
   OperandType type2(Type::TENSOR_FLOAT32, {1, 2, 2, 4});
   OperandType type3(Type::TENSOR_FLOAT32, {4});
-  OperandType type36(Type::TENSOR_FLOAT32, {1, 4, 1, 1});
   OperandType type4(Type::INT32, {});
+  OperandType type50(Type::TENSOR_FLOAT32, {1, 4, 1, 1});
   // Phase 1, operands
-  auto op13 = model->addOperand(&type16);
+  auto op13 = model->addOperand(&type19);
   auto op23 = model->addOperand(&type2);
   auto op33 = model->addOperand(&type3);
   auto param21 = model->addOperand(&type4);
@@ -2291,7 +3017,7 @@
   auto param27 = model->addOperand(&type4);
   auto param28 = model->addOperand(&type4);
   auto layout = model->addOperand(&type0);
-  auto op43 = model->addOperand(&type36);
+  auto op43 = model->addOperand(&type50);
   // Phase 2, operations
   static float op23_init[] = {0.25f, 0.0f, 10.0f, 50.0f, 0.25f, 1.0f, 20.0f, 50.0f, 0.25f, 0.0f, 30.0f, 50.0f, 0.25f, 1.0f, 40.0f, 50.0f};
   model->setOperandValue(op23, op23_init, sizeof(float) * 16);
@@ -2330,17 +3056,17 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_large_nchw_quant8_2(Model *model) {
+void CreateModel_large_nchw_float16_2(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type33(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.25f, 0);
-  OperandType type34(Type::TENSOR_INT32, {4}, 0.125f, 0);
-  OperandType type37(Type::TENSOR_QUANT8_ASYMM, {1, 4, 2, 2}, 0.5f, 128);
-  OperandType type38(Type::TENSOR_QUANT8_ASYMM, {1, 4, 1, 1}, 50.0f, 0);
+  OperandType type12(Type::TENSOR_FLOAT16, {1, 2, 2, 4});
+  OperandType type13(Type::TENSOR_FLOAT16, {4});
+  OperandType type21(Type::TENSOR_FLOAT16, {1, 4, 2, 2});
   OperandType type4(Type::INT32, {});
+  OperandType type51(Type::TENSOR_FLOAT16, {1, 4, 1, 1});
   // Phase 1, operands
-  auto op13 = model->addOperand(&type37);
-  auto op23 = model->addOperand(&type33);
-  auto op33 = model->addOperand(&type34);
+  auto op13 = model->addOperand(&type21);
+  auto op23 = model->addOperand(&type12);
+  auto op33 = model->addOperand(&type13);
   auto param21 = model->addOperand(&type4);
   auto param22 = model->addOperand(&type4);
   auto param23 = model->addOperand(&type4);
@@ -2350,7 +3076,64 @@
   auto param27 = model->addOperand(&type4);
   auto param28 = model->addOperand(&type4);
   auto layout = model->addOperand(&type0);
-  auto op43 = model->addOperand(&type38);
+  auto op43 = model->addOperand(&type51);
+  // Phase 2, operations
+  static _Float16 op23_init[] = {0.25f, 0.0f, 10.0f, 50.0f, 0.25f, 1.0f, 20.0f, 50.0f, 0.25f, 0.0f, 30.0f, 50.0f, 0.25f, 1.0f, 40.0f, 50.0f};
+  model->setOperandValue(op23, op23_init, sizeof(_Float16) * 16);
+  static _Float16 op33_init[] = {6000.0f, 7000.0f, 8000.0f, 9000.0f};
+  model->setOperandValue(op33, op33_init, sizeof(_Float16) * 4);
+  static int32_t param21_init[] = {0};
+  model->setOperandValue(param21, param21_init, sizeof(int32_t) * 1);
+  static int32_t param22_init[] = {0};
+  model->setOperandValue(param22, param22_init, sizeof(int32_t) * 1);
+  static int32_t param23_init[] = {0};
+  model->setOperandValue(param23, param23_init, sizeof(int32_t) * 1);
+  static int32_t param24_init[] = {0};
+  model->setOperandValue(param24, param24_init, sizeof(int32_t) * 1);
+  static int32_t param25_init[] = {1};
+  model->setOperandValue(param25, param25_init, sizeof(int32_t) * 1);
+  static int32_t param26_init[] = {1};
+  model->setOperandValue(param26, param26_init, sizeof(int32_t) * 1);
+  static int32_t param27_init[] = {1};
+  model->setOperandValue(param27, param27_init, sizeof(int32_t) * 1);
+  static int32_t param28_init[] = {0};
+  model->setOperandValue(param28, param28_init, sizeof(int32_t) * 1);
+  static bool layout_init[] = {true};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op13, op23, op33, param21, param22, param23, param24, param25, param26, param27, param28, layout}, {op43});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op13},
+    {op43});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_large_nchw_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_large_nchw_quant8_2(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type4(Type::INT32, {});
+  OperandType type47(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.25f, 0);
+  OperandType type48(Type::TENSOR_INT32, {4}, 0.125f, 0);
+  OperandType type52(Type::TENSOR_QUANT8_ASYMM, {1, 4, 2, 2}, 0.5f, 128);
+  OperandType type53(Type::TENSOR_QUANT8_ASYMM, {1, 4, 1, 1}, 50.0f, 0);
+  // Phase 1, operands
+  auto op13 = model->addOperand(&type52);
+  auto op23 = model->addOperand(&type47);
+  auto op33 = model->addOperand(&type48);
+  auto param21 = model->addOperand(&type4);
+  auto param22 = model->addOperand(&type4);
+  auto param23 = model->addOperand(&type4);
+  auto param24 = model->addOperand(&type4);
+  auto param25 = model->addOperand(&type4);
+  auto param26 = model->addOperand(&type4);
+  auto param27 = model->addOperand(&type4);
+  auto param28 = model->addOperand(&type4);
+  auto layout = model->addOperand(&type0);
+  auto op43 = model->addOperand(&type53);
   // Phase 2, operations
   static uint8_t op23_init[] = {1, 0, 40, 200, 1, 4, 80, 200, 1, 0, 120, 200, 1, 4, 160, 200};
   model->setOperandValue(op23, op23_init, sizeof(uint8_t) * 16);
@@ -2389,13 +3172,13 @@
 
 void CreateModel_large_nchw_weight_as_input_2(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type16(Type::TENSOR_FLOAT32, {1, 4, 2, 2});
+  OperandType type19(Type::TENSOR_FLOAT32, {1, 4, 2, 2});
   OperandType type2(Type::TENSOR_FLOAT32, {1, 2, 2, 4});
   OperandType type3(Type::TENSOR_FLOAT32, {4});
-  OperandType type36(Type::TENSOR_FLOAT32, {1, 4, 1, 1});
   OperandType type4(Type::INT32, {});
+  OperandType type50(Type::TENSOR_FLOAT32, {1, 4, 1, 1});
   // Phase 1, operands
-  auto op13 = model->addOperand(&type16);
+  auto op13 = model->addOperand(&type19);
   auto op23 = model->addOperand(&type2);
   auto op33 = model->addOperand(&type3);
   auto param21 = model->addOperand(&type4);
@@ -2407,7 +3190,7 @@
   auto param27 = model->addOperand(&type4);
   auto param28 = model->addOperand(&type4);
   auto layout = model->addOperand(&type0);
-  auto op43 = model->addOperand(&type36);
+  auto op43 = model->addOperand(&type50);
   // Phase 2, operations
   static int32_t param21_init[] = {0};
   model->setOperandValue(param21, param21_init, sizeof(int32_t) * 1);
@@ -2442,13 +3225,13 @@
 
 void CreateModel_large_nchw_weight_as_input_relaxed_2(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type16(Type::TENSOR_FLOAT32, {1, 4, 2, 2});
+  OperandType type19(Type::TENSOR_FLOAT32, {1, 4, 2, 2});
   OperandType type2(Type::TENSOR_FLOAT32, {1, 2, 2, 4});
   OperandType type3(Type::TENSOR_FLOAT32, {4});
-  OperandType type36(Type::TENSOR_FLOAT32, {1, 4, 1, 1});
   OperandType type4(Type::INT32, {});
+  OperandType type50(Type::TENSOR_FLOAT32, {1, 4, 1, 1});
   // Phase 1, operands
-  auto op13 = model->addOperand(&type16);
+  auto op13 = model->addOperand(&type19);
   auto op23 = model->addOperand(&type2);
   auto op33 = model->addOperand(&type3);
   auto param21 = model->addOperand(&type4);
@@ -2460,7 +3243,7 @@
   auto param27 = model->addOperand(&type4);
   auto param28 = model->addOperand(&type4);
   auto layout = model->addOperand(&type0);
-  auto op43 = model->addOperand(&type36);
+  auto op43 = model->addOperand(&type50);
   // Phase 2, operations
   static int32_t param21_init[] = {0};
   model->setOperandValue(param21, param21_init, sizeof(int32_t) * 1);
@@ -2495,17 +3278,17 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_large_nchw_weight_as_input_quant8_2(Model *model) {
+void CreateModel_large_nchw_weight_as_input_float16_2(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type33(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.25f, 0);
-  OperandType type34(Type::TENSOR_INT32, {4}, 0.125f, 0);
-  OperandType type37(Type::TENSOR_QUANT8_ASYMM, {1, 4, 2, 2}, 0.5f, 128);
-  OperandType type38(Type::TENSOR_QUANT8_ASYMM, {1, 4, 1, 1}, 50.0f, 0);
+  OperandType type12(Type::TENSOR_FLOAT16, {1, 2, 2, 4});
+  OperandType type13(Type::TENSOR_FLOAT16, {4});
+  OperandType type21(Type::TENSOR_FLOAT16, {1, 4, 2, 2});
   OperandType type4(Type::INT32, {});
+  OperandType type51(Type::TENSOR_FLOAT16, {1, 4, 1, 1});
   // Phase 1, operands
-  auto op13 = model->addOperand(&type37);
-  auto op23 = model->addOperand(&type33);
-  auto op33 = model->addOperand(&type34);
+  auto op13 = model->addOperand(&type21);
+  auto op23 = model->addOperand(&type12);
+  auto op33 = model->addOperand(&type13);
   auto param21 = model->addOperand(&type4);
   auto param22 = model->addOperand(&type4);
   auto param23 = model->addOperand(&type4);
@@ -2515,7 +3298,60 @@
   auto param27 = model->addOperand(&type4);
   auto param28 = model->addOperand(&type4);
   auto layout = model->addOperand(&type0);
-  auto op43 = model->addOperand(&type38);
+  auto op43 = model->addOperand(&type51);
+  // Phase 2, operations
+  static int32_t param21_init[] = {0};
+  model->setOperandValue(param21, param21_init, sizeof(int32_t) * 1);
+  static int32_t param22_init[] = {0};
+  model->setOperandValue(param22, param22_init, sizeof(int32_t) * 1);
+  static int32_t param23_init[] = {0};
+  model->setOperandValue(param23, param23_init, sizeof(int32_t) * 1);
+  static int32_t param24_init[] = {0};
+  model->setOperandValue(param24, param24_init, sizeof(int32_t) * 1);
+  static int32_t param25_init[] = {1};
+  model->setOperandValue(param25, param25_init, sizeof(int32_t) * 1);
+  static int32_t param26_init[] = {1};
+  model->setOperandValue(param26, param26_init, sizeof(int32_t) * 1);
+  static int32_t param27_init[] = {1};
+  model->setOperandValue(param27, param27_init, sizeof(int32_t) * 1);
+  static int32_t param28_init[] = {0};
+  model->setOperandValue(param28, param28_init, sizeof(int32_t) * 1);
+  static bool layout_init[] = {true};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op13, op23, op33, param21, param22, param23, param24, param25, param26, param27, param28, layout}, {op43});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op13, op23, op33},
+    {op43});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_large_nchw_weight_as_input_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_large_nchw_weight_as_input_quant8_2(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type4(Type::INT32, {});
+  OperandType type47(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.25f, 0);
+  OperandType type48(Type::TENSOR_INT32, {4}, 0.125f, 0);
+  OperandType type52(Type::TENSOR_QUANT8_ASYMM, {1, 4, 2, 2}, 0.5f, 128);
+  OperandType type53(Type::TENSOR_QUANT8_ASYMM, {1, 4, 1, 1}, 50.0f, 0);
+  // Phase 1, operands
+  auto op13 = model->addOperand(&type52);
+  auto op23 = model->addOperand(&type47);
+  auto op33 = model->addOperand(&type48);
+  auto param21 = model->addOperand(&type4);
+  auto param22 = model->addOperand(&type4);
+  auto param23 = model->addOperand(&type4);
+  auto param24 = model->addOperand(&type4);
+  auto param25 = model->addOperand(&type4);
+  auto param26 = model->addOperand(&type4);
+  auto param27 = model->addOperand(&type4);
+  auto param28 = model->addOperand(&type4);
+  auto layout = model->addOperand(&type0);
+  auto op43 = model->addOperand(&type53);
   // Phase 2, operations
   static int32_t param21_init[] = {0};
   model->setOperandValue(param21, param21_init, sizeof(int32_t) * 1);
diff --git a/nn/runtime/test/generated/models/pad_float16.model.cpp b/nn/runtime/test/generated/models/pad_float16.model.cpp
new file mode 100644
index 0000000..aaa0b76
--- /dev/null
+++ b/nn/runtime/test/generated/models/pad_float16.model.cpp
@@ -0,0 +1,26 @@
+// clang-format off
+// Generated file (from: pad_float16.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT16, {1, 2, 2, 1});
+  OperandType type1(Type::TENSOR_INT32, {4, 2});
+  OperandType type2(Type::TENSOR_FLOAT16, {1, 4, 4, 1});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type1);
+  auto op3 = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t op2_init[] = {0, 0, 1, 1, 1, 1, 0, 0};
+  model->setOperandValue(op2, op2_init, sizeof(int32_t) * 8);
+  model->addOperation(ANEURALNETWORKS_PAD, {op1, op2}, {op3});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1},
+    {op3});
+  assert(model->isValid());
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/models/pad_v2_1_float.model.cpp b/nn/runtime/test/generated/models/pad_v2_1_float.model.cpp
index 47882c3..9009f1b 100644
--- a/nn/runtime/test/generated/models/pad_v2_1_float.model.cpp
+++ b/nn/runtime/test/generated/models/pad_v2_1_float.model.cpp
@@ -28,3 +28,31 @@
   return ignore.find(i) != ignore.end();
 }
 
+void CreateModel_float16(Model *model) {
+  OperandType type1(Type::TENSOR_INT32, {4, 2});
+  OperandType type2(Type::FLOAT32, {});
+  OperandType type4(Type::TENSOR_FLOAT16, {1, 2, 3, 1});
+  OperandType type5(Type::TENSOR_FLOAT16, {1, 4, 7, 1});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type4);
+  auto paddings = model->addOperand(&type1);
+  auto pad_value = model->addOperand(&type2);
+  auto output0 = model->addOperand(&type5);
+  // Phase 2, operations
+  static int32_t paddings_init[] = {0, 0, 0, 2, 1, 3, 0, 0};
+  model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 8);
+  static float pad_value_init[] = {9.3f};
+  model->setOperandValue(pad_value, pad_value_init, sizeof(float) * 1);
+  model->addOperation(ANEURALNETWORKS_PAD_V2, {input0, paddings, pad_value}, {output0});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output0});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/models/reshape_float16.model.cpp b/nn/runtime/test/generated/models/reshape_float16.model.cpp
new file mode 100644
index 0000000..1df1b6a
--- /dev/null
+++ b/nn/runtime/test/generated/models/reshape_float16.model.cpp
@@ -0,0 +1,26 @@
+// clang-format off
+// Generated file (from: reshape_float16.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT16, {1, 1, 3, 3});
+  OperandType type1(Type::TENSOR_INT32, {1});
+  OperandType type2(Type::TENSOR_FLOAT16, {9});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type1);
+  auto op3 = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t op2_init[] = {-1};
+  model->setOperandValue(op2, op2_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_RESHAPE, {op1, op2}, {op3});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1},
+    {op3});
+  assert(model->isValid());
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/models/resize_bilinear_v1_2.model.cpp b/nn/runtime/test/generated/models/resize_bilinear_v1_2.model.cpp
index 1602182..c16c0f1 100644
--- a/nn/runtime/test/generated/models/resize_bilinear_v1_2.model.cpp
+++ b/nn/runtime/test/generated/models/resize_bilinear_v1_2.model.cpp
@@ -64,11 +64,11 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_nchw(Model *model) {
+void CreateModel_nhwc_float16(Model *model) {
   OperandType type0(Type::BOOL, {});
   OperandType type3(Type::INT32, {});
-  OperandType type6(Type::TENSOR_FLOAT32, {1, 1, 2, 2});
-  OperandType type7(Type::TENSOR_FLOAT32, {1, 1, 3, 3});
+  OperandType type6(Type::TENSOR_FLOAT16, {1, 2, 2, 1});
+  OperandType type7(Type::TENSOR_FLOAT16, {1, 3, 3, 1});
   // Phase 1, operands
   auto op1 = model->addOperand(&type6);
   auto param = model->addOperand(&type3);
@@ -80,6 +80,37 @@
   model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
   static int32_t param1_init[] = {3};
   model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
+  static bool layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_RESIZE_BILINEAR, {op1, param, param1, layout}, {op4});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1},
+    {op4});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nhwc_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nchw(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type3(Type::INT32, {});
+  OperandType type8(Type::TENSOR_FLOAT32, {1, 1, 2, 2});
+  OperandType type9(Type::TENSOR_FLOAT32, {1, 1, 3, 3});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type8);
+  auto param = model->addOperand(&type3);
+  auto param1 = model->addOperand(&type3);
+  auto layout = model->addOperand(&type0);
+  auto op4 = model->addOperand(&type9);
+  // Phase 2, operations
+  static int32_t param_init[] = {3};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static int32_t param1_init[] = {3};
+  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
   static bool layout_init[] = {true};
   model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
   model->addOperation(ANEURALNETWORKS_RESIZE_BILINEAR, {op1, param, param1, layout}, {op4});
@@ -98,14 +129,14 @@
 void CreateModel_nchw_relaxed(Model *model) {
   OperandType type0(Type::BOOL, {});
   OperandType type3(Type::INT32, {});
-  OperandType type6(Type::TENSOR_FLOAT32, {1, 1, 2, 2});
-  OperandType type7(Type::TENSOR_FLOAT32, {1, 1, 3, 3});
+  OperandType type8(Type::TENSOR_FLOAT32, {1, 1, 2, 2});
+  OperandType type9(Type::TENSOR_FLOAT32, {1, 1, 3, 3});
   // Phase 1, operands
-  auto op1 = model->addOperand(&type6);
+  auto op1 = model->addOperand(&type8);
   auto param = model->addOperand(&type3);
   auto param1 = model->addOperand(&type3);
   auto layout = model->addOperand(&type0);
-  auto op4 = model->addOperand(&type7);
+  auto op4 = model->addOperand(&type9);
   // Phase 2, operations
   static int32_t param_init[] = {3};
   model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
@@ -128,6 +159,37 @@
   return ignore.find(i) != ignore.end();
 }
 
+void CreateModel_nchw_float16(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type10(Type::TENSOR_FLOAT16, {1, 1, 2, 2});
+  OperandType type11(Type::TENSOR_FLOAT16, {1, 1, 3, 3});
+  OperandType type3(Type::INT32, {});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type10);
+  auto param = model->addOperand(&type3);
+  auto param1 = model->addOperand(&type3);
+  auto layout = model->addOperand(&type0);
+  auto op4 = model->addOperand(&type11);
+  // Phase 2, operations
+  static int32_t param_init[] = {3};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static int32_t param1_init[] = {3};
+  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
+  static bool layout_init[] = {true};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_RESIZE_BILINEAR, {op1, param, param1, layout}, {op4});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1},
+    {op4});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nchw_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
 void CreateModel_nhwc_2(Model *model) {
   OperandType type0(Type::BOOL, {});
   OperandType type3(Type::INT32, {});
@@ -192,17 +254,48 @@
   return ignore.find(i) != ignore.end();
 }
 
+void CreateModel_nhwc_float16_2(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type12(Type::TENSOR_FLOAT16, {1, 2, 2, 2});
+  OperandType type13(Type::TENSOR_FLOAT16, {1, 3, 3, 2});
+  OperandType type3(Type::INT32, {});
+  // Phase 1, operands
+  auto op11 = model->addOperand(&type12);
+  auto param2 = model->addOperand(&type3);
+  auto param3 = model->addOperand(&type3);
+  auto layout = model->addOperand(&type0);
+  auto op41 = model->addOperand(&type13);
+  // Phase 2, operations
+  static int32_t param2_init[] = {3};
+  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
+  static int32_t param3_init[] = {3};
+  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
+  static bool layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_RESIZE_BILINEAR, {op11, param2, param3, layout}, {op41});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op11},
+    {op41});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nhwc_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
 void CreateModel_nchw_2(Model *model) {
   OperandType type0(Type::BOOL, {});
+  OperandType type14(Type::TENSOR_FLOAT32, {1, 2, 3, 3});
   OperandType type3(Type::INT32, {});
   OperandType type4(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
-  OperandType type8(Type::TENSOR_FLOAT32, {1, 2, 3, 3});
   // Phase 1, operands
   auto op11 = model->addOperand(&type4);
   auto param2 = model->addOperand(&type3);
   auto param3 = model->addOperand(&type3);
   auto layout = model->addOperand(&type0);
-  auto op41 = model->addOperand(&type8);
+  auto op41 = model->addOperand(&type14);
   // Phase 2, operations
   static int32_t param2_init[] = {3};
   model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
@@ -225,15 +318,15 @@
 
 void CreateModel_nchw_relaxed_2(Model *model) {
   OperandType type0(Type::BOOL, {});
+  OperandType type14(Type::TENSOR_FLOAT32, {1, 2, 3, 3});
   OperandType type3(Type::INT32, {});
   OperandType type4(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
-  OperandType type8(Type::TENSOR_FLOAT32, {1, 2, 3, 3});
   // Phase 1, operands
   auto op11 = model->addOperand(&type4);
   auto param2 = model->addOperand(&type3);
   auto param3 = model->addOperand(&type3);
   auto layout = model->addOperand(&type0);
-  auto op41 = model->addOperand(&type8);
+  auto op41 = model->addOperand(&type14);
   // Phase 2, operations
   static int32_t param2_init[] = {3};
   model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
@@ -256,3 +349,34 @@
   return ignore.find(i) != ignore.end();
 }
 
+void CreateModel_nchw_float16_2(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type12(Type::TENSOR_FLOAT16, {1, 2, 2, 2});
+  OperandType type15(Type::TENSOR_FLOAT16, {1, 2, 3, 3});
+  OperandType type3(Type::INT32, {});
+  // Phase 1, operands
+  auto op11 = model->addOperand(&type12);
+  auto param2 = model->addOperand(&type3);
+  auto param3 = model->addOperand(&type3);
+  auto layout = model->addOperand(&type0);
+  auto op41 = model->addOperand(&type15);
+  // Phase 2, operations
+  static int32_t param2_init[] = {3};
+  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
+  static int32_t param3_init[] = {3};
+  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
+  static bool layout_init[] = {true};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_RESIZE_BILINEAR, {op11, param2, param3, layout}, {op41});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op11},
+    {op41});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nchw_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/models/roi_pooling.model.cpp b/nn/runtime/test/generated/models/roi_pooling.model.cpp
new file mode 100644
index 0000000..6d77315
--- /dev/null
+++ b/nn/runtime/test/generated/models/roi_pooling.model.cpp
@@ -0,0 +1,418 @@
+// clang-format off
+// Generated file (from: roi_pooling.mod.py). Do not edit
+void CreateModel_nhwc(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type1(Type::TENSOR_FLOAT32, {1, 4, 4, 1});
+  OperandType type2(Type::TENSOR_FLOAT32, {4, 4});
+  OperandType type3(Type::TENSOR_FLOAT32, {4, 2, 2, 1});
+  OperandType type4(Type::TENSOR_INT32, {2});
+  OperandType type5(Type::FLOAT32, {});
+  // Phase 1, operands
+  auto in = model->addOperand(&type1);
+  auto roi = model->addOperand(&type2);
+  auto param = model->addOperand(&type4);
+  auto param1 = model->addOperand(&type5);
+  auto layout = model->addOperand(&type0);
+  auto out = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t param_init[] = {2, 2};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 2);
+  static float param1_init[] = {0.5f};
+  model->setOperandValue(param1, param1_init, sizeof(float) * 1);
+  static bool layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_ROI_POOLING, {in, roi, param, param1, layout}, {out});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in, roi},
+    {out});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nhwc(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nhwc_relaxed(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type1(Type::TENSOR_FLOAT32, {1, 4, 4, 1});
+  OperandType type2(Type::TENSOR_FLOAT32, {4, 4});
+  OperandType type3(Type::TENSOR_FLOAT32, {4, 2, 2, 1});
+  OperandType type4(Type::TENSOR_INT32, {2});
+  OperandType type5(Type::FLOAT32, {});
+  // Phase 1, operands
+  auto in = model->addOperand(&type1);
+  auto roi = model->addOperand(&type2);
+  auto param = model->addOperand(&type4);
+  auto param1 = model->addOperand(&type5);
+  auto layout = model->addOperand(&type0);
+  auto out = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t param_init[] = {2, 2};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 2);
+  static float param1_init[] = {0.5f};
+  model->setOperandValue(param1, param1_init, sizeof(float) * 1);
+  static bool layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_ROI_POOLING, {in, roi, param, param1, layout}, {out});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in, roi},
+    {out});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nhwc_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nhwc_quant8(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type10(Type::TENSOR_QUANT8_ASYMM, {4, 2, 2, 1}, 0.25f, 128);
+  OperandType type2(Type::TENSOR_FLOAT32, {4, 4});
+  OperandType type4(Type::TENSOR_INT32, {2});
+  OperandType type5(Type::FLOAT32, {});
+  OperandType type9(Type::TENSOR_QUANT8_ASYMM, {1, 4, 4, 1}, 0.25f, 128);
+  // Phase 1, operands
+  auto in = model->addOperand(&type9);
+  auto roi = model->addOperand(&type2);
+  auto param = model->addOperand(&type4);
+  auto param1 = model->addOperand(&type5);
+  auto layout = model->addOperand(&type0);
+  auto out = model->addOperand(&type10);
+  // Phase 2, operations
+  static int32_t param_init[] = {2, 2};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 2);
+  static float param1_init[] = {0.5f};
+  model->setOperandValue(param1, param1_init, sizeof(float) * 1);
+  static bool layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_ROI_POOLING, {in, roi, param, param1, layout}, {out});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in, roi},
+    {out});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nhwc_quant8(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nchw(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type11(Type::TENSOR_FLOAT32, {1, 1, 4, 4});
+  OperandType type12(Type::TENSOR_FLOAT32, {4, 1, 2, 2});
+  OperandType type2(Type::TENSOR_FLOAT32, {4, 4});
+  OperandType type4(Type::TENSOR_INT32, {2});
+  OperandType type5(Type::FLOAT32, {});
+  // Phase 1, operands
+  auto in = model->addOperand(&type11);
+  auto roi = model->addOperand(&type2);
+  auto param = model->addOperand(&type4);
+  auto param1 = model->addOperand(&type5);
+  auto layout = model->addOperand(&type0);
+  auto out = model->addOperand(&type12);
+  // Phase 2, operations
+  static int32_t param_init[] = {2, 2};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 2);
+  static float param1_init[] = {0.5f};
+  model->setOperandValue(param1, param1_init, sizeof(float) * 1);
+  static bool layout_init[] = {true};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_ROI_POOLING, {in, roi, param, param1, layout}, {out});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in, roi},
+    {out});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nchw(int i) {  // True iff output index i is excluded from result comparison (none for this test).
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nchw_relaxed(Model *model) {  // ROI_POOLING, NCHW layout, relaxed FP32->FP16 precision.
+  OperandType type0(Type::BOOL, {});
+  OperandType type11(Type::TENSOR_FLOAT32, {1, 1, 4, 4});
+  OperandType type12(Type::TENSOR_FLOAT32, {4, 1, 2, 2});
+  OperandType type2(Type::TENSOR_FLOAT32, {4, 4});
+  OperandType type4(Type::TENSOR_INT32, {2});
+  OperandType type5(Type::FLOAT32, {});
+  // Phase 1, operands
+  auto in = model->addOperand(&type11);
+  auto roi = model->addOperand(&type2);
+  auto param = model->addOperand(&type4);
+  auto param1 = model->addOperand(&type5);
+  auto layout = model->addOperand(&type0);
+  auto out = model->addOperand(&type12);
+  // Phase 2, operations
+  static int32_t param_init[] = {2, 2};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 2);
+  static float param1_init[] = {0.5f};
+  model->setOperandValue(param1, param1_init, sizeof(float) * 1);
+  static bool layout_init[] = {true};  // true in the _nchw variants, false in the _nhwc variants.
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_ROI_POOLING, {in, roi, param, param1, layout}, {out});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in, roi},
+    {out});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nchw_relaxed(int i) {  // No output indices are ignored for this test.
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nchw_quant8(Model *model) {  // ROI_POOLING, NCHW layout, TENSOR_QUANT8_ASYMM variant.
+  OperandType type0(Type::BOOL, {});
+  OperandType type13(Type::TENSOR_QUANT8_ASYMM, {1, 1, 4, 4}, 0.25f, 128);
+  OperandType type14(Type::TENSOR_QUANT8_ASYMM, {4, 1, 2, 2}, 0.25f, 128);
+  OperandType type2(Type::TENSOR_FLOAT32, {4, 4});
+  OperandType type4(Type::TENSOR_INT32, {2});
+  OperandType type5(Type::FLOAT32, {});
+  // Phase 1, operands
+  auto in = model->addOperand(&type13);
+  auto roi = model->addOperand(&type2);
+  auto param = model->addOperand(&type4);
+  auto param1 = model->addOperand(&type5);
+  auto layout = model->addOperand(&type0);
+  auto out = model->addOperand(&type14);
+  // Phase 2, operations
+  static int32_t param_init[] = {2, 2};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 2);
+  static float param1_init[] = {0.5f};
+  model->setOperandValue(param1, param1_init, sizeof(float) * 1);
+  static bool layout_init[] = {true};  // true in the _nchw variants, false in the _nhwc variants.
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_ROI_POOLING, {in, roi, param, param1, layout}, {out});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in, roi},
+    {out});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nchw_quant8(int i) {  // No output indices are ignored for this test.
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nhwc_2(Model *model) {  // ROI_POOLING test 2, NHWC layout, float32 operands.
+  OperandType type0(Type::BOOL, {});
+  OperandType type4(Type::TENSOR_INT32, {2});
+  OperandType type5(Type::FLOAT32, {});
+  OperandType type6(Type::TENSOR_FLOAT32, {2, 4, 8, 2});
+  OperandType type7(Type::TENSOR_FLOAT32, {4, 5});
+  OperandType type8(Type::TENSOR_FLOAT32, {4, 2, 3, 2});
+  // Phase 1, operands
+  auto in1 = model->addOperand(&type6);
+  auto roi1 = model->addOperand(&type7);
+  auto param2 = model->addOperand(&type4);
+  auto param3 = model->addOperand(&type5);
+  auto layout = model->addOperand(&type0);
+  auto out1 = model->addOperand(&type8);
+  // Phase 2, operations
+  static int32_t param2_init[] = {2, 3};
+  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 2);
+  static float param3_init[] = {0.25f};
+  model->setOperandValue(param3, param3_init, sizeof(float) * 1);
+  static bool layout_init[] = {false};  // false in the _nhwc variants, true in the _nchw variants.
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_ROI_POOLING, {in1, roi1, param2, param3, layout}, {out1});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in1, roi1},
+    {out1});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nhwc_2(int i) {  // No output indices are ignored for this test.
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nhwc_relaxed_2(Model *model) {  // ROI_POOLING test 2, NHWC layout, relaxed FP32->FP16 precision.
+  OperandType type0(Type::BOOL, {});
+  OperandType type4(Type::TENSOR_INT32, {2});
+  OperandType type5(Type::FLOAT32, {});
+  OperandType type6(Type::TENSOR_FLOAT32, {2, 4, 8, 2});
+  OperandType type7(Type::TENSOR_FLOAT32, {4, 5});
+  OperandType type8(Type::TENSOR_FLOAT32, {4, 2, 3, 2});
+  // Phase 1, operands
+  auto in1 = model->addOperand(&type6);
+  auto roi1 = model->addOperand(&type7);
+  auto param2 = model->addOperand(&type4);
+  auto param3 = model->addOperand(&type5);
+  auto layout = model->addOperand(&type0);
+  auto out1 = model->addOperand(&type8);
+  // Phase 2, operations
+  static int32_t param2_init[] = {2, 3};
+  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 2);
+  static float param3_init[] = {0.25f};
+  model->setOperandValue(param3, param3_init, sizeof(float) * 1);
+  static bool layout_init[] = {false};  // false in the _nhwc variants, true in the _nchw variants.
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_ROI_POOLING, {in1, roi1, param2, param3, layout}, {out1});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in1, roi1},
+    {out1});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nhwc_relaxed_2(int i) {  // No output indices are ignored for this test.
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nhwc_quant8_2(Model *model) {  // ROI_POOLING test 2, NHWC layout, TENSOR_QUANT8_ASYMM variant.
+  OperandType type0(Type::BOOL, {});
+  OperandType type15(Type::TENSOR_QUANT8_ASYMM, {2, 4, 8, 2}, 0.04f, 0);
+  OperandType type16(Type::TENSOR_QUANT8_ASYMM, {4, 2, 3, 2}, 0.04f, 0);
+  OperandType type4(Type::TENSOR_INT32, {2});
+  OperandType type5(Type::FLOAT32, {});
+  OperandType type7(Type::TENSOR_FLOAT32, {4, 5});
+  // Phase 1, operands
+  auto in1 = model->addOperand(&type15);
+  auto roi1 = model->addOperand(&type7);
+  auto param2 = model->addOperand(&type4);
+  auto param3 = model->addOperand(&type5);
+  auto layout = model->addOperand(&type0);
+  auto out1 = model->addOperand(&type16);
+  // Phase 2, operations
+  static int32_t param2_init[] = {2, 3};
+  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 2);
+  static float param3_init[] = {0.25f};
+  model->setOperandValue(param3, param3_init, sizeof(float) * 1);
+  static bool layout_init[] = {false};  // false in the _nhwc variants, true in the _nchw variants.
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_ROI_POOLING, {in1, roi1, param2, param3, layout}, {out1});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in1, roi1},
+    {out1});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nhwc_quant8_2(int i) {  // No output indices are ignored for this test.
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nchw_2(Model *model) {  // ROI_POOLING test 2, NCHW layout, float32 operands.
+  OperandType type0(Type::BOOL, {});
+  OperandType type17(Type::TENSOR_FLOAT32, {2, 2, 4, 8});
+  OperandType type18(Type::TENSOR_FLOAT32, {4, 2, 2, 3});
+  OperandType type4(Type::TENSOR_INT32, {2});
+  OperandType type5(Type::FLOAT32, {});
+  OperandType type7(Type::TENSOR_FLOAT32, {4, 5});
+  // Phase 1, operands
+  auto in1 = model->addOperand(&type17);
+  auto roi1 = model->addOperand(&type7);
+  auto param2 = model->addOperand(&type4);
+  auto param3 = model->addOperand(&type5);
+  auto layout = model->addOperand(&type0);
+  auto out1 = model->addOperand(&type18);
+  // Phase 2, operations
+  static int32_t param2_init[] = {2, 3};
+  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 2);
+  static float param3_init[] = {0.25f};
+  model->setOperandValue(param3, param3_init, sizeof(float) * 1);
+  static bool layout_init[] = {true};  // true in the _nchw variants, false in the _nhwc variants.
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_ROI_POOLING, {in1, roi1, param2, param3, layout}, {out1});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in1, roi1},
+    {out1});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nchw_2(int i) {  // No output indices are ignored for this test.
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nchw_relaxed_2(Model *model) {  // ROI_POOLING test 2, NCHW layout, relaxed FP32->FP16 precision.
+  OperandType type0(Type::BOOL, {});
+  OperandType type17(Type::TENSOR_FLOAT32, {2, 2, 4, 8});
+  OperandType type18(Type::TENSOR_FLOAT32, {4, 2, 2, 3});
+  OperandType type4(Type::TENSOR_INT32, {2});
+  OperandType type5(Type::FLOAT32, {});
+  OperandType type7(Type::TENSOR_FLOAT32, {4, 5});
+  // Phase 1, operands
+  auto in1 = model->addOperand(&type17);
+  auto roi1 = model->addOperand(&type7);
+  auto param2 = model->addOperand(&type4);
+  auto param3 = model->addOperand(&type5);
+  auto layout = model->addOperand(&type0);
+  auto out1 = model->addOperand(&type18);
+  // Phase 2, operations
+  static int32_t param2_init[] = {2, 3};
+  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 2);
+  static float param3_init[] = {0.25f};
+  model->setOperandValue(param3, param3_init, sizeof(float) * 1);
+  static bool layout_init[] = {true};  // true in the _nchw variants, false in the _nhwc variants.
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_ROI_POOLING, {in1, roi1, param2, param3, layout}, {out1});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in1, roi1},
+    {out1});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nchw_relaxed_2(int i) {  // No output indices are ignored for this test.
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nchw_quant8_2(Model *model) {  // ROI_POOLING test 2, NCHW layout, TENSOR_QUANT8_ASYMM variant.
+  OperandType type0(Type::BOOL, {});
+  OperandType type19(Type::TENSOR_QUANT8_ASYMM, {2, 2, 4, 8}, 0.04f, 0);
+  OperandType type20(Type::TENSOR_QUANT8_ASYMM, {4, 2, 2, 3}, 0.04f, 0);
+  OperandType type4(Type::TENSOR_INT32, {2});
+  OperandType type5(Type::FLOAT32, {});
+  OperandType type7(Type::TENSOR_FLOAT32, {4, 5});
+  // Phase 1, operands
+  auto in1 = model->addOperand(&type19);
+  auto roi1 = model->addOperand(&type7);
+  auto param2 = model->addOperand(&type4);
+  auto param3 = model->addOperand(&type5);
+  auto layout = model->addOperand(&type0);
+  auto out1 = model->addOperand(&type20);
+  // Phase 2, operations
+  static int32_t param2_init[] = {2, 3};
+  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 2);
+  static float param3_init[] = {0.25f};
+  model->setOperandValue(param3, param3_init, sizeof(float) * 1);
+  static bool layout_init[] = {true};  // true in the _nchw variants, false in the _nhwc variants.
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_ROI_POOLING, {in1, roi1, param2, param3, layout}, {out1});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in1, roi1},
+    {out1});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nchw_quant8_2(int i) {  // No output indices are ignored for this test.
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/models/slice.model.cpp b/nn/runtime/test/generated/models/slice.model.cpp
new file mode 100644
index 0000000..078ba36
--- /dev/null
+++ b/nn/runtime/test/generated/models/slice.model.cpp
@@ -0,0 +1,567 @@
+// clang-format off
+// Generated file (from: slice.mod.py). Do not edit
+void CreateModel(Model *model) {  // SLICE test 1: 1-D float32 operands.
+  OperandType type0(Type::TENSOR_FLOAT32, {4});
+  OperandType type1(Type::TENSOR_INT32, {1});
+  OperandType type2(Type::TENSOR_FLOAT32, {2});
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto begin = model->addOperand(&type1);
+  auto size = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input, begin, size}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input, begin, size},
+    {output});
+  assert(model->isValid());
+}
+
+inline bool is_ignored(int i) {  // No output indices are ignored for this test.
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed(Model *model) {  // SLICE test 1: float32 operands, relaxed FP32->FP16 execution.
+  OperandType type0(Type::TENSOR_FLOAT32, {4});
+  OperandType type1(Type::TENSOR_INT32, {1});
+  OperandType type2(Type::TENSOR_FLOAT32, {2});
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto begin = model->addOperand(&type1);
+  auto size = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input, begin, size}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input, begin, size},
+    {output});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed(int i) {  // No output indices are ignored for this test.
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16(Model *model) {  // SLICE test 1: TENSOR_FLOAT16 data operands.
+  OperandType type1(Type::TENSOR_INT32, {1});
+  OperandType type16(Type::TENSOR_FLOAT16, {4});
+  OperandType type17(Type::TENSOR_FLOAT16, {2});
+  // Phase 1, operands
+  auto input = model->addOperand(&type16);
+  auto begin = model->addOperand(&type1);
+  auto size = model->addOperand(&type1);
+  auto output = model->addOperand(&type17);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input, begin, size}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input, begin, size},
+    {output});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_float16(int i) {  // No output indices are ignored for this test.
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_2(Model *model) {  // SLICE test 2: 2-D float32 operands.
+  OperandType type3(Type::TENSOR_FLOAT32, {2, 3});
+  OperandType type4(Type::TENSOR_INT32, {2});
+  OperandType type5(Type::TENSOR_FLOAT32, {1, 2});
+  // Phase 1, operands
+  auto input1 = model->addOperand(&type3);
+  auto begin1 = model->addOperand(&type4);
+  auto size1 = model->addOperand(&type4);
+  auto output1 = model->addOperand(&type5);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input1, begin1, size1}, {output1});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input1, begin1, size1},
+    {output1});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_2(int i) {  // No output indices are ignored for this test.
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed_2(Model *model) {  // SLICE test 2: float32 operands, relaxed FP32->FP16 execution.
+  OperandType type3(Type::TENSOR_FLOAT32, {2, 3});
+  OperandType type4(Type::TENSOR_INT32, {2});
+  OperandType type5(Type::TENSOR_FLOAT32, {1, 2});
+  // Phase 1, operands
+  auto input1 = model->addOperand(&type3);
+  auto begin1 = model->addOperand(&type4);
+  auto size1 = model->addOperand(&type4);
+  auto output1 = model->addOperand(&type5);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input1, begin1, size1}, {output1});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input1, begin1, size1},
+    {output1});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed_2(int i) {  // No output indices are ignored for this test.
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16_2(Model *model) {  // SLICE test 2: TENSOR_FLOAT16 data operands.
+  OperandType type18(Type::TENSOR_FLOAT16, {2, 3});
+  OperandType type19(Type::TENSOR_FLOAT16, {1, 2});
+  OperandType type4(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input1 = model->addOperand(&type18);
+  auto begin1 = model->addOperand(&type4);
+  auto size1 = model->addOperand(&type4);
+  auto output1 = model->addOperand(&type19);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input1, begin1, size1}, {output1});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input1, begin1, size1},
+    {output1});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_float16_2(int i) {  // No output indices are ignored for this test.
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_3(Model *model) {  // SLICE test 3: 3-D float32 operands; output shape equals input shape.
+  OperandType type6(Type::TENSOR_FLOAT32, {2, 3, 2});
+  OperandType type7(Type::TENSOR_INT32, {3});
+  // Phase 1, operands
+  auto input2 = model->addOperand(&type6);
+  auto begin2 = model->addOperand(&type7);
+  auto size2 = model->addOperand(&type7);
+  auto output2 = model->addOperand(&type6);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input2, begin2, size2}, {output2});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input2, begin2, size2},
+    {output2});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_3(int i) {  // No output indices are ignored for this test.
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed_3(Model *model) {  // SLICE test 3: float32 operands, relaxed FP32->FP16 execution.
+  OperandType type6(Type::TENSOR_FLOAT32, {2, 3, 2});
+  OperandType type7(Type::TENSOR_INT32, {3});
+  // Phase 1, operands
+  auto input2 = model->addOperand(&type6);
+  auto begin2 = model->addOperand(&type7);
+  auto size2 = model->addOperand(&type7);
+  auto output2 = model->addOperand(&type6);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input2, begin2, size2}, {output2});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input2, begin2, size2},
+    {output2});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed_3(int i) {  // No output indices are ignored for this test.
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16_3(Model *model) {  // SLICE test 3: TENSOR_FLOAT16 data operands.
+  OperandType type20(Type::TENSOR_FLOAT16, {2, 3, 2});
+  OperandType type7(Type::TENSOR_INT32, {3});
+  // Phase 1, operands
+  auto input2 = model->addOperand(&type20);
+  auto begin2 = model->addOperand(&type7);
+  auto size2 = model->addOperand(&type7);
+  auto output2 = model->addOperand(&type20);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input2, begin2, size2}, {output2});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input2, begin2, size2},
+    {output2});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_float16_3(int i) {  // No output indices are ignored for this test.
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_4(Model *model) {  // SLICE test 4: 4-D float32 operands.
+  OperandType type10(Type::TENSOR_FLOAT32, {3, 1, 1, 1});
+  OperandType type8(Type::TENSOR_FLOAT32, {4, 1, 1, 1});
+  OperandType type9(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input3 = model->addOperand(&type8);
+  auto begin3 = model->addOperand(&type9);
+  auto size3 = model->addOperand(&type9);
+  auto output3 = model->addOperand(&type10);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input3, begin3, size3}, {output3});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input3, begin3, size3},
+    {output3});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_4(int i) {  // No output indices are ignored for this test.
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed_4(Model *model) {  // SLICE test 4: float32 operands, relaxed FP32->FP16 execution.
+  OperandType type10(Type::TENSOR_FLOAT32, {3, 1, 1, 1});
+  OperandType type8(Type::TENSOR_FLOAT32, {4, 1, 1, 1});
+  OperandType type9(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input3 = model->addOperand(&type8);
+  auto begin3 = model->addOperand(&type9);
+  auto size3 = model->addOperand(&type9);
+  auto output3 = model->addOperand(&type10);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input3, begin3, size3}, {output3});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input3, begin3, size3},
+    {output3});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed_4(int i) {  // No output indices are ignored for this test.
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16_4(Model *model) {  // SLICE test 4: TENSOR_FLOAT16 data operands.
+  OperandType type21(Type::TENSOR_FLOAT16, {4, 1, 1, 1});
+  OperandType type22(Type::TENSOR_FLOAT16, {3, 1, 1, 1});
+  OperandType type9(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input3 = model->addOperand(&type21);
+  auto begin3 = model->addOperand(&type9);
+  auto size3 = model->addOperand(&type9);
+  auto output3 = model->addOperand(&type22);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input3, begin3, size3}, {output3});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input3, begin3, size3},
+    {output3});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_float16_4(int i) {  // No output indices are ignored for this test.
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_5(Model *model) {  // SLICE test 5: 4-D int32 data operands.
+  OperandType type11(Type::TENSOR_INT32, {3, 2, 3, 1});
+  OperandType type12(Type::TENSOR_INT32, {1, 1, 3, 1});
+  OperandType type9(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input4 = model->addOperand(&type11);
+  auto begin4 = model->addOperand(&type9);
+  auto size4 = model->addOperand(&type9);
+  auto output4 = model->addOperand(&type12);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input4, begin4, size4}, {output4});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input4, begin4, size4},
+    {output4});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_5(int i) {  // No output indices are ignored for this test.
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed_5(Model *model) {  // SLICE test 5: int32 operands, relaxed-execution flag set.
+  OperandType type11(Type::TENSOR_INT32, {3, 2, 3, 1});
+  OperandType type12(Type::TENSOR_INT32, {1, 1, 3, 1});
+  OperandType type9(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input4 = model->addOperand(&type11);
+  auto begin4 = model->addOperand(&type9);
+  auto size4 = model->addOperand(&type9);
+  auto output4 = model->addOperand(&type12);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input4, begin4, size4}, {output4});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input4, begin4, size4},
+    {output4});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed_5(int i) {  // No output indices are ignored for this test.
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16_5(Model *model) {  // SLICE test 5 "float16" variant: data operands remain int32.
+  OperandType type11(Type::TENSOR_INT32, {3, 2, 3, 1});
+  OperandType type12(Type::TENSOR_INT32, {1, 1, 3, 1});
+  OperandType type9(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input4 = model->addOperand(&type11);
+  auto begin4 = model->addOperand(&type9);
+  auto size4 = model->addOperand(&type9);
+  auto output4 = model->addOperand(&type12);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input4, begin4, size4}, {output4});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input4, begin4, size4},
+    {output4});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_float16_5(int i) {  // No output indices are ignored for this test.
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_6(Model *model) {  // SLICE test 6: 4-D int32 data operands.
+  OperandType type11(Type::TENSOR_INT32, {3, 2, 3, 1});
+  OperandType type13(Type::TENSOR_INT32, {2, 1, 3, 1});
+  OperandType type9(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input5 = model->addOperand(&type11);
+  auto begin5 = model->addOperand(&type9);
+  auto size5 = model->addOperand(&type9);
+  auto output5 = model->addOperand(&type13);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input5, begin5, size5}, {output5});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input5, begin5, size5},
+    {output5});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_6(int i) {  // No output indices are ignored for this test.
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed_6(Model *model) {  // SLICE test 6: int32 operands, relaxed-execution flag set.
+  OperandType type11(Type::TENSOR_INT32, {3, 2, 3, 1});
+  OperandType type13(Type::TENSOR_INT32, {2, 1, 3, 1});
+  OperandType type9(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input5 = model->addOperand(&type11);
+  auto begin5 = model->addOperand(&type9);
+  auto size5 = model->addOperand(&type9);
+  auto output5 = model->addOperand(&type13);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input5, begin5, size5}, {output5});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input5, begin5, size5},
+    {output5});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed_6(int i) {  // No output indices are ignored for this test.
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16_6(Model *model) {  // SLICE test 6 "float16" variant: data operands remain int32.
+  OperandType type11(Type::TENSOR_INT32, {3, 2, 3, 1});
+  OperandType type13(Type::TENSOR_INT32, {2, 1, 3, 1});
+  OperandType type9(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input5 = model->addOperand(&type11);
+  auto begin5 = model->addOperand(&type9);
+  auto size5 = model->addOperand(&type9);
+  auto output5 = model->addOperand(&type13);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input5, begin5, size5}, {output5});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input5, begin5, size5},
+    {output5});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_float16_6(int i) {  // No output indices are ignored for this test.
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_7(Model *model) {  // SLICE test 7: 4-D TENSOR_QUANT8_ASYMM data operands.
+  OperandType type14(Type::TENSOR_QUANT8_ASYMM, {3, 2, 3, 1}, 2.0f, 128);
+  OperandType type15(Type::TENSOR_QUANT8_ASYMM, {2, 1, 3, 1}, 2.0f, 128);
+  OperandType type9(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input6 = model->addOperand(&type14);
+  auto begin6 = model->addOperand(&type9);
+  auto size6 = model->addOperand(&type9);
+  auto output6 = model->addOperand(&type15);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input6, begin6, size6}, {output6});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input6, begin6, size6},
+    {output6});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_7(int i) {  // No output indices are ignored for this test.
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed_7(Model *model) {  // SLICE test 7: quant8 operands, relaxed-execution flag set.
+  OperandType type14(Type::TENSOR_QUANT8_ASYMM, {3, 2, 3, 1}, 2.0f, 128);
+  OperandType type15(Type::TENSOR_QUANT8_ASYMM, {2, 1, 3, 1}, 2.0f, 128);
+  OperandType type9(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input6 = model->addOperand(&type14);
+  auto begin6 = model->addOperand(&type9);
+  auto size6 = model->addOperand(&type9);
+  auto output6 = model->addOperand(&type15);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input6, begin6, size6}, {output6});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input6, begin6, size6},
+    {output6});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed_7(int i) {  // No output indices are ignored for this test.
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16_7(Model *model) {  // SLICE test 7 "float16" variant: data operands remain quant8.
+  OperandType type14(Type::TENSOR_QUANT8_ASYMM, {3, 2, 3, 1}, 2.0f, 128);
+  OperandType type15(Type::TENSOR_QUANT8_ASYMM, {2, 1, 3, 1}, 2.0f, 128);
+  OperandType type9(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input6 = model->addOperand(&type14);
+  auto begin6 = model->addOperand(&type9);
+  auto size6 = model->addOperand(&type9);
+  auto output6 = model->addOperand(&type15);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input6, begin6, size6}, {output6});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input6, begin6, size6},
+    {output6});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_float16_7(int i) {  // No output indices are ignored for this test.
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_8(Model *model) {  // SLICE test 8: 4-D int32 data operands.
+  OperandType type11(Type::TENSOR_INT32, {3, 2, 3, 1});
+  OperandType type13(Type::TENSOR_INT32, {2, 1, 3, 1});
+  OperandType type9(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input7 = model->addOperand(&type11);
+  auto begin7 = model->addOperand(&type9);
+  auto size7 = model->addOperand(&type9);
+  auto output7 = model->addOperand(&type13);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input7, begin7, size7}, {output7});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input7, begin7, size7},
+    {output7});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_8(int i) {  // No output indices are ignored for this test.
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed_8(Model *model) {  // SLICE test 8: int32 operands, relaxed-execution flag set.
+  OperandType type11(Type::TENSOR_INT32, {3, 2, 3, 1});
+  OperandType type13(Type::TENSOR_INT32, {2, 1, 3, 1});
+  OperandType type9(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input7 = model->addOperand(&type11);
+  auto begin7 = model->addOperand(&type9);
+  auto size7 = model->addOperand(&type9);
+  auto output7 = model->addOperand(&type13);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input7, begin7, size7}, {output7});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input7, begin7, size7},
+    {output7});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed_8(int i) {  // No output indices are ignored for this test.
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16_8(Model *model) {  // SLICE test 8 "float16" variant: data operands remain int32.
+  OperandType type11(Type::TENSOR_INT32, {3, 2, 3, 1});
+  OperandType type13(Type::TENSOR_INT32, {2, 1, 3, 1});
+  OperandType type9(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input7 = model->addOperand(&type11);
+  auto begin7 = model->addOperand(&type9);
+  auto size7 = model->addOperand(&type9);
+  auto output7 = model->addOperand(&type13);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input7, begin7, size7}, {output7});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input7, begin7, size7},
+    {output7});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_float16_8(int i) {  // No output indices are ignored for this test.
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/models/space_to_batch_v1_2.model.cpp b/nn/runtime/test/generated/models/space_to_batch_v1_2.model.cpp
index 2194c04..ed07ba4 100644
--- a/nn/runtime/test/generated/models/space_to_batch_v1_2.model.cpp
+++ b/nn/runtime/test/generated/models/space_to_batch_v1_2.model.cpp
@@ -66,10 +66,10 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_nhwc_quant8(Model *model) {
+void CreateModel_nhwc_float16(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type11(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.1f, 0);
-  OperandType type12(Type::TENSOR_QUANT8_ASYMM, {4, 1, 1, 2}, 0.1f, 0);
+  OperandType type11(Type::TENSOR_FLOAT16, {1, 2, 2, 2});
+  OperandType type12(Type::TENSOR_FLOAT16, {4, 1, 1, 2});
   OperandType type2(Type::TENSOR_INT32, {2, 2});
   OperandType type4(Type::TENSOR_INT32, {2});
   // Phase 1, operands
@@ -93,6 +93,38 @@
   assert(model->isValid());
 }
 
+inline bool is_ignored_nhwc_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nhwc_quant8(Model *model) {  // SPACE_TO_BATCH_ND, NHWC layout, TENSOR_QUANT8_ASYMM variant.
+  OperandType type0(Type::BOOL, {});
+  OperandType type13(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.1f, 0);
+  OperandType type14(Type::TENSOR_QUANT8_ASYMM, {4, 1, 1, 2}, 0.1f, 0);
+  OperandType type2(Type::TENSOR_INT32, {2, 2});
+  OperandType type4(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type13);
+  auto param = model->addOperand(&type4);
+  auto paddings = model->addOperand(&type2);
+  auto layout = model->addOperand(&type0);
+  auto op4 = model->addOperand(&type14);
+  // Phase 2, operations
+  static int32_t param_init[] = {2, 2};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 2);
+  static int32_t paddings_init[] = {0, 0, 0, 0};  // no padding on either spatial dimension
+  model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 4);
+  static bool layout_init[] = {false};  // false in the _nhwc variants, true in the _nchw variants.
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_SPACE_TO_BATCH_ND, {op1, param, paddings, layout}, {op4});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1},
+    {op4});
+  assert(model->isValid());
+}
+
 inline bool is_ignored_nhwc_quant8(int i) {
   static std::set<int> ignore = {};
   return ignore.find(i) != ignore.end();
@@ -101,7 +133,7 @@
 void CreateModel_nchw(Model *model) {
   OperandType type0(Type::BOOL, {});
   OperandType type1(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
-  OperandType type13(Type::TENSOR_FLOAT32, {4, 2, 1, 1});
+  OperandType type15(Type::TENSOR_FLOAT32, {4, 2, 1, 1});
   OperandType type2(Type::TENSOR_INT32, {2, 2});
   OperandType type4(Type::TENSOR_INT32, {2});
   // Phase 1, operands
@@ -109,7 +141,7 @@
   auto param = model->addOperand(&type4);
   auto paddings = model->addOperand(&type2);
   auto layout = model->addOperand(&type0);
-  auto op4 = model->addOperand(&type13);
+  auto op4 = model->addOperand(&type15);
   // Phase 2, operations
   static int32_t param_init[] = {2, 2};
   model->setOperandValue(param, param_init, sizeof(int32_t) * 2);
@@ -133,7 +165,7 @@
 void CreateModel_nchw_relaxed(Model *model) {
   OperandType type0(Type::BOOL, {});
   OperandType type1(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
-  OperandType type13(Type::TENSOR_FLOAT32, {4, 2, 1, 1});
+  OperandType type15(Type::TENSOR_FLOAT32, {4, 2, 1, 1});
   OperandType type2(Type::TENSOR_INT32, {2, 2});
   OperandType type4(Type::TENSOR_INT32, {2});
   // Phase 1, operands
@@ -141,7 +173,7 @@
   auto param = model->addOperand(&type4);
   auto paddings = model->addOperand(&type2);
   auto layout = model->addOperand(&type0);
-  auto op4 = model->addOperand(&type13);
+  auto op4 = model->addOperand(&type15);
   // Phase 2, operations
   static int32_t param_init[] = {2, 2};
   model->setOperandValue(param, param_init, sizeof(int32_t) * 2);
@@ -164,10 +196,10 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_nchw_quant8(Model *model) {
+void CreateModel_nchw_float16(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type11(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.1f, 0);
-  OperandType type14(Type::TENSOR_QUANT8_ASYMM, {4, 2, 1, 1}, 0.1f, 0);
+  OperandType type11(Type::TENSOR_FLOAT16, {1, 2, 2, 2});
+  OperandType type16(Type::TENSOR_FLOAT16, {4, 2, 1, 1});
   OperandType type2(Type::TENSOR_INT32, {2, 2});
   OperandType type4(Type::TENSOR_INT32, {2});
   // Phase 1, operands
@@ -175,7 +207,39 @@
   auto param = model->addOperand(&type4);
   auto paddings = model->addOperand(&type2);
   auto layout = model->addOperand(&type0);
-  auto op4 = model->addOperand(&type14);
+  auto op4 = model->addOperand(&type16);
+  // Phase 2, operations
+  static int32_t param_init[] = {2, 2};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 2);
+  static int32_t paddings_init[] = {0, 0, 0, 0};
+  model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 4);
+  static bool layout_init[] = {true};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_SPACE_TO_BATCH_ND, {op1, param, paddings, layout}, {op4});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1},
+    {op4});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nchw_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nchw_quant8(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type13(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.1f, 0);
+  OperandType type17(Type::TENSOR_QUANT8_ASYMM, {4, 2, 1, 1}, 0.1f, 0);
+  OperandType type2(Type::TENSOR_INT32, {2, 2});
+  OperandType type4(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type13);
+  auto param = model->addOperand(&type4);
+  auto paddings = model->addOperand(&type2);
+  auto layout = model->addOperand(&type0);
+  auto op4 = model->addOperand(&type17);
   // Phase 2, operations
   static int32_t param_init[] = {2, 2};
   model->setOperandValue(param, param_init, sizeof(int32_t) * 2);
@@ -262,18 +326,50 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_nhwc_quant8_2(Model *model) {
+void CreateModel_nhwc_float16_2(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type15(Type::TENSOR_QUANT8_ASYMM, {1, 4, 4, 1}, 0.5f, 0);
-  OperandType type16(Type::TENSOR_QUANT8_ASYMM, {4, 2, 2, 1}, 0.5f, 0);
+  OperandType type18(Type::TENSOR_FLOAT16, {1, 4, 4, 1});
+  OperandType type19(Type::TENSOR_FLOAT16, {4, 2, 2, 1});
   OperandType type2(Type::TENSOR_INT32, {2, 2});
   OperandType type4(Type::TENSOR_INT32, {2});
   // Phase 1, operands
-  auto op11 = model->addOperand(&type15);
+  auto op11 = model->addOperand(&type18);
   auto param1 = model->addOperand(&type4);
   auto paddings = model->addOperand(&type2);
   auto layout = model->addOperand(&type0);
-  auto op41 = model->addOperand(&type16);
+  auto op41 = model->addOperand(&type19);
+  // Phase 2, operations
+  static int32_t param1_init[] = {2, 2};
+  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
+  static int32_t paddings_init[] = {0, 0, 0, 0};
+  model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 4);
+  static bool layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_SPACE_TO_BATCH_ND, {op11, param1, paddings, layout}, {op41});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op11},
+    {op41});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nhwc_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nhwc_quant8_2(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type2(Type::TENSOR_INT32, {2, 2});
+  OperandType type20(Type::TENSOR_QUANT8_ASYMM, {1, 4, 4, 1}, 0.5f, 0);
+  OperandType type21(Type::TENSOR_QUANT8_ASYMM, {4, 2, 2, 1}, 0.5f, 0);
+  OperandType type4(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto op11 = model->addOperand(&type20);
+  auto param1 = model->addOperand(&type4);
+  auto paddings = model->addOperand(&type2);
+  auto layout = model->addOperand(&type0);
+  auto op41 = model->addOperand(&type21);
   // Phase 2, operations
   static int32_t param1_init[] = {2, 2};
   model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
@@ -296,16 +392,16 @@
 
 void CreateModel_nchw_2(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type17(Type::TENSOR_FLOAT32, {1, 1, 4, 4});
-  OperandType type18(Type::TENSOR_FLOAT32, {4, 1, 2, 2});
   OperandType type2(Type::TENSOR_INT32, {2, 2});
+  OperandType type22(Type::TENSOR_FLOAT32, {1, 1, 4, 4});
+  OperandType type23(Type::TENSOR_FLOAT32, {4, 1, 2, 2});
   OperandType type4(Type::TENSOR_INT32, {2});
   // Phase 1, operands
-  auto op11 = model->addOperand(&type17);
+  auto op11 = model->addOperand(&type22);
   auto param1 = model->addOperand(&type4);
   auto paddings = model->addOperand(&type2);
   auto layout = model->addOperand(&type0);
-  auto op41 = model->addOperand(&type18);
+  auto op41 = model->addOperand(&type23);
   // Phase 2, operations
   static int32_t param1_init[] = {2, 2};
   model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
@@ -328,16 +424,16 @@
 
 void CreateModel_nchw_relaxed_2(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type17(Type::TENSOR_FLOAT32, {1, 1, 4, 4});
-  OperandType type18(Type::TENSOR_FLOAT32, {4, 1, 2, 2});
   OperandType type2(Type::TENSOR_INT32, {2, 2});
+  OperandType type22(Type::TENSOR_FLOAT32, {1, 1, 4, 4});
+  OperandType type23(Type::TENSOR_FLOAT32, {4, 1, 2, 2});
   OperandType type4(Type::TENSOR_INT32, {2});
   // Phase 1, operands
-  auto op11 = model->addOperand(&type17);
+  auto op11 = model->addOperand(&type22);
   auto param1 = model->addOperand(&type4);
   auto paddings = model->addOperand(&type2);
   auto layout = model->addOperand(&type0);
-  auto op41 = model->addOperand(&type18);
+  auto op41 = model->addOperand(&type23);
   // Phase 2, operations
   static int32_t param1_init[] = {2, 2};
   model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
@@ -360,18 +456,50 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_nchw_quant8_2(Model *model) {
+void CreateModel_nchw_float16_2(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type19(Type::TENSOR_QUANT8_ASYMM, {1, 1, 4, 4}, 0.5f, 0);
   OperandType type2(Type::TENSOR_INT32, {2, 2});
-  OperandType type20(Type::TENSOR_QUANT8_ASYMM, {4, 1, 2, 2}, 0.5f, 0);
+  OperandType type24(Type::TENSOR_FLOAT16, {1, 1, 4, 4});
+  OperandType type25(Type::TENSOR_FLOAT16, {4, 1, 2, 2});
   OperandType type4(Type::TENSOR_INT32, {2});
   // Phase 1, operands
-  auto op11 = model->addOperand(&type19);
+  auto op11 = model->addOperand(&type24);
   auto param1 = model->addOperand(&type4);
   auto paddings = model->addOperand(&type2);
   auto layout = model->addOperand(&type0);
-  auto op41 = model->addOperand(&type20);
+  auto op41 = model->addOperand(&type25);
+  // Phase 2, operations
+  static int32_t param1_init[] = {2, 2};
+  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
+  static int32_t paddings_init[] = {0, 0, 0, 0};
+  model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 4);
+  static bool layout_init[] = {true};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_SPACE_TO_BATCH_ND, {op11, param1, paddings, layout}, {op41});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op11},
+    {op41});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nchw_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nchw_quant8_2(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type2(Type::TENSOR_INT32, {2, 2});
+  OperandType type26(Type::TENSOR_QUANT8_ASYMM, {1, 1, 4, 4}, 0.5f, 0);
+  OperandType type27(Type::TENSOR_QUANT8_ASYMM, {4, 1, 2, 2}, 0.5f, 0);
+  OperandType type4(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto op11 = model->addOperand(&type26);
+  auto param1 = model->addOperand(&type4);
+  auto paddings = model->addOperand(&type2);
+  auto layout = model->addOperand(&type0);
+  auto op41 = model->addOperand(&type27);
   // Phase 2, operations
   static int32_t param1_init[] = {2, 2};
   model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
@@ -458,18 +586,50 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_nhwc_quant8_3(Model *model) {
+void CreateModel_nhwc_float16_3(Model *model) {
   OperandType type0(Type::BOOL, {});
   OperandType type2(Type::TENSOR_INT32, {2, 2});
-  OperandType type21(Type::TENSOR_QUANT8_ASYMM, {1, 5, 2, 1}, 0.5f, 0);
-  OperandType type22(Type::TENSOR_QUANT8_ASYMM, {6, 2, 2, 1}, 0.5f, 0);
+  OperandType type28(Type::TENSOR_FLOAT16, {1, 5, 2, 1});
+  OperandType type29(Type::TENSOR_FLOAT16, {6, 2, 2, 1});
   OperandType type4(Type::TENSOR_INT32, {2});
   // Phase 1, operands
-  auto op12 = model->addOperand(&type21);
+  auto op12 = model->addOperand(&type28);
   auto param2 = model->addOperand(&type4);
   auto paddings1 = model->addOperand(&type2);
   auto layout = model->addOperand(&type0);
-  auto op42 = model->addOperand(&type22);
+  auto op42 = model->addOperand(&type29);
+  // Phase 2, operations
+  static int32_t param2_init[] = {3, 2};
+  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 2);
+  static int32_t paddings1_init[] = {1, 0, 2, 0};
+  model->setOperandValue(paddings1, paddings1_init, sizeof(int32_t) * 4);
+  static bool layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_SPACE_TO_BATCH_ND, {op12, param2, paddings1, layout}, {op42});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op12},
+    {op42});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nhwc_float16_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nhwc_quant8_3(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type2(Type::TENSOR_INT32, {2, 2});
+  OperandType type30(Type::TENSOR_QUANT8_ASYMM, {1, 5, 2, 1}, 0.5f, 0);
+  OperandType type31(Type::TENSOR_QUANT8_ASYMM, {6, 2, 2, 1}, 0.5f, 0);
+  OperandType type4(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto op12 = model->addOperand(&type30);
+  auto param2 = model->addOperand(&type4);
+  auto paddings1 = model->addOperand(&type2);
+  auto layout = model->addOperand(&type0);
+  auto op42 = model->addOperand(&type31);
   // Phase 2, operations
   static int32_t param2_init[] = {3, 2};
   model->setOperandValue(param2, param2_init, sizeof(int32_t) * 2);
@@ -493,15 +653,15 @@
 void CreateModel_nchw_3(Model *model) {
   OperandType type0(Type::BOOL, {});
   OperandType type2(Type::TENSOR_INT32, {2, 2});
-  OperandType type23(Type::TENSOR_FLOAT32, {1, 1, 5, 2});
-  OperandType type24(Type::TENSOR_FLOAT32, {6, 1, 2, 2});
+  OperandType type32(Type::TENSOR_FLOAT32, {1, 1, 5, 2});
+  OperandType type33(Type::TENSOR_FLOAT32, {6, 1, 2, 2});
   OperandType type4(Type::TENSOR_INT32, {2});
   // Phase 1, operands
-  auto op12 = model->addOperand(&type23);
+  auto op12 = model->addOperand(&type32);
   auto param2 = model->addOperand(&type4);
   auto paddings1 = model->addOperand(&type2);
   auto layout = model->addOperand(&type0);
-  auto op42 = model->addOperand(&type24);
+  auto op42 = model->addOperand(&type33);
   // Phase 2, operations
   static int32_t param2_init[] = {3, 2};
   model->setOperandValue(param2, param2_init, sizeof(int32_t) * 2);
@@ -525,15 +685,15 @@
 void CreateModel_nchw_relaxed_3(Model *model) {
   OperandType type0(Type::BOOL, {});
   OperandType type2(Type::TENSOR_INT32, {2, 2});
-  OperandType type23(Type::TENSOR_FLOAT32, {1, 1, 5, 2});
-  OperandType type24(Type::TENSOR_FLOAT32, {6, 1, 2, 2});
+  OperandType type32(Type::TENSOR_FLOAT32, {1, 1, 5, 2});
+  OperandType type33(Type::TENSOR_FLOAT32, {6, 1, 2, 2});
   OperandType type4(Type::TENSOR_INT32, {2});
   // Phase 1, operands
-  auto op12 = model->addOperand(&type23);
+  auto op12 = model->addOperand(&type32);
   auto param2 = model->addOperand(&type4);
   auto paddings1 = model->addOperand(&type2);
   auto layout = model->addOperand(&type0);
-  auto op42 = model->addOperand(&type24);
+  auto op42 = model->addOperand(&type33);
   // Phase 2, operations
   static int32_t param2_init[] = {3, 2};
   model->setOperandValue(param2, param2_init, sizeof(int32_t) * 2);
@@ -556,18 +716,50 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_nchw_quant8_3(Model *model) {
+void CreateModel_nchw_float16_3(Model *model) {
   OperandType type0(Type::BOOL, {});
   OperandType type2(Type::TENSOR_INT32, {2, 2});
-  OperandType type25(Type::TENSOR_QUANT8_ASYMM, {1, 1, 5, 2}, 0.5f, 0);
-  OperandType type26(Type::TENSOR_QUANT8_ASYMM, {6, 1, 2, 2}, 0.5f, 0);
+  OperandType type34(Type::TENSOR_FLOAT16, {1, 1, 5, 2});
+  OperandType type35(Type::TENSOR_FLOAT16, {6, 1, 2, 2});
   OperandType type4(Type::TENSOR_INT32, {2});
   // Phase 1, operands
-  auto op12 = model->addOperand(&type25);
+  auto op12 = model->addOperand(&type34);
   auto param2 = model->addOperand(&type4);
   auto paddings1 = model->addOperand(&type2);
   auto layout = model->addOperand(&type0);
-  auto op42 = model->addOperand(&type26);
+  auto op42 = model->addOperand(&type35);
+  // Phase 2, operations
+  static int32_t param2_init[] = {3, 2};
+  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 2);
+  static int32_t paddings1_init[] = {1, 0, 2, 0};
+  model->setOperandValue(paddings1, paddings1_init, sizeof(int32_t) * 4);
+  static bool layout_init[] = {true};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_SPACE_TO_BATCH_ND, {op12, param2, paddings1, layout}, {op42});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op12},
+    {op42});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nchw_float16_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nchw_quant8_3(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type2(Type::TENSOR_INT32, {2, 2});
+  OperandType type36(Type::TENSOR_QUANT8_ASYMM, {1, 1, 5, 2}, 0.5f, 0);
+  OperandType type37(Type::TENSOR_QUANT8_ASYMM, {6, 1, 2, 2}, 0.5f, 0);
+  OperandType type4(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto op12 = model->addOperand(&type36);
+  auto param2 = model->addOperand(&type4);
+  auto paddings1 = model->addOperand(&type2);
+  auto layout = model->addOperand(&type0);
+  auto op42 = model->addOperand(&type37);
   // Phase 2, operations
   static int32_t param2_init[] = {3, 2};
   model->setOperandValue(param2, param2_init, sizeof(int32_t) * 2);
@@ -654,18 +846,50 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_nhwc_quant8_4(Model *model) {
+void CreateModel_nhwc_float16_4(Model *model) {
   OperandType type0(Type::BOOL, {});
   OperandType type2(Type::TENSOR_INT32, {2, 2});
-  OperandType type27(Type::TENSOR_QUANT8_ASYMM, {1, 4, 2, 1}, 0.25f, 0);
-  OperandType type28(Type::TENSOR_QUANT8_ASYMM, {6, 2, 4, 1}, 0.25f, 0);
+  OperandType type38(Type::TENSOR_FLOAT16, {1, 4, 2, 1});
+  OperandType type39(Type::TENSOR_FLOAT16, {6, 2, 4, 1});
   OperandType type4(Type::TENSOR_INT32, {2});
   // Phase 1, operands
-  auto op13 = model->addOperand(&type27);
+  auto op13 = model->addOperand(&type38);
   auto param3 = model->addOperand(&type4);
   auto paddings2 = model->addOperand(&type2);
   auto layout = model->addOperand(&type0);
-  auto op43 = model->addOperand(&type28);
+  auto op43 = model->addOperand(&type39);
+  // Phase 2, operations
+  static int32_t param3_init[] = {3, 2};
+  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 2);
+  static int32_t paddings2_init[] = {1, 1, 2, 4};
+  model->setOperandValue(paddings2, paddings2_init, sizeof(int32_t) * 4);
+  static bool layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_SPACE_TO_BATCH_ND, {op13, param3, paddings2, layout}, {op43});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op13},
+    {op43});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nhwc_float16_4(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nhwc_quant8_4(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type2(Type::TENSOR_INT32, {2, 2});
+  OperandType type4(Type::TENSOR_INT32, {2});
+  OperandType type40(Type::TENSOR_QUANT8_ASYMM, {1, 4, 2, 1}, 0.25f, 0);
+  OperandType type41(Type::TENSOR_QUANT8_ASYMM, {6, 2, 4, 1}, 0.25f, 0);
+  // Phase 1, operands
+  auto op13 = model->addOperand(&type40);
+  auto param3 = model->addOperand(&type4);
+  auto paddings2 = model->addOperand(&type2);
+  auto layout = model->addOperand(&type0);
+  auto op43 = model->addOperand(&type41);
   // Phase 2, operations
   static int32_t param3_init[] = {3, 2};
   model->setOperandValue(param3, param3_init, sizeof(int32_t) * 2);
@@ -689,15 +913,15 @@
 void CreateModel_nchw_4(Model *model) {
   OperandType type0(Type::BOOL, {});
   OperandType type2(Type::TENSOR_INT32, {2, 2});
-  OperandType type29(Type::TENSOR_FLOAT32, {1, 1, 4, 2});
-  OperandType type30(Type::TENSOR_FLOAT32, {6, 1, 2, 4});
   OperandType type4(Type::TENSOR_INT32, {2});
+  OperandType type42(Type::TENSOR_FLOAT32, {1, 1, 4, 2});
+  OperandType type43(Type::TENSOR_FLOAT32, {6, 1, 2, 4});
   // Phase 1, operands
-  auto op13 = model->addOperand(&type29);
+  auto op13 = model->addOperand(&type42);
   auto param3 = model->addOperand(&type4);
   auto paddings2 = model->addOperand(&type2);
   auto layout = model->addOperand(&type0);
-  auto op43 = model->addOperand(&type30);
+  auto op43 = model->addOperand(&type43);
   // Phase 2, operations
   static int32_t param3_init[] = {3, 2};
   model->setOperandValue(param3, param3_init, sizeof(int32_t) * 2);
@@ -721,15 +945,15 @@
 void CreateModel_nchw_relaxed_4(Model *model) {
   OperandType type0(Type::BOOL, {});
   OperandType type2(Type::TENSOR_INT32, {2, 2});
-  OperandType type29(Type::TENSOR_FLOAT32, {1, 1, 4, 2});
-  OperandType type30(Type::TENSOR_FLOAT32, {6, 1, 2, 4});
   OperandType type4(Type::TENSOR_INT32, {2});
+  OperandType type42(Type::TENSOR_FLOAT32, {1, 1, 4, 2});
+  OperandType type43(Type::TENSOR_FLOAT32, {6, 1, 2, 4});
   // Phase 1, operands
-  auto op13 = model->addOperand(&type29);
+  auto op13 = model->addOperand(&type42);
   auto param3 = model->addOperand(&type4);
   auto paddings2 = model->addOperand(&type2);
   auto layout = model->addOperand(&type0);
-  auto op43 = model->addOperand(&type30);
+  auto op43 = model->addOperand(&type43);
   // Phase 2, operations
   static int32_t param3_init[] = {3, 2};
   model->setOperandValue(param3, param3_init, sizeof(int32_t) * 2);
@@ -752,18 +976,50 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_nchw_quant8_4(Model *model) {
+void CreateModel_nchw_float16_4(Model *model) {
   OperandType type0(Type::BOOL, {});
   OperandType type2(Type::TENSOR_INT32, {2, 2});
-  OperandType type31(Type::TENSOR_QUANT8_ASYMM, {1, 1, 4, 2}, 0.25f, 0);
-  OperandType type32(Type::TENSOR_QUANT8_ASYMM, {6, 1, 2, 4}, 0.25f, 0);
   OperandType type4(Type::TENSOR_INT32, {2});
+  OperandType type44(Type::TENSOR_FLOAT16, {1, 1, 4, 2});
+  OperandType type45(Type::TENSOR_FLOAT16, {6, 1, 2, 4});
   // Phase 1, operands
-  auto op13 = model->addOperand(&type31);
+  auto op13 = model->addOperand(&type44);
   auto param3 = model->addOperand(&type4);
   auto paddings2 = model->addOperand(&type2);
   auto layout = model->addOperand(&type0);
-  auto op43 = model->addOperand(&type32);
+  auto op43 = model->addOperand(&type45);
+  // Phase 2, operations
+  static int32_t param3_init[] = {3, 2};
+  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 2);
+  static int32_t paddings2_init[] = {1, 1, 2, 4};
+  model->setOperandValue(paddings2, paddings2_init, sizeof(int32_t) * 4);
+  static bool layout_init[] = {true};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_SPACE_TO_BATCH_ND, {op13, param3, paddings2, layout}, {op43});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op13},
+    {op43});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nchw_float16_4(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nchw_quant8_4(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type2(Type::TENSOR_INT32, {2, 2});
+  OperandType type4(Type::TENSOR_INT32, {2});
+  OperandType type46(Type::TENSOR_QUANT8_ASYMM, {1, 1, 4, 2}, 0.25f, 0);
+  OperandType type47(Type::TENSOR_QUANT8_ASYMM, {6, 1, 2, 4}, 0.25f, 0);
+  // Phase 1, operands
+  auto op13 = model->addOperand(&type46);
+  auto param3 = model->addOperand(&type4);
+  auto paddings2 = model->addOperand(&type2);
+  auto layout = model->addOperand(&type0);
+  auto op43 = model->addOperand(&type47);
   // Phase 2, operations
   static int32_t param3_init[] = {3, 2};
   model->setOperandValue(param3, param3_init, sizeof(int32_t) * 2);
diff --git a/nn/runtime/test/generated/models/space_to_depth_v1_2.model.cpp b/nn/runtime/test/generated/models/space_to_depth_v1_2.model.cpp
index 5660e85..8384153 100644
--- a/nn/runtime/test/generated/models/space_to_depth_v1_2.model.cpp
+++ b/nn/runtime/test/generated/models/space_to_depth_v1_2.model.cpp
@@ -58,11 +58,11 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_nhwc_quant8(Model *model) {
+void CreateModel_nhwc_float16(Model *model) {
   OperandType type0(Type::BOOL, {});
   OperandType type3(Type::INT32, {});
-  OperandType type8(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.1f, 0);
-  OperandType type9(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 8}, 0.1f, 0);
+  OperandType type8(Type::TENSOR_FLOAT16, {1, 2, 2, 2});
+  OperandType type9(Type::TENSOR_FLOAT16, {1, 1, 1, 8});
   // Phase 1, operands
   auto op1 = model->addOperand(&type8);
   auto param = model->addOperand(&type3);
@@ -81,6 +81,34 @@
   assert(model->isValid());
 }
 
+inline bool is_ignored_nhwc_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nhwc_quant8(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type10(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.1f, 0);
+  OperandType type11(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 8}, 0.1f, 0);
+  OperandType type3(Type::INT32, {});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type10);
+  auto param = model->addOperand(&type3);
+  auto layout = model->addOperand(&type0);
+  auto op4 = model->addOperand(&type11);
+  // Phase 2, operations
+  static int32_t param_init[] = {2};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static bool layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_SPACE_TO_DEPTH, {op1, param, layout}, {op4});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1},
+    {op4});
+  assert(model->isValid());
+}
+
 inline bool is_ignored_nhwc_quant8(int i) {
   static std::set<int> ignore = {};
   return ignore.find(i) != ignore.end();
@@ -89,13 +117,13 @@
 void CreateModel_nchw(Model *model) {
   OperandType type0(Type::BOOL, {});
   OperandType type1(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
-  OperandType type10(Type::TENSOR_FLOAT32, {1, 8, 1, 1});
+  OperandType type12(Type::TENSOR_FLOAT32, {1, 8, 1, 1});
   OperandType type3(Type::INT32, {});
   // Phase 1, operands
   auto op1 = model->addOperand(&type1);
   auto param = model->addOperand(&type3);
   auto layout = model->addOperand(&type0);
-  auto op4 = model->addOperand(&type10);
+  auto op4 = model->addOperand(&type12);
   // Phase 2, operations
   static int32_t param_init[] = {2};
   model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
@@ -117,13 +145,13 @@
 void CreateModel_nchw_relaxed(Model *model) {
   OperandType type0(Type::BOOL, {});
   OperandType type1(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
-  OperandType type10(Type::TENSOR_FLOAT32, {1, 8, 1, 1});
+  OperandType type12(Type::TENSOR_FLOAT32, {1, 8, 1, 1});
   OperandType type3(Type::INT32, {});
   // Phase 1, operands
   auto op1 = model->addOperand(&type1);
   auto param = model->addOperand(&type3);
   auto layout = model->addOperand(&type0);
-  auto op4 = model->addOperand(&type10);
+  auto op4 = model->addOperand(&type12);
   // Phase 2, operations
   static int32_t param_init[] = {2};
   model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
@@ -144,16 +172,44 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_nchw_quant8(Model *model) {
+void CreateModel_nchw_float16(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type11(Type::TENSOR_QUANT8_ASYMM, {1, 8, 1, 1}, 0.1f, 0);
+  OperandType type13(Type::TENSOR_FLOAT16, {1, 8, 1, 1});
   OperandType type3(Type::INT32, {});
-  OperandType type8(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.1f, 0);
+  OperandType type8(Type::TENSOR_FLOAT16, {1, 2, 2, 2});
   // Phase 1, operands
   auto op1 = model->addOperand(&type8);
   auto param = model->addOperand(&type3);
   auto layout = model->addOperand(&type0);
-  auto op4 = model->addOperand(&type11);
+  auto op4 = model->addOperand(&type13);
+  // Phase 2, operations
+  static int32_t param_init[] = {2};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static bool layout_init[] = {true};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_SPACE_TO_DEPTH, {op1, param, layout}, {op4});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1},
+    {op4});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nchw_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nchw_quant8(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type10(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.1f, 0);
+  OperandType type14(Type::TENSOR_QUANT8_ASYMM, {1, 8, 1, 1}, 0.1f, 0);
+  OperandType type3(Type::INT32, {});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type10);
+  auto param = model->addOperand(&type3);
+  auto layout = model->addOperand(&type0);
+  auto op4 = model->addOperand(&type14);
   // Phase 2, operations
   static int32_t param_init[] = {2};
   model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
@@ -230,16 +286,44 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_nhwc_quant8_2(Model *model) {
+void CreateModel_nhwc_float16_2(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type12(Type::TENSOR_QUANT8_ASYMM, {1, 4, 4, 1}, 0.5f, 128);
-  OperandType type13(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.5f, 128);
+  OperandType type15(Type::TENSOR_FLOAT16, {1, 4, 4, 1});
+  OperandType type16(Type::TENSOR_FLOAT16, {1, 2, 2, 4});
   OperandType type3(Type::INT32, {});
   // Phase 1, operands
-  auto op11 = model->addOperand(&type12);
+  auto op11 = model->addOperand(&type15);
   auto param1 = model->addOperand(&type3);
   auto layout = model->addOperand(&type0);
-  auto op41 = model->addOperand(&type13);
+  auto op41 = model->addOperand(&type16);
+  // Phase 2, operations
+  static int32_t param1_init[] = {2};
+  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
+  static bool layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_SPACE_TO_DEPTH, {op11, param1, layout}, {op41});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op11},
+    {op41});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nhwc_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nhwc_quant8_2(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type17(Type::TENSOR_QUANT8_ASYMM, {1, 4, 4, 1}, 0.5f, 128);
+  OperandType type18(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.5f, 128);
+  OperandType type3(Type::INT32, {});
+  // Phase 1, operands
+  auto op11 = model->addOperand(&type17);
+  auto param1 = model->addOperand(&type3);
+  auto layout = model->addOperand(&type0);
+  auto op41 = model->addOperand(&type18);
   // Phase 2, operations
   static int32_t param1_init[] = {2};
   model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
@@ -260,14 +344,14 @@
 
 void CreateModel_nchw_2(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type14(Type::TENSOR_FLOAT32, {1, 1, 4, 4});
-  OperandType type15(Type::TENSOR_FLOAT32, {1, 4, 2, 2});
+  OperandType type19(Type::TENSOR_FLOAT32, {1, 1, 4, 4});
+  OperandType type20(Type::TENSOR_FLOAT32, {1, 4, 2, 2});
   OperandType type3(Type::INT32, {});
   // Phase 1, operands
-  auto op11 = model->addOperand(&type14);
+  auto op11 = model->addOperand(&type19);
   auto param1 = model->addOperand(&type3);
   auto layout = model->addOperand(&type0);
-  auto op41 = model->addOperand(&type15);
+  auto op41 = model->addOperand(&type20);
   // Phase 2, operations
   static int32_t param1_init[] = {2};
   model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
@@ -288,14 +372,14 @@
 
 void CreateModel_nchw_relaxed_2(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type14(Type::TENSOR_FLOAT32, {1, 1, 4, 4});
-  OperandType type15(Type::TENSOR_FLOAT32, {1, 4, 2, 2});
+  OperandType type19(Type::TENSOR_FLOAT32, {1, 1, 4, 4});
+  OperandType type20(Type::TENSOR_FLOAT32, {1, 4, 2, 2});
   OperandType type3(Type::INT32, {});
   // Phase 1, operands
-  auto op11 = model->addOperand(&type14);
+  auto op11 = model->addOperand(&type19);
   auto param1 = model->addOperand(&type3);
   auto layout = model->addOperand(&type0);
-  auto op41 = model->addOperand(&type15);
+  auto op41 = model->addOperand(&type20);
   // Phase 2, operations
   static int32_t param1_init[] = {2};
   model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
@@ -316,16 +400,44 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_nchw_quant8_2(Model *model) {
+void CreateModel_nchw_float16_2(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type16(Type::TENSOR_QUANT8_ASYMM, {1, 1, 4, 4}, 0.5f, 128);
-  OperandType type17(Type::TENSOR_QUANT8_ASYMM, {1, 4, 2, 2}, 0.5f, 128);
+  OperandType type21(Type::TENSOR_FLOAT16, {1, 1, 4, 4});
+  OperandType type22(Type::TENSOR_FLOAT16, {1, 4, 2, 2});
   OperandType type3(Type::INT32, {});
   // Phase 1, operands
-  auto op11 = model->addOperand(&type16);
+  auto op11 = model->addOperand(&type21);
   auto param1 = model->addOperand(&type3);
   auto layout = model->addOperand(&type0);
-  auto op41 = model->addOperand(&type17);
+  auto op41 = model->addOperand(&type22);
+  // Phase 2, operations
+  static int32_t param1_init[] = {2};
+  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
+  static bool layout_init[] = {true};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_SPACE_TO_DEPTH, {op11, param1, layout}, {op41});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op11},
+    {op41});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nchw_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nchw_quant8_2(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type23(Type::TENSOR_QUANT8_ASYMM, {1, 1, 4, 4}, 0.5f, 128);
+  OperandType type24(Type::TENSOR_QUANT8_ASYMM, {1, 4, 2, 2}, 0.5f, 128);
+  OperandType type3(Type::INT32, {});
+  // Phase 1, operands
+  auto op11 = model->addOperand(&type23);
+  auto param1 = model->addOperand(&type3);
+  auto layout = model->addOperand(&type0);
+  auto op41 = model->addOperand(&type24);
   // Phase 2, operations
   static int32_t param1_init[] = {2};
   model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
@@ -402,16 +514,44 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_nhwc_quant8_3(Model *model) {
+void CreateModel_nhwc_float16_3(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type18(Type::TENSOR_QUANT8_ASYMM, {1, 4, 4, 2}, 1.0f, 0);
-  OperandType type19(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 8}, 1.0f, 0);
+  OperandType type25(Type::TENSOR_FLOAT16, {1, 4, 4, 2});
+  OperandType type26(Type::TENSOR_FLOAT16, {1, 2, 2, 8});
   OperandType type3(Type::INT32, {});
   // Phase 1, operands
-  auto op12 = model->addOperand(&type18);
+  auto op12 = model->addOperand(&type25);
   auto param2 = model->addOperand(&type3);
   auto layout = model->addOperand(&type0);
-  auto op42 = model->addOperand(&type19);
+  auto op42 = model->addOperand(&type26);
+  // Phase 2, operations
+  static int32_t param2_init[] = {2};
+  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
+  static bool layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_SPACE_TO_DEPTH, {op12, param2, layout}, {op42});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op12},
+    {op42});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nhwc_float16_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nhwc_quant8_3(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type27(Type::TENSOR_QUANT8_ASYMM, {1, 4, 4, 2}, 1.0f, 0);
+  OperandType type28(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 8}, 1.0f, 0);
+  OperandType type3(Type::INT32, {});
+  // Phase 1, operands
+  auto op12 = model->addOperand(&type27);
+  auto param2 = model->addOperand(&type3);
+  auto layout = model->addOperand(&type0);
+  auto op42 = model->addOperand(&type28);
   // Phase 2, operations
   static int32_t param2_init[] = {2};
   model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
@@ -432,14 +572,14 @@
 
 void CreateModel_nchw_3(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type20(Type::TENSOR_FLOAT32, {1, 2, 4, 4});
-  OperandType type21(Type::TENSOR_FLOAT32, {1, 8, 2, 2});
+  OperandType type29(Type::TENSOR_FLOAT32, {1, 2, 4, 4});
   OperandType type3(Type::INT32, {});
+  OperandType type30(Type::TENSOR_FLOAT32, {1, 8, 2, 2});
   // Phase 1, operands
-  auto op12 = model->addOperand(&type20);
+  auto op12 = model->addOperand(&type29);
   auto param2 = model->addOperand(&type3);
   auto layout = model->addOperand(&type0);
-  auto op42 = model->addOperand(&type21);
+  auto op42 = model->addOperand(&type30);
   // Phase 2, operations
   static int32_t param2_init[] = {2};
   model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
@@ -460,14 +600,14 @@
 
 void CreateModel_nchw_relaxed_3(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type20(Type::TENSOR_FLOAT32, {1, 2, 4, 4});
-  OperandType type21(Type::TENSOR_FLOAT32, {1, 8, 2, 2});
+  OperandType type29(Type::TENSOR_FLOAT32, {1, 2, 4, 4});
   OperandType type3(Type::INT32, {});
+  OperandType type30(Type::TENSOR_FLOAT32, {1, 8, 2, 2});
   // Phase 1, operands
-  auto op12 = model->addOperand(&type20);
+  auto op12 = model->addOperand(&type29);
   auto param2 = model->addOperand(&type3);
   auto layout = model->addOperand(&type0);
-  auto op42 = model->addOperand(&type21);
+  auto op42 = model->addOperand(&type30);
   // Phase 2, operations
   static int32_t param2_init[] = {2};
   model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
@@ -488,16 +628,44 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_nchw_quant8_3(Model *model) {
+void CreateModel_nchw_float16_3(Model *model) {
   OperandType type0(Type::BOOL, {});
-  OperandType type22(Type::TENSOR_QUANT8_ASYMM, {1, 2, 4, 4}, 1.0f, 0);
-  OperandType type23(Type::TENSOR_QUANT8_ASYMM, {1, 8, 2, 2}, 1.0f, 0);
   OperandType type3(Type::INT32, {});
+  OperandType type31(Type::TENSOR_FLOAT16, {1, 2, 4, 4});
+  OperandType type32(Type::TENSOR_FLOAT16, {1, 8, 2, 2});
   // Phase 1, operands
-  auto op12 = model->addOperand(&type22);
+  auto op12 = model->addOperand(&type31);
   auto param2 = model->addOperand(&type3);
   auto layout = model->addOperand(&type0);
-  auto op42 = model->addOperand(&type23);
+  auto op42 = model->addOperand(&type32);
+  // Phase 2, operations
+  static int32_t param2_init[] = {2};
+  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
+  static bool layout_init[] = {true};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_SPACE_TO_DEPTH, {op12, param2, layout}, {op42});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op12},
+    {op42});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nchw_float16_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nchw_quant8_3(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type3(Type::INT32, {});
+  OperandType type33(Type::TENSOR_QUANT8_ASYMM, {1, 2, 4, 4}, 1.0f, 0);
+  OperandType type34(Type::TENSOR_QUANT8_ASYMM, {1, 8, 2, 2}, 1.0f, 0);
+  // Phase 1, operands
+  auto op12 = model->addOperand(&type33);
+  auto param2 = model->addOperand(&type3);
+  auto layout = model->addOperand(&type0);
+  auto op42 = model->addOperand(&type34);
   // Phase 2, operations
   static int32_t param2_init[] = {2};
   model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
diff --git a/nn/runtime/test/generated/models/squeeze_float16.model.cpp b/nn/runtime/test/generated/models/squeeze_float16.model.cpp
new file mode 100644
index 0000000..636d170
--- /dev/null
+++ b/nn/runtime/test/generated/models/squeeze_float16.model.cpp
@@ -0,0 +1,26 @@
+// clang-format off
+// Generated file (from: squeeze_float16.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT16, {4, 1, 1, 2});
+  OperandType type1(Type::TENSOR_INT32, {2});
+  OperandType type2(Type::TENSOR_FLOAT16, {4, 2});
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto squeezeDims = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t squeezeDims_init[] = {1, 2};
+  model->setOperandValue(squeezeDims, squeezeDims_init, sizeof(int32_t) * 2);
+  model->addOperation(ANEURALNETWORKS_SQUEEZE, {input, squeezeDims}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/models/strided_slice_float16.model.cpp b/nn/runtime/test/generated/models/strided_slice_float16.model.cpp
new file mode 100644
index 0000000..67bc705
--- /dev/null
+++ b/nn/runtime/test/generated/models/strided_slice_float16.model.cpp
@@ -0,0 +1,42 @@
+// clang-format off
+// Generated file (from: strided_slice_float16.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT16, {2, 3});
+  OperandType type1(Type::TENSOR_INT32, {2});
+  OperandType type2(Type::INT32, {});
+  OperandType type3(Type::TENSOR_FLOAT16, {1, 2});
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto begins = model->addOperand(&type1);
+  auto ends = model->addOperand(&type1);
+  auto strides = model->addOperand(&type1);
+  auto beginMask = model->addOperand(&type2);
+  auto endMask = model->addOperand(&type2);
+  auto shrinkAxisMask = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t begins_init[] = {0, 0};
+  model->setOperandValue(begins, begins_init, sizeof(int32_t) * 2);
+  static int32_t ends_init[] = {2, 3};
+  model->setOperandValue(ends, ends_init, sizeof(int32_t) * 2);
+  static int32_t strides_init[] = {2, 2};
+  model->setOperandValue(strides, strides_init, sizeof(int32_t) * 2);
+  static int32_t beginMask_init[] = {0};
+  model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
+  static int32_t endMask_init[] = {0};
+  model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
+  static int32_t shrinkAxisMask_init[] = {0};
+  model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/models/transpose_float16.model.cpp b/nn/runtime/test/generated/models/transpose_float16.model.cpp
new file mode 100644
index 0000000..bb7f53b
--- /dev/null
+++ b/nn/runtime/test/generated/models/transpose_float16.model.cpp
@@ -0,0 +1,25 @@
+// clang-format off
+// Generated file (from: transpose_float16.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT16, {1, 2, 2, 1});
+  OperandType type1(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto perms = model->addOperand(&type1);
+  auto output = model->addOperand(&type0);
+  // Phase 2, operations
+  static int32_t perms_init[] = {0, 2, 1, 3};
+  model->setOperandValue(perms, perms_init, sizeof(int32_t) * 4);
+  model->addOperation(ANEURALNETWORKS_TRANSPOSE, {input, perms}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/tests/abs.mod.py.cpp b/nn/runtime/test/generated/tests/abs.mod.py.cpp
new file mode 100644
index 0000000..f148520
--- /dev/null
+++ b/nn/runtime/test/generated/tests/abs.mod.py.cpp
@@ -0,0 +1,29 @@
+// clang-format off
+// Generated file (from: abs.mod.py). Do not edit
+#include "../../TestGenerated.h"
+
+namespace abs {
+// Generated abs test
+#include "generated/examples/abs.example.cpp"
+// Generated model constructor
+#include "generated/models/abs.model.cpp"
+} // namespace abs
+
+TEST_F(GeneratedTests, abs) {
+    execute(abs::CreateModel,
+            abs::is_ignored,
+            abs::get_examples());
+}
+
+TEST_F(GeneratedTests, abs_relaxed) {
+    execute(abs::CreateModel_relaxed,
+            abs::is_ignored_relaxed,
+            abs::get_examples_relaxed());
+}
+
+TEST_F(GeneratedTests, abs_float16) {
+    execute(abs::CreateModel_float16,
+            abs::is_ignored_float16,
+            abs::get_examples_float16());
+}
+
diff --git a/nn/runtime/test/generated/tests/add.mod.py.cpp b/nn/runtime/test/generated/tests/add.mod.py.cpp
index 5e99f5c..f0003a9 100644
--- a/nn/runtime/test/generated/tests/add.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/add.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, add) {
     execute(add::CreateModel,
             add::is_ignored,
-            add::examples);
+            add::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/add_broadcast_float16.mod.py.cpp b/nn/runtime/test/generated/tests/add_broadcast_float16.mod.py.cpp
index 1f41d1a..8f215ba 100644
--- a/nn/runtime/test/generated/tests/add_broadcast_float16.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/add_broadcast_float16.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, add_broadcast_float16) {
     execute(add_broadcast_float16::CreateModel,
             add_broadcast_float16::is_ignored,
-            add_broadcast_float16::examples);
+            add_broadcast_float16::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/add_broadcast_quant8.mod.py.cpp b/nn/runtime/test/generated/tests/add_broadcast_quant8.mod.py.cpp
index 90b1c82..5da3678 100644
--- a/nn/runtime/test/generated/tests/add_broadcast_quant8.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/add_broadcast_quant8.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, add_broadcast_quant8) {
     execute(add_broadcast_quant8::CreateModel,
             add_broadcast_quant8::is_ignored,
-            add_broadcast_quant8::examples);
+            add_broadcast_quant8::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/add_float16.mod.py.cpp b/nn/runtime/test/generated/tests/add_float16.mod.py.cpp
index b7f3d89..795c6c6 100644
--- a/nn/runtime/test/generated/tests/add_float16.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/add_float16.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, add_float16) {
     execute(add_float16::CreateModel,
             add_float16::is_ignored,
-            add_float16::examples);
+            add_float16::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/add_quant8.mod.py.cpp b/nn/runtime/test/generated/tests/add_quant8.mod.py.cpp
index 605e388..4304652 100644
--- a/nn/runtime/test/generated/tests/add_quant8.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/add_quant8.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, add_quant8) {
     execute(add_quant8::CreateModel,
             add_quant8::is_ignored,
-            add_quant8::examples);
+            add_quant8::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/add_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/add_relaxed.mod.py.cpp
index 40439b0..bfd483e 100644
--- a/nn/runtime/test/generated/tests/add_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/add_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, add_relaxed) {
     execute(add_relaxed::CreateModel,
             add_relaxed::is_ignored,
-            add_relaxed::examples);
+            add_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/argmax_1.mod.py.cpp b/nn/runtime/test/generated/tests/argmax_1.mod.py.cpp
new file mode 100644
index 0000000..08229f1
--- /dev/null
+++ b/nn/runtime/test/generated/tests/argmax_1.mod.py.cpp
@@ -0,0 +1,41 @@
+// clang-format off
+// Generated file (from: argmax_1.mod.py). Do not edit
+#include "../../TestGenerated.h"
+
+namespace argmax_1 {
+// Generated argmax_1 test
+#include "generated/examples/argmax_1.example.cpp"
+// Generated model constructor
+#include "generated/models/argmax_1.model.cpp"
+} // namespace argmax_1
+
+TEST_F(GeneratedTests, argmax_1) {
+    execute(argmax_1::CreateModel,
+            argmax_1::is_ignored,
+            argmax_1::get_examples());
+}
+
+TEST_F(GeneratedTests, argmax_1_relaxed) {
+    execute(argmax_1::CreateModel_relaxed,
+            argmax_1::is_ignored_relaxed,
+            argmax_1::get_examples_relaxed());
+}
+
+TEST_F(GeneratedTests, argmax_1_float16) {
+    execute(argmax_1::CreateModel_float16,
+            argmax_1::is_ignored_float16,
+            argmax_1::get_examples_float16());
+}
+
+TEST_F(GeneratedTests, argmax_1_int32) {
+    execute(argmax_1::CreateModel_int32,
+            argmax_1::is_ignored_int32,
+            argmax_1::get_examples_int32());
+}
+
+TEST_F(GeneratedTests, argmax_1_quant8) {
+    execute(argmax_1::CreateModel_quant8,
+            argmax_1::is_ignored_quant8,
+            argmax_1::get_examples_quant8());
+}
+
diff --git a/nn/runtime/test/generated/tests/argmax_1_float.mod.py.cpp b/nn/runtime/test/generated/tests/argmax_1_float.mod.py.cpp
deleted file mode 100644
index a9132d3..0000000
--- a/nn/runtime/test/generated/tests/argmax_1_float.mod.py.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-// clang-format off
-// Generated file (from: argmax_1_float.mod.py). Do not edit
-#include "../../TestGenerated.h"
-
-namespace argmax_1_float {
-// Generated argmax_1_float test
-#include "generated/examples/argmax_1_float.example.cpp"
-// Generated model constructor
-#include "generated/models/argmax_1_float.model.cpp"
-} // namespace argmax_1_float
-
-TEST_F(GeneratedTests, argmax_1_float) {
-    execute(argmax_1_float::CreateModel,
-            argmax_1_float::is_ignored,
-            argmax_1_float::examples);
-}
-
diff --git a/nn/runtime/test/generated/tests/argmax_1_float_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/argmax_1_float_relaxed.mod.py.cpp
deleted file mode 100644
index 1f96d26..0000000
--- a/nn/runtime/test/generated/tests/argmax_1_float_relaxed.mod.py.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-// clang-format off
-// Generated file (from: argmax_1_float_relaxed.mod.py). Do not edit
-#include "../../TestGenerated.h"
-
-namespace argmax_1_float_relaxed {
-// Generated argmax_1_float_relaxed test
-#include "generated/examples/argmax_1_float_relaxed.example.cpp"
-// Generated model constructor
-#include "generated/models/argmax_1_float_relaxed.model.cpp"
-} // namespace argmax_1_float_relaxed
-
-TEST_F(GeneratedTests, argmax_1_float_relaxed) {
-    execute(argmax_1_float_relaxed::CreateModel,
-            argmax_1_float_relaxed::is_ignored,
-            argmax_1_float_relaxed::examples);
-}
-
diff --git a/nn/runtime/test/generated/tests/argmax_1_int32.mod.py.cpp b/nn/runtime/test/generated/tests/argmax_1_int32.mod.py.cpp
deleted file mode 100644
index 8ff34b5..0000000
--- a/nn/runtime/test/generated/tests/argmax_1_int32.mod.py.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-// clang-format off
-// Generated file (from: argmax_1_int32.mod.py). Do not edit
-#include "../../TestGenerated.h"
-
-namespace argmax_1_int32 {
-// Generated argmax_1_int32 test
-#include "generated/examples/argmax_1_int32.example.cpp"
-// Generated model constructor
-#include "generated/models/argmax_1_int32.model.cpp"
-} // namespace argmax_1_int32
-
-TEST_F(GeneratedTests, argmax_1_int32) {
-    execute(argmax_1_int32::CreateModel,
-            argmax_1_int32::is_ignored,
-            argmax_1_int32::examples);
-}
-
diff --git a/nn/runtime/test/generated/tests/argmax_1_quant8.mod.py.cpp b/nn/runtime/test/generated/tests/argmax_1_quant8.mod.py.cpp
deleted file mode 100644
index 69522c7..0000000
--- a/nn/runtime/test/generated/tests/argmax_1_quant8.mod.py.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-// clang-format off
-// Generated file (from: argmax_1_quant8.mod.py). Do not edit
-#include "../../TestGenerated.h"
-
-namespace argmax_1_quant8 {
-// Generated argmax_1_quant8 test
-#include "generated/examples/argmax_1_quant8.example.cpp"
-// Generated model constructor
-#include "generated/models/argmax_1_quant8.model.cpp"
-} // namespace argmax_1_quant8
-
-TEST_F(GeneratedTests, argmax_1_quant8) {
-    execute(argmax_1_quant8::CreateModel,
-            argmax_1_quant8::is_ignored,
-            argmax_1_quant8::examples);
-}
-
diff --git a/nn/runtime/test/generated/tests/argmax_2.mod.py.cpp b/nn/runtime/test/generated/tests/argmax_2.mod.py.cpp
new file mode 100644
index 0000000..504aa71
--- /dev/null
+++ b/nn/runtime/test/generated/tests/argmax_2.mod.py.cpp
@@ -0,0 +1,41 @@
+// clang-format off
+// Generated file (from: argmax_2.mod.py). Do not edit
+#include "../../TestGenerated.h"
+
+namespace argmax_2 {
+// Generated argmax_2 test
+#include "generated/examples/argmax_2.example.cpp"
+// Generated model constructor
+#include "generated/models/argmax_2.model.cpp"
+} // namespace argmax_2
+
+TEST_F(GeneratedTests, argmax_2) {
+    execute(argmax_2::CreateModel,
+            argmax_2::is_ignored,
+            argmax_2::get_examples());
+}
+
+TEST_F(GeneratedTests, argmax_2_relaxed) {
+    execute(argmax_2::CreateModel_relaxed,
+            argmax_2::is_ignored_relaxed,
+            argmax_2::get_examples_relaxed());
+}
+
+TEST_F(GeneratedTests, argmax_2_float16) {
+    execute(argmax_2::CreateModel_float16,
+            argmax_2::is_ignored_float16,
+            argmax_2::get_examples_float16());
+}
+
+TEST_F(GeneratedTests, argmax_2_int32) {
+    execute(argmax_2::CreateModel_int32,
+            argmax_2::is_ignored_int32,
+            argmax_2::get_examples_int32());
+}
+
+TEST_F(GeneratedTests, argmax_2_quant8) {
+    execute(argmax_2::CreateModel_quant8,
+            argmax_2::is_ignored_quant8,
+            argmax_2::get_examples_quant8());
+}
+
diff --git a/nn/runtime/test/generated/tests/argmax_2_float.mod.py.cpp b/nn/runtime/test/generated/tests/argmax_2_float.mod.py.cpp
deleted file mode 100644
index e54d48a..0000000
--- a/nn/runtime/test/generated/tests/argmax_2_float.mod.py.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-// clang-format off
-// Generated file (from: argmax_2_float.mod.py). Do not edit
-#include "../../TestGenerated.h"
-
-namespace argmax_2_float {
-// Generated argmax_2_float test
-#include "generated/examples/argmax_2_float.example.cpp"
-// Generated model constructor
-#include "generated/models/argmax_2_float.model.cpp"
-} // namespace argmax_2_float
-
-TEST_F(GeneratedTests, argmax_2_float) {
-    execute(argmax_2_float::CreateModel,
-            argmax_2_float::is_ignored,
-            argmax_2_float::examples);
-}
-
diff --git a/nn/runtime/test/generated/tests/argmax_2_float_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/argmax_2_float_relaxed.mod.py.cpp
deleted file mode 100644
index 88420a4..0000000
--- a/nn/runtime/test/generated/tests/argmax_2_float_relaxed.mod.py.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-// clang-format off
-// Generated file (from: argmax_2_float_relaxed.mod.py). Do not edit
-#include "../../TestGenerated.h"
-
-namespace argmax_2_float_relaxed {
-// Generated argmax_2_float_relaxed test
-#include "generated/examples/argmax_2_float_relaxed.example.cpp"
-// Generated model constructor
-#include "generated/models/argmax_2_float_relaxed.model.cpp"
-} // namespace argmax_2_float_relaxed
-
-TEST_F(GeneratedTests, argmax_2_float_relaxed) {
-    execute(argmax_2_float_relaxed::CreateModel,
-            argmax_2_float_relaxed::is_ignored,
-            argmax_2_float_relaxed::examples);
-}
-
diff --git a/nn/runtime/test/generated/tests/argmax_2_int32.mod.py.cpp b/nn/runtime/test/generated/tests/argmax_2_int32.mod.py.cpp
deleted file mode 100644
index 5a6f5a7..0000000
--- a/nn/runtime/test/generated/tests/argmax_2_int32.mod.py.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-// clang-format off
-// Generated file (from: argmax_2_int32.mod.py). Do not edit
-#include "../../TestGenerated.h"
-
-namespace argmax_2_int32 {
-// Generated argmax_2_int32 test
-#include "generated/examples/argmax_2_int32.example.cpp"
-// Generated model constructor
-#include "generated/models/argmax_2_int32.model.cpp"
-} // namespace argmax_2_int32
-
-TEST_F(GeneratedTests, argmax_2_int32) {
-    execute(argmax_2_int32::CreateModel,
-            argmax_2_int32::is_ignored,
-            argmax_2_int32::examples);
-}
-
diff --git a/nn/runtime/test/generated/tests/argmax_2_quant8.mod.py.cpp b/nn/runtime/test/generated/tests/argmax_2_quant8.mod.py.cpp
deleted file mode 100644
index 95c4618..0000000
--- a/nn/runtime/test/generated/tests/argmax_2_quant8.mod.py.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-// clang-format off
-// Generated file (from: argmax_2_quant8.mod.py). Do not edit
-#include "../../TestGenerated.h"
-
-namespace argmax_2_quant8 {
-// Generated argmax_2_quant8 test
-#include "generated/examples/argmax_2_quant8.example.cpp"
-// Generated model constructor
-#include "generated/models/argmax_2_quant8.model.cpp"
-} // namespace argmax_2_quant8
-
-TEST_F(GeneratedTests, argmax_2_quant8) {
-    execute(argmax_2_quant8::CreateModel,
-            argmax_2_quant8::is_ignored,
-            argmax_2_quant8::examples);
-}
-
diff --git a/nn/runtime/test/generated/tests/argmax_3.mod.py.cpp b/nn/runtime/test/generated/tests/argmax_3.mod.py.cpp
new file mode 100644
index 0000000..c69947e
--- /dev/null
+++ b/nn/runtime/test/generated/tests/argmax_3.mod.py.cpp
@@ -0,0 +1,41 @@
+// clang-format off
+// Generated file (from: argmax_3.mod.py). Do not edit
+#include "../../TestGenerated.h"
+
+namespace argmax_3 {
+// Generated argmax_3 test
+#include "generated/examples/argmax_3.example.cpp"
+// Generated model constructor
+#include "generated/models/argmax_3.model.cpp"
+} // namespace argmax_3
+
+TEST_F(GeneratedTests, argmax_3) {
+    execute(argmax_3::CreateModel,
+            argmax_3::is_ignored,
+            argmax_3::get_examples());
+}
+
+TEST_F(GeneratedTests, argmax_3_relaxed) {
+    execute(argmax_3::CreateModel_relaxed,
+            argmax_3::is_ignored_relaxed,
+            argmax_3::get_examples_relaxed());
+}
+
+TEST_F(GeneratedTests, argmax_3_float16) {
+    execute(argmax_3::CreateModel_float16,
+            argmax_3::is_ignored_float16,
+            argmax_3::get_examples_float16());
+}
+
+TEST_F(GeneratedTests, argmax_3_int32) {
+    execute(argmax_3::CreateModel_int32,
+            argmax_3::is_ignored_int32,
+            argmax_3::get_examples_int32());
+}
+
+TEST_F(GeneratedTests, argmax_3_quant8) {
+    execute(argmax_3::CreateModel_quant8,
+            argmax_3::is_ignored_quant8,
+            argmax_3::get_examples_quant8());
+}
+
diff --git a/nn/runtime/test/generated/tests/argmax_3_float.mod.py.cpp b/nn/runtime/test/generated/tests/argmax_3_float.mod.py.cpp
deleted file mode 100644
index 43a400b..0000000
--- a/nn/runtime/test/generated/tests/argmax_3_float.mod.py.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-// clang-format off
-// Generated file (from: argmax_3_float.mod.py). Do not edit
-#include "../../TestGenerated.h"
-
-namespace argmax_3_float {
-// Generated argmax_3_float test
-#include "generated/examples/argmax_3_float.example.cpp"
-// Generated model constructor
-#include "generated/models/argmax_3_float.model.cpp"
-} // namespace argmax_3_float
-
-TEST_F(GeneratedTests, argmax_3_float) {
-    execute(argmax_3_float::CreateModel,
-            argmax_3_float::is_ignored,
-            argmax_3_float::examples);
-}
-
diff --git a/nn/runtime/test/generated/tests/argmin_1.mod.py.cpp b/nn/runtime/test/generated/tests/argmin_1.mod.py.cpp
new file mode 100644
index 0000000..ae3d9d1
--- /dev/null
+++ b/nn/runtime/test/generated/tests/argmin_1.mod.py.cpp
@@ -0,0 +1,41 @@
+// clang-format off
+// Generated file (from: argmin_1.mod.py). Do not edit
+#include "../../TestGenerated.h"
+
+namespace argmin_1 {
+// Generated argmin_1 test
+#include "generated/examples/argmin_1.example.cpp"
+// Generated model constructor
+#include "generated/models/argmin_1.model.cpp"
+} // namespace argmin_1
+
+TEST_F(GeneratedTests, argmin_1) {
+    execute(argmin_1::CreateModel,
+            argmin_1::is_ignored,
+            argmin_1::get_examples());
+}
+
+TEST_F(GeneratedTests, argmin_1_relaxed) {
+    execute(argmin_1::CreateModel_relaxed,
+            argmin_1::is_ignored_relaxed,
+            argmin_1::get_examples_relaxed());
+}
+
+TEST_F(GeneratedTests, argmin_1_float16) {
+    execute(argmin_1::CreateModel_float16,
+            argmin_1::is_ignored_float16,
+            argmin_1::get_examples_float16());
+}
+
+TEST_F(GeneratedTests, argmin_1_int32) {
+    execute(argmin_1::CreateModel_int32,
+            argmin_1::is_ignored_int32,
+            argmin_1::get_examples_int32());
+}
+
+TEST_F(GeneratedTests, argmin_1_quant8) {
+    execute(argmin_1::CreateModel_quant8,
+            argmin_1::is_ignored_quant8,
+            argmin_1::get_examples_quant8());
+}
+
diff --git a/nn/runtime/test/generated/tests/argmin_1_float.mod.py.cpp b/nn/runtime/test/generated/tests/argmin_1_float.mod.py.cpp
deleted file mode 100644
index e4bd9db..0000000
--- a/nn/runtime/test/generated/tests/argmin_1_float.mod.py.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-// clang-format off
-// Generated file (from: argmin_1_float.mod.py). Do not edit
-#include "../../TestGenerated.h"
-
-namespace argmin_1_float {
-// Generated argmin_1_float test
-#include "generated/examples/argmin_1_float.example.cpp"
-// Generated model constructor
-#include "generated/models/argmin_1_float.model.cpp"
-} // namespace argmin_1_float
-
-TEST_F(GeneratedTests, argmin_1_float) {
-    execute(argmin_1_float::CreateModel,
-            argmin_1_float::is_ignored,
-            argmin_1_float::examples);
-}
-
diff --git a/nn/runtime/test/generated/tests/argmin_1_float_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/argmin_1_float_relaxed.mod.py.cpp
deleted file mode 100644
index 6aa5c60..0000000
--- a/nn/runtime/test/generated/tests/argmin_1_float_relaxed.mod.py.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-// clang-format off
-// Generated file (from: argmin_1_float_relaxed.mod.py). Do not edit
-#include "../../TestGenerated.h"
-
-namespace argmin_1_float_relaxed {
-// Generated argmin_1_float_relaxed test
-#include "generated/examples/argmin_1_float_relaxed.example.cpp"
-// Generated model constructor
-#include "generated/models/argmin_1_float_relaxed.model.cpp"
-} // namespace argmin_1_float_relaxed
-
-TEST_F(GeneratedTests, argmin_1_float_relaxed) {
-    execute(argmin_1_float_relaxed::CreateModel,
-            argmin_1_float_relaxed::is_ignored,
-            argmin_1_float_relaxed::examples);
-}
-
diff --git a/nn/runtime/test/generated/tests/argmin_1_int32.mod.py.cpp b/nn/runtime/test/generated/tests/argmin_1_int32.mod.py.cpp
deleted file mode 100644
index f97809e..0000000
--- a/nn/runtime/test/generated/tests/argmin_1_int32.mod.py.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-// clang-format off
-// Generated file (from: argmin_1_int32.mod.py). Do not edit
-#include "../../TestGenerated.h"
-
-namespace argmin_1_int32 {
-// Generated argmin_1_int32 test
-#include "generated/examples/argmin_1_int32.example.cpp"
-// Generated model constructor
-#include "generated/models/argmin_1_int32.model.cpp"
-} // namespace argmin_1_int32
-
-TEST_F(GeneratedTests, argmin_1_int32) {
-    execute(argmin_1_int32::CreateModel,
-            argmin_1_int32::is_ignored,
-            argmin_1_int32::examples);
-}
-
diff --git a/nn/runtime/test/generated/tests/argmin_1_quant8.mod.py.cpp b/nn/runtime/test/generated/tests/argmin_1_quant8.mod.py.cpp
deleted file mode 100644
index 85d9a3f..0000000
--- a/nn/runtime/test/generated/tests/argmin_1_quant8.mod.py.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-// clang-format off
-// Generated file (from: argmin_1_quant8.mod.py). Do not edit
-#include "../../TestGenerated.h"
-
-namespace argmin_1_quant8 {
-// Generated argmin_1_quant8 test
-#include "generated/examples/argmin_1_quant8.example.cpp"
-// Generated model constructor
-#include "generated/models/argmin_1_quant8.model.cpp"
-} // namespace argmin_1_quant8
-
-TEST_F(GeneratedTests, argmin_1_quant8) {
-    execute(argmin_1_quant8::CreateModel,
-            argmin_1_quant8::is_ignored,
-            argmin_1_quant8::examples);
-}
-
diff --git a/nn/runtime/test/generated/tests/argmin_2.mod.py.cpp b/nn/runtime/test/generated/tests/argmin_2.mod.py.cpp
new file mode 100644
index 0000000..5c65fcf
--- /dev/null
+++ b/nn/runtime/test/generated/tests/argmin_2.mod.py.cpp
@@ -0,0 +1,41 @@
+// clang-format off
+// Generated file (from: argmin_2.mod.py). Do not edit
+#include "../../TestGenerated.h"
+
+namespace argmin_2 {
+// Generated argmin_2 test
+#include "generated/examples/argmin_2.example.cpp"
+// Generated model constructor
+#include "generated/models/argmin_2.model.cpp"
+} // namespace argmin_2
+
+TEST_F(GeneratedTests, argmin_2) {
+    execute(argmin_2::CreateModel,
+            argmin_2::is_ignored,
+            argmin_2::get_examples());
+}
+
+TEST_F(GeneratedTests, argmin_2_relaxed) {
+    execute(argmin_2::CreateModel_relaxed,
+            argmin_2::is_ignored_relaxed,
+            argmin_2::get_examples_relaxed());
+}
+
+TEST_F(GeneratedTests, argmin_2_float16) {
+    execute(argmin_2::CreateModel_float16,
+            argmin_2::is_ignored_float16,
+            argmin_2::get_examples_float16());
+}
+
+TEST_F(GeneratedTests, argmin_2_int32) {
+    execute(argmin_2::CreateModel_int32,
+            argmin_2::is_ignored_int32,
+            argmin_2::get_examples_int32());
+}
+
+TEST_F(GeneratedTests, argmin_2_quant8) {
+    execute(argmin_2::CreateModel_quant8,
+            argmin_2::is_ignored_quant8,
+            argmin_2::get_examples_quant8());
+}
+
diff --git a/nn/runtime/test/generated/tests/argmin_2_float.mod.py.cpp b/nn/runtime/test/generated/tests/argmin_2_float.mod.py.cpp
deleted file mode 100644
index 32cc76f..0000000
--- a/nn/runtime/test/generated/tests/argmin_2_float.mod.py.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-// clang-format off
-// Generated file (from: argmin_2_float.mod.py). Do not edit
-#include "../../TestGenerated.h"
-
-namespace argmin_2_float {
-// Generated argmin_2_float test
-#include "generated/examples/argmin_2_float.example.cpp"
-// Generated model constructor
-#include "generated/models/argmin_2_float.model.cpp"
-} // namespace argmin_2_float
-
-TEST_F(GeneratedTests, argmin_2_float) {
-    execute(argmin_2_float::CreateModel,
-            argmin_2_float::is_ignored,
-            argmin_2_float::examples);
-}
-
diff --git a/nn/runtime/test/generated/tests/argmin_2_float_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/argmin_2_float_relaxed.mod.py.cpp
deleted file mode 100644
index c7c6be6..0000000
--- a/nn/runtime/test/generated/tests/argmin_2_float_relaxed.mod.py.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-// clang-format off
-// Generated file (from: argmin_2_float_relaxed.mod.py). Do not edit
-#include "../../TestGenerated.h"
-
-namespace argmin_2_float_relaxed {
-// Generated argmin_2_float_relaxed test
-#include "generated/examples/argmin_2_float_relaxed.example.cpp"
-// Generated model constructor
-#include "generated/models/argmin_2_float_relaxed.model.cpp"
-} // namespace argmin_2_float_relaxed
-
-TEST_F(GeneratedTests, argmin_2_float_relaxed) {
-    execute(argmin_2_float_relaxed::CreateModel,
-            argmin_2_float_relaxed::is_ignored,
-            argmin_2_float_relaxed::examples);
-}
-
diff --git a/nn/runtime/test/generated/tests/argmin_2_int32.mod.py.cpp b/nn/runtime/test/generated/tests/argmin_2_int32.mod.py.cpp
deleted file mode 100644
index 8974781..0000000
--- a/nn/runtime/test/generated/tests/argmin_2_int32.mod.py.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-// clang-format off
-// Generated file (from: argmin_2_int32.mod.py). Do not edit
-#include "../../TestGenerated.h"
-
-namespace argmin_2_int32 {
-// Generated argmin_2_int32 test
-#include "generated/examples/argmin_2_int32.example.cpp"
-// Generated model constructor
-#include "generated/models/argmin_2_int32.model.cpp"
-} // namespace argmin_2_int32
-
-TEST_F(GeneratedTests, argmin_2_int32) {
-    execute(argmin_2_int32::CreateModel,
-            argmin_2_int32::is_ignored,
-            argmin_2_int32::examples);
-}
-
diff --git a/nn/runtime/test/generated/tests/argmin_2_quant8.mod.py.cpp b/nn/runtime/test/generated/tests/argmin_2_quant8.mod.py.cpp
deleted file mode 100644
index 68c5926..0000000
--- a/nn/runtime/test/generated/tests/argmin_2_quant8.mod.py.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-// clang-format off
-// Generated file (from: argmin_2_quant8.mod.py). Do not edit
-#include "../../TestGenerated.h"
-
-namespace argmin_2_quant8 {
-// Generated argmin_2_quant8 test
-#include "generated/examples/argmin_2_quant8.example.cpp"
-// Generated model constructor
-#include "generated/models/argmin_2_quant8.model.cpp"
-} // namespace argmin_2_quant8
-
-TEST_F(GeneratedTests, argmin_2_quant8) {
-    execute(argmin_2_quant8::CreateModel,
-            argmin_2_quant8::is_ignored,
-            argmin_2_quant8::examples);
-}
-
diff --git a/nn/runtime/test/generated/tests/argmin_3.mod.py.cpp b/nn/runtime/test/generated/tests/argmin_3.mod.py.cpp
new file mode 100644
index 0000000..df0aa0b
--- /dev/null
+++ b/nn/runtime/test/generated/tests/argmin_3.mod.py.cpp
@@ -0,0 +1,41 @@
+// clang-format off
+// Generated file (from: argmin_3.mod.py). Do not edit
+#include "../../TestGenerated.h"
+
+namespace argmin_3 {
+// Generated argmin_3 test
+#include "generated/examples/argmin_3.example.cpp"
+// Generated model constructor
+#include "generated/models/argmin_3.model.cpp"
+} // namespace argmin_3
+
+TEST_F(GeneratedTests, argmin_3) {
+    execute(argmin_3::CreateModel,
+            argmin_3::is_ignored,
+            argmin_3::get_examples());
+}
+
+TEST_F(GeneratedTests, argmin_3_relaxed) {
+    execute(argmin_3::CreateModel_relaxed,
+            argmin_3::is_ignored_relaxed,
+            argmin_3::get_examples_relaxed());
+}
+
+TEST_F(GeneratedTests, argmin_3_float16) {
+    execute(argmin_3::CreateModel_float16,
+            argmin_3::is_ignored_float16,
+            argmin_3::get_examples_float16());
+}
+
+TEST_F(GeneratedTests, argmin_3_int32) {
+    execute(argmin_3::CreateModel_int32,
+            argmin_3::is_ignored_int32,
+            argmin_3::get_examples_int32());
+}
+
+TEST_F(GeneratedTests, argmin_3_quant8) {
+    execute(argmin_3::CreateModel_quant8,
+            argmin_3::is_ignored_quant8,
+            argmin_3::get_examples_quant8());
+}
+
diff --git a/nn/runtime/test/generated/tests/argmin_3_float.mod.py.cpp b/nn/runtime/test/generated/tests/argmin_3_float.mod.py.cpp
deleted file mode 100644
index f2de424..0000000
--- a/nn/runtime/test/generated/tests/argmin_3_float.mod.py.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-// clang-format off
-// Generated file (from: argmin_3_float.mod.py). Do not edit
-#include "../../TestGenerated.h"
-
-namespace argmin_3_float {
-// Generated argmin_3_float test
-#include "generated/examples/argmin_3_float.example.cpp"
-// Generated model constructor
-#include "generated/models/argmin_3_float.model.cpp"
-} // namespace argmin_3_float
-
-TEST_F(GeneratedTests, argmin_3_float) {
-    execute(argmin_3_float::CreateModel,
-            argmin_3_float::is_ignored,
-            argmin_3_float::examples);
-}
-
diff --git a/nn/runtime/test/generated/tests/avg_pool_float_1.mod.py.cpp b/nn/runtime/test/generated/tests/avg_pool_float_1.mod.py.cpp
index 6e64c72..647d2c9 100644
--- a/nn/runtime/test/generated/tests/avg_pool_float_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/avg_pool_float_1.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, avg_pool_float_1) {
     execute(avg_pool_float_1::CreateModel,
             avg_pool_float_1::is_ignored,
-            avg_pool_float_1::examples);
+            avg_pool_float_1::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/avg_pool_float_1_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/avg_pool_float_1_relaxed.mod.py.cpp
index 5ad3796..4bb0388 100644
--- a/nn/runtime/test/generated/tests/avg_pool_float_1_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/avg_pool_float_1_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, avg_pool_float_1_relaxed) {
     execute(avg_pool_float_1_relaxed::CreateModel,
             avg_pool_float_1_relaxed::is_ignored,
-            avg_pool_float_1_relaxed::examples);
+            avg_pool_float_1_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/avg_pool_float_2.mod.py.cpp b/nn/runtime/test/generated/tests/avg_pool_float_2.mod.py.cpp
index 5348253..680a357 100644
--- a/nn/runtime/test/generated/tests/avg_pool_float_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/avg_pool_float_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, avg_pool_float_2) {
     execute(avg_pool_float_2::CreateModel,
             avg_pool_float_2::is_ignored,
-            avg_pool_float_2::examples);
+            avg_pool_float_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/avg_pool_float_2_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/avg_pool_float_2_relaxed.mod.py.cpp
index 50f47fa..6f251ad 100644
--- a/nn/runtime/test/generated/tests/avg_pool_float_2_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/avg_pool_float_2_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, avg_pool_float_2_relaxed) {
     execute(avg_pool_float_2_relaxed::CreateModel,
             avg_pool_float_2_relaxed::is_ignored,
-            avg_pool_float_2_relaxed::examples);
+            avg_pool_float_2_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/avg_pool_float_3.mod.py.cpp b/nn/runtime/test/generated/tests/avg_pool_float_3.mod.py.cpp
index a98744a..976311a 100644
--- a/nn/runtime/test/generated/tests/avg_pool_float_3.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/avg_pool_float_3.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, avg_pool_float_3) {
     execute(avg_pool_float_3::CreateModel,
             avg_pool_float_3::is_ignored,
-            avg_pool_float_3::examples);
+            avg_pool_float_3::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/avg_pool_float_3_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/avg_pool_float_3_relaxed.mod.py.cpp
index 37882a2..acb0086 100644
--- a/nn/runtime/test/generated/tests/avg_pool_float_3_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/avg_pool_float_3_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, avg_pool_float_3_relaxed) {
     execute(avg_pool_float_3_relaxed::CreateModel,
             avg_pool_float_3_relaxed::is_ignored,
-            avg_pool_float_3_relaxed::examples);
+            avg_pool_float_3_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/avg_pool_float_4.mod.py.cpp b/nn/runtime/test/generated/tests/avg_pool_float_4.mod.py.cpp
index c412b1d..36f2ff0 100644
--- a/nn/runtime/test/generated/tests/avg_pool_float_4.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/avg_pool_float_4.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, avg_pool_float_4) {
     execute(avg_pool_float_4::CreateModel,
             avg_pool_float_4::is_ignored,
-            avg_pool_float_4::examples);
+            avg_pool_float_4::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/avg_pool_float_4_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/avg_pool_float_4_relaxed.mod.py.cpp
index 69b6633..2fe27d2 100644
--- a/nn/runtime/test/generated/tests/avg_pool_float_4_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/avg_pool_float_4_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, avg_pool_float_4_relaxed) {
     execute(avg_pool_float_4_relaxed::CreateModel,
             avg_pool_float_4_relaxed::is_ignored,
-            avg_pool_float_4_relaxed::examples);
+            avg_pool_float_4_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/avg_pool_float_5.mod.py.cpp b/nn/runtime/test/generated/tests/avg_pool_float_5.mod.py.cpp
index 7f0e6e5..87ae087 100644
--- a/nn/runtime/test/generated/tests/avg_pool_float_5.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/avg_pool_float_5.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, avg_pool_float_5) {
     execute(avg_pool_float_5::CreateModel,
             avg_pool_float_5::is_ignored,
-            avg_pool_float_5::examples);
+            avg_pool_float_5::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/avg_pool_float_5_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/avg_pool_float_5_relaxed.mod.py.cpp
index 83e174a..83bb9b0 100644
--- a/nn/runtime/test/generated/tests/avg_pool_float_5_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/avg_pool_float_5_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, avg_pool_float_5_relaxed) {
     execute(avg_pool_float_5_relaxed::CreateModel,
             avg_pool_float_5_relaxed::is_ignored,
-            avg_pool_float_5_relaxed::examples);
+            avg_pool_float_5_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/avg_pool_quant8_1.mod.py.cpp b/nn/runtime/test/generated/tests/avg_pool_quant8_1.mod.py.cpp
index 4b70f99..ec5425f 100644
--- a/nn/runtime/test/generated/tests/avg_pool_quant8_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/avg_pool_quant8_1.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, avg_pool_quant8_1) {
     execute(avg_pool_quant8_1::CreateModel,
             avg_pool_quant8_1::is_ignored,
-            avg_pool_quant8_1::examples);
+            avg_pool_quant8_1::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/avg_pool_quant8_2.mod.py.cpp b/nn/runtime/test/generated/tests/avg_pool_quant8_2.mod.py.cpp
index 0867860..c867e53 100644
--- a/nn/runtime/test/generated/tests/avg_pool_quant8_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/avg_pool_quant8_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, avg_pool_quant8_2) {
     execute(avg_pool_quant8_2::CreateModel,
             avg_pool_quant8_2::is_ignored,
-            avg_pool_quant8_2::examples);
+            avg_pool_quant8_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/avg_pool_quant8_3.mod.py.cpp b/nn/runtime/test/generated/tests/avg_pool_quant8_3.mod.py.cpp
index f15b0c0..c284ad0 100644
--- a/nn/runtime/test/generated/tests/avg_pool_quant8_3.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/avg_pool_quant8_3.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, avg_pool_quant8_3) {
     execute(avg_pool_quant8_3::CreateModel,
             avg_pool_quant8_3::is_ignored,
-            avg_pool_quant8_3::examples);
+            avg_pool_quant8_3::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/avg_pool_quant8_4.mod.py.cpp b/nn/runtime/test/generated/tests/avg_pool_quant8_4.mod.py.cpp
index 69e8d38..1abc637 100644
--- a/nn/runtime/test/generated/tests/avg_pool_quant8_4.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/avg_pool_quant8_4.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, avg_pool_quant8_4) {
     execute(avg_pool_quant8_4::CreateModel,
             avg_pool_quant8_4::is_ignored,
-            avg_pool_quant8_4::examples);
+            avg_pool_quant8_4::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/avg_pool_quant8_5.mod.py.cpp b/nn/runtime/test/generated/tests/avg_pool_quant8_5.mod.py.cpp
index 71d0a30..f2248ba 100644
--- a/nn/runtime/test/generated/tests/avg_pool_quant8_5.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/avg_pool_quant8_5.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, avg_pool_quant8_5) {
     execute(avg_pool_quant8_5::CreateModel,
             avg_pool_quant8_5::is_ignored,
-            avg_pool_quant8_5::examples);
+            avg_pool_quant8_5::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/avg_pool_v1_2.mod.py.cpp b/nn/runtime/test/generated/tests/avg_pool_v1_2.mod.py.cpp
index 9854b14..d9f210d 100644
--- a/nn/runtime/test/generated/tests/avg_pool_v1_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/avg_pool_v1_2.mod.py.cpp
@@ -12,180 +12,180 @@
 TEST_F(GeneratedTests, avg_pool_v1_2_nhwc) {
     execute(avg_pool_v1_2::CreateModel_nhwc,
             avg_pool_v1_2::is_ignored_nhwc,
-            avg_pool_v1_2::examples_nhwc);
+            avg_pool_v1_2::get_examples_nhwc());
 }
 
 TEST_F(GeneratedTests, avg_pool_v1_2_nhwc_relaxed) {
     execute(avg_pool_v1_2::CreateModel_nhwc_relaxed,
             avg_pool_v1_2::is_ignored_nhwc_relaxed,
-            avg_pool_v1_2::examples_nhwc_relaxed);
+            avg_pool_v1_2::get_examples_nhwc_relaxed());
 }
 
 TEST_F(GeneratedTests, avg_pool_v1_2_nhwc_quant8) {
     execute(avg_pool_v1_2::CreateModel_nhwc_quant8,
             avg_pool_v1_2::is_ignored_nhwc_quant8,
-            avg_pool_v1_2::examples_nhwc_quant8);
+            avg_pool_v1_2::get_examples_nhwc_quant8());
 }
 
 TEST_F(GeneratedTests, avg_pool_v1_2_nchw) {
     execute(avg_pool_v1_2::CreateModel_nchw,
             avg_pool_v1_2::is_ignored_nchw,
-            avg_pool_v1_2::examples_nchw);
+            avg_pool_v1_2::get_examples_nchw());
 }
 
 TEST_F(GeneratedTests, avg_pool_v1_2_nchw_relaxed) {
     execute(avg_pool_v1_2::CreateModel_nchw_relaxed,
             avg_pool_v1_2::is_ignored_nchw_relaxed,
-            avg_pool_v1_2::examples_nchw_relaxed);
+            avg_pool_v1_2::get_examples_nchw_relaxed());
 }
 
 TEST_F(GeneratedTests, avg_pool_v1_2_nchw_quant8) {
     execute(avg_pool_v1_2::CreateModel_nchw_quant8,
             avg_pool_v1_2::is_ignored_nchw_quant8,
-            avg_pool_v1_2::examples_nchw_quant8);
+            avg_pool_v1_2::get_examples_nchw_quant8());
 }
 
 TEST_F(GeneratedTests, avg_pool_v1_2_nhwc_2) {
     execute(avg_pool_v1_2::CreateModel_nhwc_2,
             avg_pool_v1_2::is_ignored_nhwc_2,
-            avg_pool_v1_2::examples_nhwc_2);
+            avg_pool_v1_2::get_examples_nhwc_2());
 }
 
 TEST_F(GeneratedTests, avg_pool_v1_2_nhwc_relaxed_2) {
     execute(avg_pool_v1_2::CreateModel_nhwc_relaxed_2,
             avg_pool_v1_2::is_ignored_nhwc_relaxed_2,
-            avg_pool_v1_2::examples_nhwc_relaxed_2);
+            avg_pool_v1_2::get_examples_nhwc_relaxed_2());
 }
 
 TEST_F(GeneratedTests, avg_pool_v1_2_nhwc_quant8_2) {
     execute(avg_pool_v1_2::CreateModel_nhwc_quant8_2,
             avg_pool_v1_2::is_ignored_nhwc_quant8_2,
-            avg_pool_v1_2::examples_nhwc_quant8_2);
+            avg_pool_v1_2::get_examples_nhwc_quant8_2());
 }
 
 TEST_F(GeneratedTests, avg_pool_v1_2_nchw_2) {
     execute(avg_pool_v1_2::CreateModel_nchw_2,
             avg_pool_v1_2::is_ignored_nchw_2,
-            avg_pool_v1_2::examples_nchw_2);
+            avg_pool_v1_2::get_examples_nchw_2());
 }
 
 TEST_F(GeneratedTests, avg_pool_v1_2_nchw_relaxed_2) {
     execute(avg_pool_v1_2::CreateModel_nchw_relaxed_2,
             avg_pool_v1_2::is_ignored_nchw_relaxed_2,
-            avg_pool_v1_2::examples_nchw_relaxed_2);
+            avg_pool_v1_2::get_examples_nchw_relaxed_2());
 }
 
 TEST_F(GeneratedTests, avg_pool_v1_2_nchw_quant8_2) {
     execute(avg_pool_v1_2::CreateModel_nchw_quant8_2,
             avg_pool_v1_2::is_ignored_nchw_quant8_2,
-            avg_pool_v1_2::examples_nchw_quant8_2);
+            avg_pool_v1_2::get_examples_nchw_quant8_2());
 }
 
 TEST_F(GeneratedTests, avg_pool_v1_2_nhwc_3) {
     execute(avg_pool_v1_2::CreateModel_nhwc_3,
             avg_pool_v1_2::is_ignored_nhwc_3,
-            avg_pool_v1_2::examples_nhwc_3);
+            avg_pool_v1_2::get_examples_nhwc_3());
 }
 
 TEST_F(GeneratedTests, avg_pool_v1_2_nhwc_relaxed_3) {
     execute(avg_pool_v1_2::CreateModel_nhwc_relaxed_3,
             avg_pool_v1_2::is_ignored_nhwc_relaxed_3,
-            avg_pool_v1_2::examples_nhwc_relaxed_3);
+            avg_pool_v1_2::get_examples_nhwc_relaxed_3());
 }
 
 TEST_F(GeneratedTests, avg_pool_v1_2_nhwc_quant8_3) {
     execute(avg_pool_v1_2::CreateModel_nhwc_quant8_3,
             avg_pool_v1_2::is_ignored_nhwc_quant8_3,
-            avg_pool_v1_2::examples_nhwc_quant8_3);
+            avg_pool_v1_2::get_examples_nhwc_quant8_3());
 }
 
 TEST_F(GeneratedTests, avg_pool_v1_2_nchw_3) {
     execute(avg_pool_v1_2::CreateModel_nchw_3,
             avg_pool_v1_2::is_ignored_nchw_3,
-            avg_pool_v1_2::examples_nchw_3);
+            avg_pool_v1_2::get_examples_nchw_3());
 }
 
 TEST_F(GeneratedTests, avg_pool_v1_2_nchw_relaxed_3) {
     execute(avg_pool_v1_2::CreateModel_nchw_relaxed_3,
             avg_pool_v1_2::is_ignored_nchw_relaxed_3,
-            avg_pool_v1_2::examples_nchw_relaxed_3);
+            avg_pool_v1_2::get_examples_nchw_relaxed_3());
 }
 
 TEST_F(GeneratedTests, avg_pool_v1_2_nchw_quant8_3) {
     execute(avg_pool_v1_2::CreateModel_nchw_quant8_3,
             avg_pool_v1_2::is_ignored_nchw_quant8_3,
-            avg_pool_v1_2::examples_nchw_quant8_3);
+            avg_pool_v1_2::get_examples_nchw_quant8_3());
 }
 
 TEST_F(GeneratedTests, avg_pool_v1_2_nhwc_4) {
     execute(avg_pool_v1_2::CreateModel_nhwc_4,
             avg_pool_v1_2::is_ignored_nhwc_4,
-            avg_pool_v1_2::examples_nhwc_4);
+            avg_pool_v1_2::get_examples_nhwc_4());
 }
 
 TEST_F(GeneratedTests, avg_pool_v1_2_nhwc_relaxed_4) {
     execute(avg_pool_v1_2::CreateModel_nhwc_relaxed_4,
             avg_pool_v1_2::is_ignored_nhwc_relaxed_4,
-            avg_pool_v1_2::examples_nhwc_relaxed_4);
+            avg_pool_v1_2::get_examples_nhwc_relaxed_4());
 }
 
 TEST_F(GeneratedTests, avg_pool_v1_2_nhwc_quant8_4) {
     execute(avg_pool_v1_2::CreateModel_nhwc_quant8_4,
             avg_pool_v1_2::is_ignored_nhwc_quant8_4,
-            avg_pool_v1_2::examples_nhwc_quant8_4);
+            avg_pool_v1_2::get_examples_nhwc_quant8_4());
 }
 
 TEST_F(GeneratedTests, avg_pool_v1_2_nchw_4) {
     execute(avg_pool_v1_2::CreateModel_nchw_4,
             avg_pool_v1_2::is_ignored_nchw_4,
-            avg_pool_v1_2::examples_nchw_4);
+            avg_pool_v1_2::get_examples_nchw_4());
 }
 
 TEST_F(GeneratedTests, avg_pool_v1_2_nchw_relaxed_4) {
     execute(avg_pool_v1_2::CreateModel_nchw_relaxed_4,
             avg_pool_v1_2::is_ignored_nchw_relaxed_4,
-            avg_pool_v1_2::examples_nchw_relaxed_4);
+            avg_pool_v1_2::get_examples_nchw_relaxed_4());
 }
 
 TEST_F(GeneratedTests, avg_pool_v1_2_nchw_quant8_4) {
     execute(avg_pool_v1_2::CreateModel_nchw_quant8_4,
             avg_pool_v1_2::is_ignored_nchw_quant8_4,
-            avg_pool_v1_2::examples_nchw_quant8_4);
+            avg_pool_v1_2::get_examples_nchw_quant8_4());
 }
 
 TEST_F(GeneratedTests, avg_pool_v1_2_nhwc_5) {
     execute(avg_pool_v1_2::CreateModel_nhwc_5,
             avg_pool_v1_2::is_ignored_nhwc_5,
-            avg_pool_v1_2::examples_nhwc_5);
+            avg_pool_v1_2::get_examples_nhwc_5());
 }
 
 TEST_F(GeneratedTests, avg_pool_v1_2_nhwc_relaxed_5) {
     execute(avg_pool_v1_2::CreateModel_nhwc_relaxed_5,
             avg_pool_v1_2::is_ignored_nhwc_relaxed_5,
-            avg_pool_v1_2::examples_nhwc_relaxed_5);
+            avg_pool_v1_2::get_examples_nhwc_relaxed_5());
 }
 
 TEST_F(GeneratedTests, avg_pool_v1_2_nhwc_quant8_5) {
     execute(avg_pool_v1_2::CreateModel_nhwc_quant8_5,
             avg_pool_v1_2::is_ignored_nhwc_quant8_5,
-            avg_pool_v1_2::examples_nhwc_quant8_5);
+            avg_pool_v1_2::get_examples_nhwc_quant8_5());
 }
 
 TEST_F(GeneratedTests, avg_pool_v1_2_nchw_5) {
     execute(avg_pool_v1_2::CreateModel_nchw_5,
             avg_pool_v1_2::is_ignored_nchw_5,
-            avg_pool_v1_2::examples_nchw_5);
+            avg_pool_v1_2::get_examples_nchw_5());
 }
 
 TEST_F(GeneratedTests, avg_pool_v1_2_nchw_relaxed_5) {
     execute(avg_pool_v1_2::CreateModel_nchw_relaxed_5,
             avg_pool_v1_2::is_ignored_nchw_relaxed_5,
-            avg_pool_v1_2::examples_nchw_relaxed_5);
+            avg_pool_v1_2::get_examples_nchw_relaxed_5());
 }
 
 TEST_F(GeneratedTests, avg_pool_v1_2_nchw_quant8_5) {
     execute(avg_pool_v1_2::CreateModel_nchw_quant8_5,
             avg_pool_v1_2::is_ignored_nchw_quant8_5,
-            avg_pool_v1_2::examples_nchw_quant8_5);
+            avg_pool_v1_2::get_examples_nchw_quant8_5());
 }
 
diff --git a/nn/runtime/test/generated/tests/axis_aligned_bbox_transform.mod.py.cpp b/nn/runtime/test/generated/tests/axis_aligned_bbox_transform.mod.py.cpp
index 3bc8207..42ab529 100644
--- a/nn/runtime/test/generated/tests/axis_aligned_bbox_transform.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/axis_aligned_bbox_transform.mod.py.cpp
@@ -12,36 +12,36 @@
 TEST_F(GeneratedTests, axis_aligned_bbox_transform) {
     execute(axis_aligned_bbox_transform::CreateModel,
             axis_aligned_bbox_transform::is_ignored,
-            axis_aligned_bbox_transform::examples);
+            axis_aligned_bbox_transform::get_examples());
 }
 
 TEST_F(GeneratedTests, axis_aligned_bbox_transform_relaxed) {
     execute(axis_aligned_bbox_transform::CreateModel_relaxed,
             axis_aligned_bbox_transform::is_ignored_relaxed,
-            axis_aligned_bbox_transform::examples_relaxed);
+            axis_aligned_bbox_transform::get_examples_relaxed());
 }
 
 TEST_F(GeneratedTests, axis_aligned_bbox_transform_2) {
     execute(axis_aligned_bbox_transform::CreateModel_2,
             axis_aligned_bbox_transform::is_ignored_2,
-            axis_aligned_bbox_transform::examples_2);
+            axis_aligned_bbox_transform::get_examples_2());
 }
 
 TEST_F(GeneratedTests, axis_aligned_bbox_transform_relaxed_2) {
     execute(axis_aligned_bbox_transform::CreateModel_relaxed_2,
             axis_aligned_bbox_transform::is_ignored_relaxed_2,
-            axis_aligned_bbox_transform::examples_relaxed_2);
+            axis_aligned_bbox_transform::get_examples_relaxed_2());
 }
 
 TEST_F(GeneratedTests, axis_aligned_bbox_transform_single_batch) {
     execute(axis_aligned_bbox_transform::CreateModel_single_batch,
             axis_aligned_bbox_transform::is_ignored_single_batch,
-            axis_aligned_bbox_transform::examples_single_batch);
+            axis_aligned_bbox_transform::get_examples_single_batch());
 }
 
 TEST_F(GeneratedTests, axis_aligned_bbox_transform_single_batch_relaxed) {
     execute(axis_aligned_bbox_transform::CreateModel_single_batch_relaxed,
             axis_aligned_bbox_transform::is_ignored_single_batch_relaxed,
-            axis_aligned_bbox_transform::examples_single_batch_relaxed);
+            axis_aligned_bbox_transform::get_examples_single_batch_relaxed());
 }
 
diff --git a/nn/runtime/test/generated/tests/batch_to_space.mod.py.cpp b/nn/runtime/test/generated/tests/batch_to_space.mod.py.cpp
index 465f1ff..3e84ac0 100644
--- a/nn/runtime/test/generated/tests/batch_to_space.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/batch_to_space.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, batch_to_space) {
     execute(batch_to_space::CreateModel,
             batch_to_space::is_ignored,
-            batch_to_space::examples);
+            batch_to_space::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/batch_to_space_float_1.mod.py.cpp b/nn/runtime/test/generated/tests/batch_to_space_float_1.mod.py.cpp
index 3079675..b216ce9 100644
--- a/nn/runtime/test/generated/tests/batch_to_space_float_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/batch_to_space_float_1.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, batch_to_space_float_1) {
     execute(batch_to_space_float_1::CreateModel,
             batch_to_space_float_1::is_ignored,
-            batch_to_space_float_1::examples);
+            batch_to_space_float_1::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/batch_to_space_float_1_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/batch_to_space_float_1_relaxed.mod.py.cpp
index ed91d30..dca843f 100644
--- a/nn/runtime/test/generated/tests/batch_to_space_float_1_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/batch_to_space_float_1_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, batch_to_space_float_1_relaxed) {
     execute(batch_to_space_float_1_relaxed::CreateModel,
             batch_to_space_float_1_relaxed::is_ignored,
-            batch_to_space_float_1_relaxed::examples);
+            batch_to_space_float_1_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/batch_to_space_quant8_1.mod.py.cpp b/nn/runtime/test/generated/tests/batch_to_space_quant8_1.mod.py.cpp
index 8d59fa7..17c709c 100644
--- a/nn/runtime/test/generated/tests/batch_to_space_quant8_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/batch_to_space_quant8_1.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, batch_to_space_quant8_1) {
     execute(batch_to_space_quant8_1::CreateModel,
             batch_to_space_quant8_1::is_ignored,
-            batch_to_space_quant8_1::examples);
+            batch_to_space_quant8_1::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/batch_to_space_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/batch_to_space_relaxed.mod.py.cpp
index 21e94d9..bd822f4 100644
--- a/nn/runtime/test/generated/tests/batch_to_space_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/batch_to_space_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, batch_to_space_relaxed) {
     execute(batch_to_space_relaxed::CreateModel,
             batch_to_space_relaxed::is_ignored,
-            batch_to_space_relaxed::examples);
+            batch_to_space_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/batch_to_space_v1_2.mod.py.cpp b/nn/runtime/test/generated/tests/batch_to_space_v1_2.mod.py.cpp
index 38a7ec8..1a26226 100644
--- a/nn/runtime/test/generated/tests/batch_to_space_v1_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/batch_to_space_v1_2.mod.py.cpp
@@ -12,72 +12,96 @@
 TEST_F(GeneratedTests, batch_to_space_v1_2_nhwc) {
     execute(batch_to_space_v1_2::CreateModel_nhwc,
             batch_to_space_v1_2::is_ignored_nhwc,
-            batch_to_space_v1_2::examples_nhwc);
+            batch_to_space_v1_2::get_examples_nhwc());
 }
 
 TEST_F(GeneratedTests, batch_to_space_v1_2_nhwc_relaxed) {
     execute(batch_to_space_v1_2::CreateModel_nhwc_relaxed,
             batch_to_space_v1_2::is_ignored_nhwc_relaxed,
-            batch_to_space_v1_2::examples_nhwc_relaxed);
+            batch_to_space_v1_2::get_examples_nhwc_relaxed());
+}
+
+TEST_F(GeneratedTests, batch_to_space_v1_2_nhwc_float16) {
+    execute(batch_to_space_v1_2::CreateModel_nhwc_float16,
+            batch_to_space_v1_2::is_ignored_nhwc_float16,
+            batch_to_space_v1_2::get_examples_nhwc_float16());
 }
 
 TEST_F(GeneratedTests, batch_to_space_v1_2_nhwc_quant8) {
     execute(batch_to_space_v1_2::CreateModel_nhwc_quant8,
             batch_to_space_v1_2::is_ignored_nhwc_quant8,
-            batch_to_space_v1_2::examples_nhwc_quant8);
+            batch_to_space_v1_2::get_examples_nhwc_quant8());
 }
 
 TEST_F(GeneratedTests, batch_to_space_v1_2_nchw) {
     execute(batch_to_space_v1_2::CreateModel_nchw,
             batch_to_space_v1_2::is_ignored_nchw,
-            batch_to_space_v1_2::examples_nchw);
+            batch_to_space_v1_2::get_examples_nchw());
 }
 
 TEST_F(GeneratedTests, batch_to_space_v1_2_nchw_relaxed) {
     execute(batch_to_space_v1_2::CreateModel_nchw_relaxed,
             batch_to_space_v1_2::is_ignored_nchw_relaxed,
-            batch_to_space_v1_2::examples_nchw_relaxed);
+            batch_to_space_v1_2::get_examples_nchw_relaxed());
+}
+
+TEST_F(GeneratedTests, batch_to_space_v1_2_nchw_float16) {
+    execute(batch_to_space_v1_2::CreateModel_nchw_float16,
+            batch_to_space_v1_2::is_ignored_nchw_float16,
+            batch_to_space_v1_2::get_examples_nchw_float16());
 }
 
 TEST_F(GeneratedTests, batch_to_space_v1_2_nchw_quant8) {
     execute(batch_to_space_v1_2::CreateModel_nchw_quant8,
             batch_to_space_v1_2::is_ignored_nchw_quant8,
-            batch_to_space_v1_2::examples_nchw_quant8);
+            batch_to_space_v1_2::get_examples_nchw_quant8());
 }
 
 TEST_F(GeneratedTests, batch_to_space_v1_2_nhwc_2) {
     execute(batch_to_space_v1_2::CreateModel_nhwc_2,
             batch_to_space_v1_2::is_ignored_nhwc_2,
-            batch_to_space_v1_2::examples_nhwc_2);
+            batch_to_space_v1_2::get_examples_nhwc_2());
 }
 
 TEST_F(GeneratedTests, batch_to_space_v1_2_nhwc_relaxed_2) {
     execute(batch_to_space_v1_2::CreateModel_nhwc_relaxed_2,
             batch_to_space_v1_2::is_ignored_nhwc_relaxed_2,
-            batch_to_space_v1_2::examples_nhwc_relaxed_2);
+            batch_to_space_v1_2::get_examples_nhwc_relaxed_2());
+}
+
+TEST_F(GeneratedTests, batch_to_space_v1_2_nhwc_float16_2) {
+    execute(batch_to_space_v1_2::CreateModel_nhwc_float16_2,
+            batch_to_space_v1_2::is_ignored_nhwc_float16_2,
+            batch_to_space_v1_2::get_examples_nhwc_float16_2());
 }
 
 TEST_F(GeneratedTests, batch_to_space_v1_2_nhwc_quant8_2) {
     execute(batch_to_space_v1_2::CreateModel_nhwc_quant8_2,
             batch_to_space_v1_2::is_ignored_nhwc_quant8_2,
-            batch_to_space_v1_2::examples_nhwc_quant8_2);
+            batch_to_space_v1_2::get_examples_nhwc_quant8_2());
 }
 
 TEST_F(GeneratedTests, batch_to_space_v1_2_nchw_2) {
     execute(batch_to_space_v1_2::CreateModel_nchw_2,
             batch_to_space_v1_2::is_ignored_nchw_2,
-            batch_to_space_v1_2::examples_nchw_2);
+            batch_to_space_v1_2::get_examples_nchw_2());
 }
 
 TEST_F(GeneratedTests, batch_to_space_v1_2_nchw_relaxed_2) {
     execute(batch_to_space_v1_2::CreateModel_nchw_relaxed_2,
             batch_to_space_v1_2::is_ignored_nchw_relaxed_2,
-            batch_to_space_v1_2::examples_nchw_relaxed_2);
+            batch_to_space_v1_2::get_examples_nchw_relaxed_2());
+}
+
+TEST_F(GeneratedTests, batch_to_space_v1_2_nchw_float16_2) {
+    execute(batch_to_space_v1_2::CreateModel_nchw_float16_2,
+            batch_to_space_v1_2::is_ignored_nchw_float16_2,
+            batch_to_space_v1_2::get_examples_nchw_float16_2());
 }
 
 TEST_F(GeneratedTests, batch_to_space_v1_2_nchw_quant8_2) {
     execute(batch_to_space_v1_2::CreateModel_nchw_quant8_2,
             batch_to_space_v1_2::is_ignored_nchw_quant8_2,
-            batch_to_space_v1_2::examples_nchw_quant8_2);
+            batch_to_space_v1_2::get_examples_nchw_quant8_2());
 }
 
diff --git a/nn/runtime/test/generated/tests/cast.mod.py.cpp b/nn/runtime/test/generated/tests/cast.mod.py.cpp
index f78ec2b..a0150cb 100644
--- a/nn/runtime/test/generated/tests/cast.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/cast.mod.py.cpp
@@ -12,138 +12,138 @@
 TEST_F(GeneratedTests, cast_float16_to_float16) {
     execute(cast::CreateModel,
             cast::is_ignored,
-            cast::examples_float16_to_float16);
+            cast::get_examples_float16_to_float16());
 }
 
 TEST_F(GeneratedTests, cast_float16_to_float32) {
     execute(cast::CreateModel_2,
             cast::is_ignored_2,
-            cast::examples_float16_to_float32);
+            cast::get_examples_float16_to_float32());
 }
 
 TEST_F(GeneratedTests, cast_float16_to_float32_relaxed) {
     execute(cast::CreateModel_relaxed,
             cast::is_ignored_relaxed,
-            cast::examples_float16_to_float32_relaxed);
+            cast::get_examples_float16_to_float32_relaxed());
 }
 
 TEST_F(GeneratedTests, cast_float16_to_int32) {
     execute(cast::CreateModel_3,
             cast::is_ignored_3,
-            cast::examples_float16_to_int32);
+            cast::get_examples_float16_to_int32());
 }
 
 TEST_F(GeneratedTests, cast_float16_to_quant8) {
     execute(cast::CreateModel_4,
             cast::is_ignored_4,
-            cast::examples_float16_to_quant8);
+            cast::get_examples_float16_to_quant8());
 }
 
 TEST_F(GeneratedTests, cast_float32_to_float16) {
     execute(cast::CreateModel_5,
             cast::is_ignored_5,
-            cast::examples_float32_to_float16);
+            cast::get_examples_float32_to_float16());
 }
 
 TEST_F(GeneratedTests, cast_float32_to_float16_relaxed) {
     execute(cast::CreateModel_relaxed_2,
             cast::is_ignored_relaxed_2,
-            cast::examples_float32_to_float16_relaxed);
+            cast::get_examples_float32_to_float16_relaxed());
 }
 
 TEST_F(GeneratedTests, cast_float32_to_float32) {
     execute(cast::CreateModel_6,
             cast::is_ignored_6,
-            cast::examples_float32_to_float32);
+            cast::get_examples_float32_to_float32());
 }
 
 TEST_F(GeneratedTests, cast_float32_to_float32_relaxed) {
     execute(cast::CreateModel_relaxed_3,
             cast::is_ignored_relaxed_3,
-            cast::examples_float32_to_float32_relaxed);
+            cast::get_examples_float32_to_float32_relaxed());
 }
 
 TEST_F(GeneratedTests, cast_float32_to_int32) {
     execute(cast::CreateModel_7,
             cast::is_ignored_7,
-            cast::examples_float32_to_int32);
+            cast::get_examples_float32_to_int32());
 }
 
 TEST_F(GeneratedTests, cast_float32_to_int32_relaxed) {
     execute(cast::CreateModel_relaxed_4,
             cast::is_ignored_relaxed_4,
-            cast::examples_float32_to_int32_relaxed);
+            cast::get_examples_float32_to_int32_relaxed());
 }
 
 TEST_F(GeneratedTests, cast_float32_to_quant8) {
     execute(cast::CreateModel_8,
             cast::is_ignored_8,
-            cast::examples_float32_to_quant8);
+            cast::get_examples_float32_to_quant8());
 }
 
 TEST_F(GeneratedTests, cast_float32_to_quant8_relaxed) {
     execute(cast::CreateModel_relaxed_5,
             cast::is_ignored_relaxed_5,
-            cast::examples_float32_to_quant8_relaxed);
+            cast::get_examples_float32_to_quant8_relaxed());
 }
 
 TEST_F(GeneratedTests, cast_int32_to_float16) {
     execute(cast::CreateModel_9,
             cast::is_ignored_9,
-            cast::examples_int32_to_float16);
+            cast::get_examples_int32_to_float16());
 }
 
 TEST_F(GeneratedTests, cast_int32_to_float32) {
     execute(cast::CreateModel_10,
             cast::is_ignored_10,
-            cast::examples_int32_to_float32);
+            cast::get_examples_int32_to_float32());
 }
 
 TEST_F(GeneratedTests, cast_int32_to_float32_relaxed) {
     execute(cast::CreateModel_relaxed_6,
             cast::is_ignored_relaxed_6,
-            cast::examples_int32_to_float32_relaxed);
+            cast::get_examples_int32_to_float32_relaxed());
 }
 
 TEST_F(GeneratedTests, cast_int32_to_int32) {
     execute(cast::CreateModel_11,
             cast::is_ignored_11,
-            cast::examples_int32_to_int32);
+            cast::get_examples_int32_to_int32());
 }
 
 TEST_F(GeneratedTests, cast_int32_to_quant8) {
     execute(cast::CreateModel_12,
             cast::is_ignored_12,
-            cast::examples_int32_to_quant8);
+            cast::get_examples_int32_to_quant8());
 }
 
 TEST_F(GeneratedTests, cast_quant8_to_float16) {
     execute(cast::CreateModel_13,
             cast::is_ignored_13,
-            cast::examples_quant8_to_float16);
+            cast::get_examples_quant8_to_float16());
 }
 
 TEST_F(GeneratedTests, cast_quant8_to_float32) {
     execute(cast::CreateModel_14,
             cast::is_ignored_14,
-            cast::examples_quant8_to_float32);
+            cast::get_examples_quant8_to_float32());
 }
 
 TEST_F(GeneratedTests, cast_quant8_to_float32_relaxed) {
     execute(cast::CreateModel_relaxed_7,
             cast::is_ignored_relaxed_7,
-            cast::examples_quant8_to_float32_relaxed);
+            cast::get_examples_quant8_to_float32_relaxed());
 }
 
 TEST_F(GeneratedTests, cast_quant8_to_int32) {
     execute(cast::CreateModel_15,
             cast::is_ignored_15,
-            cast::examples_quant8_to_int32);
+            cast::get_examples_quant8_to_int32());
 }
 
 TEST_F(GeneratedTests, cast_quant8_to_quant8) {
     execute(cast::CreateModel_16,
             cast::is_ignored_16,
-            cast::examples_quant8_to_quant8);
+            cast::get_examples_quant8_to_quant8());
 }
 
diff --git a/nn/runtime/test/generated/tests/channel_shuffle.mod.py.cpp b/nn/runtime/test/generated/tests/channel_shuffle.mod.py.cpp
index 1ce00fc..68a281d 100644
--- a/nn/runtime/test/generated/tests/channel_shuffle.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/channel_shuffle.mod.py.cpp
@@ -12,360 +12,360 @@
 TEST_F(GeneratedTests, channel_shuffle_dim4_axis0) {
     execute(channel_shuffle::CreateModel_dim4_axis0,
             channel_shuffle::is_ignored_dim4_axis0,
-            channel_shuffle::examples_dim4_axis0);
+            channel_shuffle::get_examples_dim4_axis0());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_dim4_axis0_neg) {
     execute(channel_shuffle::CreateModel_dim4_axis0_neg,
             channel_shuffle::is_ignored_dim4_axis0_neg,
-            channel_shuffle::examples_dim4_axis0_neg);
+            channel_shuffle::get_examples_dim4_axis0_neg());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_dim4_axis1) {
     execute(channel_shuffle::CreateModel_dim4_axis1,
             channel_shuffle::is_ignored_dim4_axis1,
-            channel_shuffle::examples_dim4_axis1);
+            channel_shuffle::get_examples_dim4_axis1());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_dim4_axis1_neg) {
     execute(channel_shuffle::CreateModel_dim4_axis1_neg,
             channel_shuffle::is_ignored_dim4_axis1_neg,
-            channel_shuffle::examples_dim4_axis1_neg);
+            channel_shuffle::get_examples_dim4_axis1_neg());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_dim4_axis2) {
     execute(channel_shuffle::CreateModel_dim4_axis2,
             channel_shuffle::is_ignored_dim4_axis2,
-            channel_shuffle::examples_dim4_axis2);
+            channel_shuffle::get_examples_dim4_axis2());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_dim4_axis2_neg) {
     execute(channel_shuffle::CreateModel_dim4_axis2_neg,
             channel_shuffle::is_ignored_dim4_axis2_neg,
-            channel_shuffle::examples_dim4_axis2_neg);
+            channel_shuffle::get_examples_dim4_axis2_neg());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_dim4_axis3) {
     execute(channel_shuffle::CreateModel_dim4_axis3,
             channel_shuffle::is_ignored_dim4_axis3,
-            channel_shuffle::examples_dim4_axis3);
+            channel_shuffle::get_examples_dim4_axis3());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_dim4_axis3_neg) {
     execute(channel_shuffle::CreateModel_dim4_axis3_neg,
             channel_shuffle::is_ignored_dim4_axis3_neg,
-            channel_shuffle::examples_dim4_axis3_neg);
+            channel_shuffle::get_examples_dim4_axis3_neg());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_dim3_axis0) {
     execute(channel_shuffle::CreateModel_dim3_axis0,
             channel_shuffle::is_ignored_dim3_axis0,
-            channel_shuffle::examples_dim3_axis0);
+            channel_shuffle::get_examples_dim3_axis0());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_dim3_axis0_neg) {
     execute(channel_shuffle::CreateModel_dim3_axis0_neg,
             channel_shuffle::is_ignored_dim3_axis0_neg,
-            channel_shuffle::examples_dim3_axis0_neg);
+            channel_shuffle::get_examples_dim3_axis0_neg());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_dim3_axis1) {
     execute(channel_shuffle::CreateModel_dim3_axis1,
             channel_shuffle::is_ignored_dim3_axis1,
-            channel_shuffle::examples_dim3_axis1);
+            channel_shuffle::get_examples_dim3_axis1());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_dim3_axis1_neg) {
     execute(channel_shuffle::CreateModel_dim3_axis1_neg,
             channel_shuffle::is_ignored_dim3_axis1_neg,
-            channel_shuffle::examples_dim3_axis1_neg);
+            channel_shuffle::get_examples_dim3_axis1_neg());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_dim3_axis2) {
     execute(channel_shuffle::CreateModel_dim3_axis2,
             channel_shuffle::is_ignored_dim3_axis2,
-            channel_shuffle::examples_dim3_axis2);
+            channel_shuffle::get_examples_dim3_axis2());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_dim3_axis2_neg) {
     execute(channel_shuffle::CreateModel_dim3_axis2_neg,
             channel_shuffle::is_ignored_dim3_axis2_neg,
-            channel_shuffle::examples_dim3_axis2_neg);
+            channel_shuffle::get_examples_dim3_axis2_neg());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_dim2_axis0) {
     execute(channel_shuffle::CreateModel_dim2_axis0,
             channel_shuffle::is_ignored_dim2_axis0,
-            channel_shuffle::examples_dim2_axis0);
+            channel_shuffle::get_examples_dim2_axis0());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_dim2_axis0_neg) {
     execute(channel_shuffle::CreateModel_dim2_axis0_neg,
             channel_shuffle::is_ignored_dim2_axis0_neg,
-            channel_shuffle::examples_dim2_axis0_neg);
+            channel_shuffle::get_examples_dim2_axis0_neg());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_dim2_axis1) {
     execute(channel_shuffle::CreateModel_dim2_axis1,
             channel_shuffle::is_ignored_dim2_axis1,
-            channel_shuffle::examples_dim2_axis1);
+            channel_shuffle::get_examples_dim2_axis1());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_dim2_axis1_neg) {
     execute(channel_shuffle::CreateModel_dim2_axis1_neg,
             channel_shuffle::is_ignored_dim2_axis1_neg,
-            channel_shuffle::examples_dim2_axis1_neg);
+            channel_shuffle::get_examples_dim2_axis1_neg());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_dim1_axis0) {
     execute(channel_shuffle::CreateModel_dim1_axis0,
             channel_shuffle::is_ignored_dim1_axis0,
-            channel_shuffle::examples_dim1_axis0);
+            channel_shuffle::get_examples_dim1_axis0());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_dim1_axis0_neg) {
     execute(channel_shuffle::CreateModel_dim1_axis0_neg,
             channel_shuffle::is_ignored_dim1_axis0_neg,
-            channel_shuffle::examples_dim1_axis0_neg);
+            channel_shuffle::get_examples_dim1_axis0_neg());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_relaxed_dim4_axis0) {
     execute(channel_shuffle::CreateModel_relaxed_dim4_axis0,
             channel_shuffle::is_ignored_relaxed_dim4_axis0,
-            channel_shuffle::examples_relaxed_dim4_axis0);
+            channel_shuffle::get_examples_relaxed_dim4_axis0());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_relaxed_dim4_axis0_neg) {
     execute(channel_shuffle::CreateModel_relaxed_dim4_axis0_neg,
             channel_shuffle::is_ignored_relaxed_dim4_axis0_neg,
-            channel_shuffle::examples_relaxed_dim4_axis0_neg);
+            channel_shuffle::get_examples_relaxed_dim4_axis0_neg());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_relaxed_dim4_axis1) {
     execute(channel_shuffle::CreateModel_relaxed_dim4_axis1,
             channel_shuffle::is_ignored_relaxed_dim4_axis1,
-            channel_shuffle::examples_relaxed_dim4_axis1);
+            channel_shuffle::get_examples_relaxed_dim4_axis1());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_relaxed_dim4_axis1_neg) {
     execute(channel_shuffle::CreateModel_relaxed_dim4_axis1_neg,
             channel_shuffle::is_ignored_relaxed_dim4_axis1_neg,
-            channel_shuffle::examples_relaxed_dim4_axis1_neg);
+            channel_shuffle::get_examples_relaxed_dim4_axis1_neg());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_relaxed_dim4_axis2) {
     execute(channel_shuffle::CreateModel_relaxed_dim4_axis2,
             channel_shuffle::is_ignored_relaxed_dim4_axis2,
-            channel_shuffle::examples_relaxed_dim4_axis2);
+            channel_shuffle::get_examples_relaxed_dim4_axis2());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_relaxed_dim4_axis2_neg) {
     execute(channel_shuffle::CreateModel_relaxed_dim4_axis2_neg,
             channel_shuffle::is_ignored_relaxed_dim4_axis2_neg,
-            channel_shuffle::examples_relaxed_dim4_axis2_neg);
+            channel_shuffle::get_examples_relaxed_dim4_axis2_neg());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_relaxed_dim4_axis3) {
     execute(channel_shuffle::CreateModel_relaxed_dim4_axis3,
             channel_shuffle::is_ignored_relaxed_dim4_axis3,
-            channel_shuffle::examples_relaxed_dim4_axis3);
+            channel_shuffle::get_examples_relaxed_dim4_axis3());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_relaxed_dim4_axis3_neg) {
     execute(channel_shuffle::CreateModel_relaxed_dim4_axis3_neg,
             channel_shuffle::is_ignored_relaxed_dim4_axis3_neg,
-            channel_shuffle::examples_relaxed_dim4_axis3_neg);
+            channel_shuffle::get_examples_relaxed_dim4_axis3_neg());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_relaxed_dim3_axis0) {
     execute(channel_shuffle::CreateModel_relaxed_dim3_axis0,
             channel_shuffle::is_ignored_relaxed_dim3_axis0,
-            channel_shuffle::examples_relaxed_dim3_axis0);
+            channel_shuffle::get_examples_relaxed_dim3_axis0());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_relaxed_dim3_axis0_neg) {
     execute(channel_shuffle::CreateModel_relaxed_dim3_axis0_neg,
             channel_shuffle::is_ignored_relaxed_dim3_axis0_neg,
-            channel_shuffle::examples_relaxed_dim3_axis0_neg);
+            channel_shuffle::get_examples_relaxed_dim3_axis0_neg());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_relaxed_dim3_axis1) {
     execute(channel_shuffle::CreateModel_relaxed_dim3_axis1,
             channel_shuffle::is_ignored_relaxed_dim3_axis1,
-            channel_shuffle::examples_relaxed_dim3_axis1);
+            channel_shuffle::get_examples_relaxed_dim3_axis1());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_relaxed_dim3_axis1_neg) {
     execute(channel_shuffle::CreateModel_relaxed_dim3_axis1_neg,
             channel_shuffle::is_ignored_relaxed_dim3_axis1_neg,
-            channel_shuffle::examples_relaxed_dim3_axis1_neg);
+            channel_shuffle::get_examples_relaxed_dim3_axis1_neg());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_relaxed_dim3_axis2) {
     execute(channel_shuffle::CreateModel_relaxed_dim3_axis2,
             channel_shuffle::is_ignored_relaxed_dim3_axis2,
-            channel_shuffle::examples_relaxed_dim3_axis2);
+            channel_shuffle::get_examples_relaxed_dim3_axis2());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_relaxed_dim3_axis2_neg) {
     execute(channel_shuffle::CreateModel_relaxed_dim3_axis2_neg,
             channel_shuffle::is_ignored_relaxed_dim3_axis2_neg,
-            channel_shuffle::examples_relaxed_dim3_axis2_neg);
+            channel_shuffle::get_examples_relaxed_dim3_axis2_neg());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_relaxed_dim2_axis0) {
     execute(channel_shuffle::CreateModel_relaxed_dim2_axis0,
             channel_shuffle::is_ignored_relaxed_dim2_axis0,
-            channel_shuffle::examples_relaxed_dim2_axis0);
+            channel_shuffle::get_examples_relaxed_dim2_axis0());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_relaxed_dim2_axis0_neg) {
     execute(channel_shuffle::CreateModel_relaxed_dim2_axis0_neg,
             channel_shuffle::is_ignored_relaxed_dim2_axis0_neg,
-            channel_shuffle::examples_relaxed_dim2_axis0_neg);
+            channel_shuffle::get_examples_relaxed_dim2_axis0_neg());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_relaxed_dim2_axis1) {
     execute(channel_shuffle::CreateModel_relaxed_dim2_axis1,
             channel_shuffle::is_ignored_relaxed_dim2_axis1,
-            channel_shuffle::examples_relaxed_dim2_axis1);
+            channel_shuffle::get_examples_relaxed_dim2_axis1());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_relaxed_dim2_axis1_neg) {
     execute(channel_shuffle::CreateModel_relaxed_dim2_axis1_neg,
             channel_shuffle::is_ignored_relaxed_dim2_axis1_neg,
-            channel_shuffle::examples_relaxed_dim2_axis1_neg);
+            channel_shuffle::get_examples_relaxed_dim2_axis1_neg());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_relaxed_dim1_axis0) {
     execute(channel_shuffle::CreateModel_relaxed_dim1_axis0,
             channel_shuffle::is_ignored_relaxed_dim1_axis0,
-            channel_shuffle::examples_relaxed_dim1_axis0);
+            channel_shuffle::get_examples_relaxed_dim1_axis0());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_relaxed_dim1_axis0_neg) {
     execute(channel_shuffle::CreateModel_relaxed_dim1_axis0_neg,
             channel_shuffle::is_ignored_relaxed_dim1_axis0_neg,
-            channel_shuffle::examples_relaxed_dim1_axis0_neg);
+            channel_shuffle::get_examples_relaxed_dim1_axis0_neg());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_quant8_dim4_axis0) {
     execute(channel_shuffle::CreateModel_quant8_dim4_axis0,
             channel_shuffle::is_ignored_quant8_dim4_axis0,
-            channel_shuffle::examples_quant8_dim4_axis0);
+            channel_shuffle::get_examples_quant8_dim4_axis0());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_quant8_dim4_axis0_neg) {
     execute(channel_shuffle::CreateModel_quant8_dim4_axis0_neg,
             channel_shuffle::is_ignored_quant8_dim4_axis0_neg,
-            channel_shuffle::examples_quant8_dim4_axis0_neg);
+            channel_shuffle::get_examples_quant8_dim4_axis0_neg());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_quant8_dim4_axis1) {
     execute(channel_shuffle::CreateModel_quant8_dim4_axis1,
             channel_shuffle::is_ignored_quant8_dim4_axis1,
-            channel_shuffle::examples_quant8_dim4_axis1);
+            channel_shuffle::get_examples_quant8_dim4_axis1());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_quant8_dim4_axis1_neg) {
     execute(channel_shuffle::CreateModel_quant8_dim4_axis1_neg,
             channel_shuffle::is_ignored_quant8_dim4_axis1_neg,
-            channel_shuffle::examples_quant8_dim4_axis1_neg);
+            channel_shuffle::get_examples_quant8_dim4_axis1_neg());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_quant8_dim4_axis2) {
     execute(channel_shuffle::CreateModel_quant8_dim4_axis2,
             channel_shuffle::is_ignored_quant8_dim4_axis2,
-            channel_shuffle::examples_quant8_dim4_axis2);
+            channel_shuffle::get_examples_quant8_dim4_axis2());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_quant8_dim4_axis2_neg) {
     execute(channel_shuffle::CreateModel_quant8_dim4_axis2_neg,
             channel_shuffle::is_ignored_quant8_dim4_axis2_neg,
-            channel_shuffle::examples_quant8_dim4_axis2_neg);
+            channel_shuffle::get_examples_quant8_dim4_axis2_neg());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_quant8_dim4_axis3) {
     execute(channel_shuffle::CreateModel_quant8_dim4_axis3,
             channel_shuffle::is_ignored_quant8_dim4_axis3,
-            channel_shuffle::examples_quant8_dim4_axis3);
+            channel_shuffle::get_examples_quant8_dim4_axis3());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_quant8_dim4_axis3_neg) {
     execute(channel_shuffle::CreateModel_quant8_dim4_axis3_neg,
             channel_shuffle::is_ignored_quant8_dim4_axis3_neg,
-            channel_shuffle::examples_quant8_dim4_axis3_neg);
+            channel_shuffle::get_examples_quant8_dim4_axis3_neg());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_quant8_dim3_axis0) {
     execute(channel_shuffle::CreateModel_quant8_dim3_axis0,
             channel_shuffle::is_ignored_quant8_dim3_axis0,
-            channel_shuffle::examples_quant8_dim3_axis0);
+            channel_shuffle::get_examples_quant8_dim3_axis0());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_quant8_dim3_axis0_neg) {
     execute(channel_shuffle::CreateModel_quant8_dim3_axis0_neg,
             channel_shuffle::is_ignored_quant8_dim3_axis0_neg,
-            channel_shuffle::examples_quant8_dim3_axis0_neg);
+            channel_shuffle::get_examples_quant8_dim3_axis0_neg());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_quant8_dim3_axis1) {
     execute(channel_shuffle::CreateModel_quant8_dim3_axis1,
             channel_shuffle::is_ignored_quant8_dim3_axis1,
-            channel_shuffle::examples_quant8_dim3_axis1);
+            channel_shuffle::get_examples_quant8_dim3_axis1());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_quant8_dim3_axis1_neg) {
     execute(channel_shuffle::CreateModel_quant8_dim3_axis1_neg,
             channel_shuffle::is_ignored_quant8_dim3_axis1_neg,
-            channel_shuffle::examples_quant8_dim3_axis1_neg);
+            channel_shuffle::get_examples_quant8_dim3_axis1_neg());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_quant8_dim3_axis2) {
     execute(channel_shuffle::CreateModel_quant8_dim3_axis2,
             channel_shuffle::is_ignored_quant8_dim3_axis2,
-            channel_shuffle::examples_quant8_dim3_axis2);
+            channel_shuffle::get_examples_quant8_dim3_axis2());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_quant8_dim3_axis2_neg) {
     execute(channel_shuffle::CreateModel_quant8_dim3_axis2_neg,
             channel_shuffle::is_ignored_quant8_dim3_axis2_neg,
-            channel_shuffle::examples_quant8_dim3_axis2_neg);
+            channel_shuffle::get_examples_quant8_dim3_axis2_neg());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_quant8_dim2_axis0) {
     execute(channel_shuffle::CreateModel_quant8_dim2_axis0,
             channel_shuffle::is_ignored_quant8_dim2_axis0,
-            channel_shuffle::examples_quant8_dim2_axis0);
+            channel_shuffle::get_examples_quant8_dim2_axis0());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_quant8_dim2_axis0_neg) {
     execute(channel_shuffle::CreateModel_quant8_dim2_axis0_neg,
             channel_shuffle::is_ignored_quant8_dim2_axis0_neg,
-            channel_shuffle::examples_quant8_dim2_axis0_neg);
+            channel_shuffle::get_examples_quant8_dim2_axis0_neg());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_quant8_dim2_axis1) {
     execute(channel_shuffle::CreateModel_quant8_dim2_axis1,
             channel_shuffle::is_ignored_quant8_dim2_axis1,
-            channel_shuffle::examples_quant8_dim2_axis1);
+            channel_shuffle::get_examples_quant8_dim2_axis1());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_quant8_dim2_axis1_neg) {
     execute(channel_shuffle::CreateModel_quant8_dim2_axis1_neg,
             channel_shuffle::is_ignored_quant8_dim2_axis1_neg,
-            channel_shuffle::examples_quant8_dim2_axis1_neg);
+            channel_shuffle::get_examples_quant8_dim2_axis1_neg());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_quant8_dim1_axis0) {
     execute(channel_shuffle::CreateModel_quant8_dim1_axis0,
             channel_shuffle::is_ignored_quant8_dim1_axis0,
-            channel_shuffle::examples_quant8_dim1_axis0);
+            channel_shuffle::get_examples_quant8_dim1_axis0());
 }
 
 TEST_F(GeneratedTests, channel_shuffle_quant8_dim1_axis0_neg) {
     execute(channel_shuffle::CreateModel_quant8_dim1_axis0_neg,
             channel_shuffle::is_ignored_quant8_dim1_axis0_neg,
-            channel_shuffle::examples_quant8_dim1_axis0_neg);
+            channel_shuffle::get_examples_quant8_dim1_axis0_neg());
 }
 
diff --git a/nn/runtime/test/generated/tests/concat_float16_1.mod.py.cpp b/nn/runtime/test/generated/tests/concat_float16_1.mod.py.cpp
new file mode 100644
index 0000000..0b391dd
--- /dev/null
+++ b/nn/runtime/test/generated/tests/concat_float16_1.mod.py.cpp
@@ -0,0 +1,17 @@
+// clang-format off
+// Generated file (from: concat_float16_1.mod.py). Do not edit
+#include "../../TestGenerated.h"
+
+namespace concat_float16_1 {
+// Generated concat_float16_1 test
+#include "generated/examples/concat_float16_1.example.cpp"
+// Generated model constructor
+#include "generated/models/concat_float16_1.model.cpp"
+} // namespace concat_float16_1
+
+TEST_F(GeneratedTests, concat_float16_1) {
+    execute(concat_float16_1::CreateModel,
+            concat_float16_1::is_ignored,
+            concat_float16_1::get_examples());
+}
+
diff --git a/nn/runtime/test/generated/tests/concat_float16_2.mod.py.cpp b/nn/runtime/test/generated/tests/concat_float16_2.mod.py.cpp
new file mode 100644
index 0000000..8ab2d1d
--- /dev/null
+++ b/nn/runtime/test/generated/tests/concat_float16_2.mod.py.cpp
@@ -0,0 +1,17 @@
+// clang-format off
+// Generated file (from: concat_float16_2.mod.py). Do not edit
+#include "../../TestGenerated.h"
+
+namespace concat_float16_2 {
+// Generated concat_float16_2 test
+#include "generated/examples/concat_float16_2.example.cpp"
+// Generated model constructor
+#include "generated/models/concat_float16_2.model.cpp"
+} // namespace concat_float16_2
+
+TEST_F(GeneratedTests, concat_float16_2) {
+    execute(concat_float16_2::CreateModel,
+            concat_float16_2::is_ignored,
+            concat_float16_2::get_examples());
+}
+
diff --git a/nn/runtime/test/generated/tests/concat_float16_3.mod.py.cpp b/nn/runtime/test/generated/tests/concat_float16_3.mod.py.cpp
new file mode 100644
index 0000000..92d87a2
--- /dev/null
+++ b/nn/runtime/test/generated/tests/concat_float16_3.mod.py.cpp
@@ -0,0 +1,17 @@
+// clang-format off
+// Generated file (from: concat_float16_3.mod.py). Do not edit
+#include "../../TestGenerated.h"
+
+namespace concat_float16_3 {
+// Generated concat_float16_3 test
+#include "generated/examples/concat_float16_3.example.cpp"
+// Generated model constructor
+#include "generated/models/concat_float16_3.model.cpp"
+} // namespace concat_float16_3
+
+TEST_F(GeneratedTests, concat_float16_3) {
+    execute(concat_float16_3::CreateModel,
+            concat_float16_3::is_ignored,
+            concat_float16_3::get_examples());
+}
+
diff --git a/nn/runtime/test/generated/tests/concat_float_1.mod.py.cpp b/nn/runtime/test/generated/tests/concat_float_1.mod.py.cpp
index 24a2d8f..1af832b 100644
--- a/nn/runtime/test/generated/tests/concat_float_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/concat_float_1.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, concat_float_1) {
     execute(concat_float_1::CreateModel,
             concat_float_1::is_ignored,
-            concat_float_1::examples);
+            concat_float_1::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/concat_float_1_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/concat_float_1_relaxed.mod.py.cpp
index eea473e..d4e827f 100644
--- a/nn/runtime/test/generated/tests/concat_float_1_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/concat_float_1_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, concat_float_1_relaxed) {
     execute(concat_float_1_relaxed::CreateModel,
             concat_float_1_relaxed::is_ignored,
-            concat_float_1_relaxed::examples);
+            concat_float_1_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/concat_float_2.mod.py.cpp b/nn/runtime/test/generated/tests/concat_float_2.mod.py.cpp
index 337b3d0..fdb84fa 100644
--- a/nn/runtime/test/generated/tests/concat_float_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/concat_float_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, concat_float_2) {
     execute(concat_float_2::CreateModel,
             concat_float_2::is_ignored,
-            concat_float_2::examples);
+            concat_float_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/concat_float_2_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/concat_float_2_relaxed.mod.py.cpp
index 185429e..2c07144 100644
--- a/nn/runtime/test/generated/tests/concat_float_2_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/concat_float_2_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, concat_float_2_relaxed) {
     execute(concat_float_2_relaxed::CreateModel,
             concat_float_2_relaxed::is_ignored,
-            concat_float_2_relaxed::examples);
+            concat_float_2_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/concat_float_3.mod.py.cpp b/nn/runtime/test/generated/tests/concat_float_3.mod.py.cpp
index 845cb7a..ef92530 100644
--- a/nn/runtime/test/generated/tests/concat_float_3.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/concat_float_3.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, concat_float_3) {
     execute(concat_float_3::CreateModel,
             concat_float_3::is_ignored,
-            concat_float_3::examples);
+            concat_float_3::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/concat_float_3_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/concat_float_3_relaxed.mod.py.cpp
index 3a44191..544d1dd 100644
--- a/nn/runtime/test/generated/tests/concat_float_3_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/concat_float_3_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, concat_float_3_relaxed) {
     execute(concat_float_3_relaxed::CreateModel,
             concat_float_3_relaxed::is_ignored,
-            concat_float_3_relaxed::examples);
+            concat_float_3_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/concat_mixed_quant.mod.py.cpp b/nn/runtime/test/generated/tests/concat_mixed_quant.mod.py.cpp
new file mode 100644
index 0000000..9ff2e60
--- /dev/null
+++ b/nn/runtime/test/generated/tests/concat_mixed_quant.mod.py.cpp
@@ -0,0 +1,23 @@
+// clang-format off
+// Generated file (from: concat_mixed_quant.mod.py). Do not edit
+#include "../../TestGenerated.h"
+
+namespace concat_mixed_quant {
+// Generated concat_mixed_quant test
+#include "generated/examples/concat_mixed_quant.example.cpp"
+// Generated model constructor
+#include "generated/models/concat_mixed_quant.model.cpp"
+} // namespace concat_mixed_quant
+
+TEST_F(GeneratedTests, concat_mixed_quant_quant8) {
+    execute(concat_mixed_quant::CreateModel_quant8,
+            concat_mixed_quant::is_ignored_quant8,
+            concat_mixed_quant::get_examples_quant8());
+}
+
+TEST_F(GeneratedTests, concat_mixed_quant_quant8_2) {
+    execute(concat_mixed_quant::CreateModel_quant8_2,
+            concat_mixed_quant::is_ignored_quant8_2,
+            concat_mixed_quant::get_examples_quant8_2());
+}
+
diff --git a/nn/runtime/test/generated/tests/concat_quant8_1.mod.py.cpp b/nn/runtime/test/generated/tests/concat_quant8_1.mod.py.cpp
index f07b928..4b74c04 100644
--- a/nn/runtime/test/generated/tests/concat_quant8_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/concat_quant8_1.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, concat_quant8_1) {
     execute(concat_quant8_1::CreateModel,
             concat_quant8_1::is_ignored,
-            concat_quant8_1::examples);
+            concat_quant8_1::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/concat_quant8_2.mod.py.cpp b/nn/runtime/test/generated/tests/concat_quant8_2.mod.py.cpp
index 3685ccc..17e9b18 100644
--- a/nn/runtime/test/generated/tests/concat_quant8_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/concat_quant8_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, concat_quant8_2) {
     execute(concat_quant8_2::CreateModel,
             concat_quant8_2::is_ignored,
-            concat_quant8_2::examples);
+            concat_quant8_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/concat_quant8_3.mod.py.cpp b/nn/runtime/test/generated/tests/concat_quant8_3.mod.py.cpp
index 1d2e9fb..eddbdef 100644
--- a/nn/runtime/test/generated/tests/concat_quant8_3.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/concat_quant8_3.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, concat_quant8_3) {
     execute(concat_quant8_3::CreateModel,
             concat_quant8_3::is_ignored,
-            concat_quant8_3::examples);
+            concat_quant8_3::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/conv2d_v1_2.mod.py.cpp b/nn/runtime/test/generated/tests/conv2d_v1_2.mod.py.cpp
index 6ab7c23..784f2bd 100644
--- a/nn/runtime/test/generated/tests/conv2d_v1_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/conv2d_v1_2.mod.py.cpp
@@ -12,384 +12,384 @@
 TEST_F(GeneratedTests, conv2d_v1_2_nhwc) {
     execute(conv2d_v1_2::CreateModel_nhwc,
             conv2d_v1_2::is_ignored_nhwc,
-            conv2d_v1_2::examples_nhwc);
+            conv2d_v1_2::get_examples_nhwc());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_nhwc_relaxed) {
     execute(conv2d_v1_2::CreateModel_nhwc_relaxed,
             conv2d_v1_2::is_ignored_nhwc_relaxed,
-            conv2d_v1_2::examples_nhwc_relaxed);
+            conv2d_v1_2::get_examples_nhwc_relaxed());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_nhwc_quant8) {
     execute(conv2d_v1_2::CreateModel_nhwc_quant8,
             conv2d_v1_2::is_ignored_nhwc_quant8,
-            conv2d_v1_2::examples_nhwc_quant8);
+            conv2d_v1_2::get_examples_nhwc_quant8());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_nhwc_weight_as_input) {
     execute(conv2d_v1_2::CreateModel_nhwc_weight_as_input,
             conv2d_v1_2::is_ignored_nhwc_weight_as_input,
-            conv2d_v1_2::examples_nhwc_weight_as_input);
+            conv2d_v1_2::get_examples_nhwc_weight_as_input());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_nhwc_weight_as_input_relaxed) {
     execute(conv2d_v1_2::CreateModel_nhwc_weight_as_input_relaxed,
             conv2d_v1_2::is_ignored_nhwc_weight_as_input_relaxed,
-            conv2d_v1_2::examples_nhwc_weight_as_input_relaxed);
+            conv2d_v1_2::get_examples_nhwc_weight_as_input_relaxed());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_nhwc_weight_as_input_quant8) {
     execute(conv2d_v1_2::CreateModel_nhwc_weight_as_input_quant8,
             conv2d_v1_2::is_ignored_nhwc_weight_as_input_quant8,
-            conv2d_v1_2::examples_nhwc_weight_as_input_quant8);
+            conv2d_v1_2::get_examples_nhwc_weight_as_input_quant8());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_nchw) {
     execute(conv2d_v1_2::CreateModel_nchw,
             conv2d_v1_2::is_ignored_nchw,
-            conv2d_v1_2::examples_nchw);
+            conv2d_v1_2::get_examples_nchw());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_nchw_relaxed) {
     execute(conv2d_v1_2::CreateModel_nchw_relaxed,
             conv2d_v1_2::is_ignored_nchw_relaxed,
-            conv2d_v1_2::examples_nchw_relaxed);
+            conv2d_v1_2::get_examples_nchw_relaxed());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_nchw_quant8) {
     execute(conv2d_v1_2::CreateModel_nchw_quant8,
             conv2d_v1_2::is_ignored_nchw_quant8,
-            conv2d_v1_2::examples_nchw_quant8);
+            conv2d_v1_2::get_examples_nchw_quant8());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_nchw_weight_as_input) {
     execute(conv2d_v1_2::CreateModel_nchw_weight_as_input,
             conv2d_v1_2::is_ignored_nchw_weight_as_input,
-            conv2d_v1_2::examples_nchw_weight_as_input);
+            conv2d_v1_2::get_examples_nchw_weight_as_input());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_nchw_weight_as_input_relaxed) {
     execute(conv2d_v1_2::CreateModel_nchw_weight_as_input_relaxed,
             conv2d_v1_2::is_ignored_nchw_weight_as_input_relaxed,
-            conv2d_v1_2::examples_nchw_weight_as_input_relaxed);
+            conv2d_v1_2::get_examples_nchw_weight_as_input_relaxed());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_nchw_weight_as_input_quant8) {
     execute(conv2d_v1_2::CreateModel_nchw_weight_as_input_quant8,
             conv2d_v1_2::is_ignored_nchw_weight_as_input_quant8,
-            conv2d_v1_2::examples_nchw_weight_as_input_quant8);
+            conv2d_v1_2::get_examples_nchw_weight_as_input_quant8());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_nhwc_2) {
     execute(conv2d_v1_2::CreateModel_nhwc_2,
             conv2d_v1_2::is_ignored_nhwc_2,
-            conv2d_v1_2::examples_nhwc_2);
+            conv2d_v1_2::get_examples_nhwc_2());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_nhwc_relaxed_2) {
     execute(conv2d_v1_2::CreateModel_nhwc_relaxed_2,
             conv2d_v1_2::is_ignored_nhwc_relaxed_2,
-            conv2d_v1_2::examples_nhwc_relaxed_2);
+            conv2d_v1_2::get_examples_nhwc_relaxed_2());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_nhwc_quant8_2) {
     execute(conv2d_v1_2::CreateModel_nhwc_quant8_2,
             conv2d_v1_2::is_ignored_nhwc_quant8_2,
-            conv2d_v1_2::examples_nhwc_quant8_2);
+            conv2d_v1_2::get_examples_nhwc_quant8_2());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_nhwc_weight_as_input_2) {
     execute(conv2d_v1_2::CreateModel_nhwc_weight_as_input_2,
             conv2d_v1_2::is_ignored_nhwc_weight_as_input_2,
-            conv2d_v1_2::examples_nhwc_weight_as_input_2);
+            conv2d_v1_2::get_examples_nhwc_weight_as_input_2());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_nhwc_weight_as_input_relaxed_2) {
     execute(conv2d_v1_2::CreateModel_nhwc_weight_as_input_relaxed_2,
             conv2d_v1_2::is_ignored_nhwc_weight_as_input_relaxed_2,
-            conv2d_v1_2::examples_nhwc_weight_as_input_relaxed_2);
+            conv2d_v1_2::get_examples_nhwc_weight_as_input_relaxed_2());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_nhwc_weight_as_input_quant8_2) {
     execute(conv2d_v1_2::CreateModel_nhwc_weight_as_input_quant8_2,
             conv2d_v1_2::is_ignored_nhwc_weight_as_input_quant8_2,
-            conv2d_v1_2::examples_nhwc_weight_as_input_quant8_2);
+            conv2d_v1_2::get_examples_nhwc_weight_as_input_quant8_2());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_nchw_2) {
     execute(conv2d_v1_2::CreateModel_nchw_2,
             conv2d_v1_2::is_ignored_nchw_2,
-            conv2d_v1_2::examples_nchw_2);
+            conv2d_v1_2::get_examples_nchw_2());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_nchw_relaxed_2) {
     execute(conv2d_v1_2::CreateModel_nchw_relaxed_2,
             conv2d_v1_2::is_ignored_nchw_relaxed_2,
-            conv2d_v1_2::examples_nchw_relaxed_2);
+            conv2d_v1_2::get_examples_nchw_relaxed_2());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_nchw_quant8_2) {
     execute(conv2d_v1_2::CreateModel_nchw_quant8_2,
             conv2d_v1_2::is_ignored_nchw_quant8_2,
-            conv2d_v1_2::examples_nchw_quant8_2);
+            conv2d_v1_2::get_examples_nchw_quant8_2());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_nchw_weight_as_input_2) {
     execute(conv2d_v1_2::CreateModel_nchw_weight_as_input_2,
             conv2d_v1_2::is_ignored_nchw_weight_as_input_2,
-            conv2d_v1_2::examples_nchw_weight_as_input_2);
+            conv2d_v1_2::get_examples_nchw_weight_as_input_2());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_nchw_weight_as_input_relaxed_2) {
     execute(conv2d_v1_2::CreateModel_nchw_weight_as_input_relaxed_2,
             conv2d_v1_2::is_ignored_nchw_weight_as_input_relaxed_2,
-            conv2d_v1_2::examples_nchw_weight_as_input_relaxed_2);
+            conv2d_v1_2::get_examples_nchw_weight_as_input_relaxed_2());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_nchw_weight_as_input_quant8_2) {
     execute(conv2d_v1_2::CreateModel_nchw_weight_as_input_quant8_2,
             conv2d_v1_2::is_ignored_nchw_weight_as_input_quant8_2,
-            conv2d_v1_2::examples_nchw_weight_as_input_quant8_2);
+            conv2d_v1_2::get_examples_nchw_weight_as_input_quant8_2());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_channel_nhwc) {
     execute(conv2d_v1_2::CreateModel_channel_nhwc,
             conv2d_v1_2::is_ignored_channel_nhwc,
-            conv2d_v1_2::examples_channel_nhwc);
+            conv2d_v1_2::get_examples_channel_nhwc());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_channel_nhwc_relaxed) {
     execute(conv2d_v1_2::CreateModel_channel_nhwc_relaxed,
             conv2d_v1_2::is_ignored_channel_nhwc_relaxed,
-            conv2d_v1_2::examples_channel_nhwc_relaxed);
+            conv2d_v1_2::get_examples_channel_nhwc_relaxed());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_channel_nhwc_quant8) {
     execute(conv2d_v1_2::CreateModel_channel_nhwc_quant8,
             conv2d_v1_2::is_ignored_channel_nhwc_quant8,
-            conv2d_v1_2::examples_channel_nhwc_quant8);
+            conv2d_v1_2::get_examples_channel_nhwc_quant8());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_channel_nhwc_weight_as_input) {
     execute(conv2d_v1_2::CreateModel_channel_nhwc_weight_as_input,
             conv2d_v1_2::is_ignored_channel_nhwc_weight_as_input,
-            conv2d_v1_2::examples_channel_nhwc_weight_as_input);
+            conv2d_v1_2::get_examples_channel_nhwc_weight_as_input());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_channel_nhwc_weight_as_input_relaxed) {
     execute(conv2d_v1_2::CreateModel_channel_nhwc_weight_as_input_relaxed,
             conv2d_v1_2::is_ignored_channel_nhwc_weight_as_input_relaxed,
-            conv2d_v1_2::examples_channel_nhwc_weight_as_input_relaxed);
+            conv2d_v1_2::get_examples_channel_nhwc_weight_as_input_relaxed());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_channel_nhwc_weight_as_input_quant8) {
     execute(conv2d_v1_2::CreateModel_channel_nhwc_weight_as_input_quant8,
             conv2d_v1_2::is_ignored_channel_nhwc_weight_as_input_quant8,
-            conv2d_v1_2::examples_channel_nhwc_weight_as_input_quant8);
+            conv2d_v1_2::get_examples_channel_nhwc_weight_as_input_quant8());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_channel_nchw) {
     execute(conv2d_v1_2::CreateModel_channel_nchw,
             conv2d_v1_2::is_ignored_channel_nchw,
-            conv2d_v1_2::examples_channel_nchw);
+            conv2d_v1_2::get_examples_channel_nchw());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_channel_nchw_relaxed) {
     execute(conv2d_v1_2::CreateModel_channel_nchw_relaxed,
             conv2d_v1_2::is_ignored_channel_nchw_relaxed,
-            conv2d_v1_2::examples_channel_nchw_relaxed);
+            conv2d_v1_2::get_examples_channel_nchw_relaxed());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_channel_nchw_quant8) {
     execute(conv2d_v1_2::CreateModel_channel_nchw_quant8,
             conv2d_v1_2::is_ignored_channel_nchw_quant8,
-            conv2d_v1_2::examples_channel_nchw_quant8);
+            conv2d_v1_2::get_examples_channel_nchw_quant8());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_channel_nchw_weight_as_input) {
     execute(conv2d_v1_2::CreateModel_channel_nchw_weight_as_input,
             conv2d_v1_2::is_ignored_channel_nchw_weight_as_input,
-            conv2d_v1_2::examples_channel_nchw_weight_as_input);
+            conv2d_v1_2::get_examples_channel_nchw_weight_as_input());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_channel_nchw_weight_as_input_relaxed) {
     execute(conv2d_v1_2::CreateModel_channel_nchw_weight_as_input_relaxed,
             conv2d_v1_2::is_ignored_channel_nchw_weight_as_input_relaxed,
-            conv2d_v1_2::examples_channel_nchw_weight_as_input_relaxed);
+            conv2d_v1_2::get_examples_channel_nchw_weight_as_input_relaxed());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_channel_nchw_weight_as_input_quant8) {
     execute(conv2d_v1_2::CreateModel_channel_nchw_weight_as_input_quant8,
             conv2d_v1_2::is_ignored_channel_nchw_weight_as_input_quant8,
-            conv2d_v1_2::examples_channel_nchw_weight_as_input_quant8);
+            conv2d_v1_2::get_examples_channel_nchw_weight_as_input_quant8());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_large_nhwc) {
     execute(conv2d_v1_2::CreateModel_large_nhwc,
             conv2d_v1_2::is_ignored_large_nhwc,
-            conv2d_v1_2::examples_large_nhwc);
+            conv2d_v1_2::get_examples_large_nhwc());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_large_nhwc_relaxed) {
     execute(conv2d_v1_2::CreateModel_large_nhwc_relaxed,
             conv2d_v1_2::is_ignored_large_nhwc_relaxed,
-            conv2d_v1_2::examples_large_nhwc_relaxed);
+            conv2d_v1_2::get_examples_large_nhwc_relaxed());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_large_nhwc_quant8) {
     execute(conv2d_v1_2::CreateModel_large_nhwc_quant8,
             conv2d_v1_2::is_ignored_large_nhwc_quant8,
-            conv2d_v1_2::examples_large_nhwc_quant8);
+            conv2d_v1_2::get_examples_large_nhwc_quant8());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_large_nhwc_weight_as_input) {
     execute(conv2d_v1_2::CreateModel_large_nhwc_weight_as_input,
             conv2d_v1_2::is_ignored_large_nhwc_weight_as_input,
-            conv2d_v1_2::examples_large_nhwc_weight_as_input);
+            conv2d_v1_2::get_examples_large_nhwc_weight_as_input());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_large_nhwc_weight_as_input_relaxed) {
     execute(conv2d_v1_2::CreateModel_large_nhwc_weight_as_input_relaxed,
             conv2d_v1_2::is_ignored_large_nhwc_weight_as_input_relaxed,
-            conv2d_v1_2::examples_large_nhwc_weight_as_input_relaxed);
+            conv2d_v1_2::get_examples_large_nhwc_weight_as_input_relaxed());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_large_nhwc_weight_as_input_quant8) {
     execute(conv2d_v1_2::CreateModel_large_nhwc_weight_as_input_quant8,
             conv2d_v1_2::is_ignored_large_nhwc_weight_as_input_quant8,
-            conv2d_v1_2::examples_large_nhwc_weight_as_input_quant8);
+            conv2d_v1_2::get_examples_large_nhwc_weight_as_input_quant8());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_large_nchw) {
     execute(conv2d_v1_2::CreateModel_large_nchw,
             conv2d_v1_2::is_ignored_large_nchw,
-            conv2d_v1_2::examples_large_nchw);
+            conv2d_v1_2::get_examples_large_nchw());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_large_nchw_relaxed) {
     execute(conv2d_v1_2::CreateModel_large_nchw_relaxed,
             conv2d_v1_2::is_ignored_large_nchw_relaxed,
-            conv2d_v1_2::examples_large_nchw_relaxed);
+            conv2d_v1_2::get_examples_large_nchw_relaxed());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_large_nchw_quant8) {
     execute(conv2d_v1_2::CreateModel_large_nchw_quant8,
             conv2d_v1_2::is_ignored_large_nchw_quant8,
-            conv2d_v1_2::examples_large_nchw_quant8);
+            conv2d_v1_2::get_examples_large_nchw_quant8());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_large_nchw_weight_as_input) {
     execute(conv2d_v1_2::CreateModel_large_nchw_weight_as_input,
             conv2d_v1_2::is_ignored_large_nchw_weight_as_input,
-            conv2d_v1_2::examples_large_nchw_weight_as_input);
+            conv2d_v1_2::get_examples_large_nchw_weight_as_input());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_large_nchw_weight_as_input_relaxed) {
     execute(conv2d_v1_2::CreateModel_large_nchw_weight_as_input_relaxed,
             conv2d_v1_2::is_ignored_large_nchw_weight_as_input_relaxed,
-            conv2d_v1_2::examples_large_nchw_weight_as_input_relaxed);
+            conv2d_v1_2::get_examples_large_nchw_weight_as_input_relaxed());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_large_nchw_weight_as_input_quant8) {
     execute(conv2d_v1_2::CreateModel_large_nchw_weight_as_input_quant8,
             conv2d_v1_2::is_ignored_large_nchw_weight_as_input_quant8,
-            conv2d_v1_2::examples_large_nchw_weight_as_input_quant8);
+            conv2d_v1_2::get_examples_large_nchw_weight_as_input_quant8());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_1_H3_W2_SAME_nhwc) {
     execute(conv2d_v1_2::CreateModel_1_H3_W2_SAME_nhwc,
             conv2d_v1_2::is_ignored_1_H3_W2_SAME_nhwc,
-            conv2d_v1_2::examples_1_H3_W2_SAME_nhwc);
+            conv2d_v1_2::get_examples_1_H3_W2_SAME_nhwc());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_1_H3_W2_SAME_nhwc_relaxed) {
     execute(conv2d_v1_2::CreateModel_1_H3_W2_SAME_nhwc_relaxed,
             conv2d_v1_2::is_ignored_1_H3_W2_SAME_nhwc_relaxed,
-            conv2d_v1_2::examples_1_H3_W2_SAME_nhwc_relaxed);
+            conv2d_v1_2::get_examples_1_H3_W2_SAME_nhwc_relaxed());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_1_H3_W2_SAME_nchw) {
     execute(conv2d_v1_2::CreateModel_1_H3_W2_SAME_nchw,
             conv2d_v1_2::is_ignored_1_H3_W2_SAME_nchw,
-            conv2d_v1_2::examples_1_H3_W2_SAME_nchw);
+            conv2d_v1_2::get_examples_1_H3_W2_SAME_nchw());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_1_H3_W2_SAME_nchw_relaxed) {
     execute(conv2d_v1_2::CreateModel_1_H3_W2_SAME_nchw_relaxed,
             conv2d_v1_2::is_ignored_1_H3_W2_SAME_nchw_relaxed,
-            conv2d_v1_2::examples_1_H3_W2_SAME_nchw_relaxed);
+            conv2d_v1_2::get_examples_1_H3_W2_SAME_nchw_relaxed());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_1_H3_W2_VALID_nhwc) {
     execute(conv2d_v1_2::CreateModel_1_H3_W2_VALID_nhwc,
             conv2d_v1_2::is_ignored_1_H3_W2_VALID_nhwc,
-            conv2d_v1_2::examples_1_H3_W2_VALID_nhwc);
+            conv2d_v1_2::get_examples_1_H3_W2_VALID_nhwc());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_1_H3_W2_VALID_nhwc_relaxed) {
     execute(conv2d_v1_2::CreateModel_1_H3_W2_VALID_nhwc_relaxed,
             conv2d_v1_2::is_ignored_1_H3_W2_VALID_nhwc_relaxed,
-            conv2d_v1_2::examples_1_H3_W2_VALID_nhwc_relaxed);
+            conv2d_v1_2::get_examples_1_H3_W2_VALID_nhwc_relaxed());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_1_H3_W2_VALID_nchw) {
     execute(conv2d_v1_2::CreateModel_1_H3_W2_VALID_nchw,
             conv2d_v1_2::is_ignored_1_H3_W2_VALID_nchw,
-            conv2d_v1_2::examples_1_H3_W2_VALID_nchw);
+            conv2d_v1_2::get_examples_1_H3_W2_VALID_nchw());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_1_H3_W2_VALID_nchw_relaxed) {
     execute(conv2d_v1_2::CreateModel_1_H3_W2_VALID_nchw_relaxed,
             conv2d_v1_2::is_ignored_1_H3_W2_VALID_nchw_relaxed,
-            conv2d_v1_2::examples_1_H3_W2_VALID_nchw_relaxed);
+            conv2d_v1_2::get_examples_1_H3_W2_VALID_nchw_relaxed());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_3_H3_W2_SAME_nhwc) {
     execute(conv2d_v1_2::CreateModel_3_H3_W2_SAME_nhwc,
             conv2d_v1_2::is_ignored_3_H3_W2_SAME_nhwc,
-            conv2d_v1_2::examples_3_H3_W2_SAME_nhwc);
+            conv2d_v1_2::get_examples_3_H3_W2_SAME_nhwc());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_3_H3_W2_SAME_nhwc_relaxed) {
     execute(conv2d_v1_2::CreateModel_3_H3_W2_SAME_nhwc_relaxed,
             conv2d_v1_2::is_ignored_3_H3_W2_SAME_nhwc_relaxed,
-            conv2d_v1_2::examples_3_H3_W2_SAME_nhwc_relaxed);
+            conv2d_v1_2::get_examples_3_H3_W2_SAME_nhwc_relaxed());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_3_H3_W2_SAME_nchw) {
     execute(conv2d_v1_2::CreateModel_3_H3_W2_SAME_nchw,
             conv2d_v1_2::is_ignored_3_H3_W2_SAME_nchw,
-            conv2d_v1_2::examples_3_H3_W2_SAME_nchw);
+            conv2d_v1_2::get_examples_3_H3_W2_SAME_nchw());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_3_H3_W2_SAME_nchw_relaxed) {
     execute(conv2d_v1_2::CreateModel_3_H3_W2_SAME_nchw_relaxed,
             conv2d_v1_2::is_ignored_3_H3_W2_SAME_nchw_relaxed,
-            conv2d_v1_2::examples_3_H3_W2_SAME_nchw_relaxed);
+            conv2d_v1_2::get_examples_3_H3_W2_SAME_nchw_relaxed());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_3_H3_W2_VALID_nhwc) {
     execute(conv2d_v1_2::CreateModel_3_H3_W2_VALID_nhwc,
             conv2d_v1_2::is_ignored_3_H3_W2_VALID_nhwc,
-            conv2d_v1_2::examples_3_H3_W2_VALID_nhwc);
+            conv2d_v1_2::get_examples_3_H3_W2_VALID_nhwc());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_3_H3_W2_VALID_nhwc_relaxed) {
     execute(conv2d_v1_2::CreateModel_3_H3_W2_VALID_nhwc_relaxed,
             conv2d_v1_2::is_ignored_3_H3_W2_VALID_nhwc_relaxed,
-            conv2d_v1_2::examples_3_H3_W2_VALID_nhwc_relaxed);
+            conv2d_v1_2::get_examples_3_H3_W2_VALID_nhwc_relaxed());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_3_H3_W2_VALID_nchw) {
     execute(conv2d_v1_2::CreateModel_3_H3_W2_VALID_nchw,
             conv2d_v1_2::is_ignored_3_H3_W2_VALID_nchw,
-            conv2d_v1_2::examples_3_H3_W2_VALID_nchw);
+            conv2d_v1_2::get_examples_3_H3_W2_VALID_nchw());
 }
 
 TEST_F(GeneratedTests, conv2d_v1_2_3_H3_W2_VALID_nchw_relaxed) {
     execute(conv2d_v1_2::CreateModel_3_H3_W2_VALID_nchw_relaxed,
             conv2d_v1_2::is_ignored_3_H3_W2_VALID_nchw_relaxed,
-            conv2d_v1_2::examples_3_H3_W2_VALID_nchw_relaxed);
+            conv2d_v1_2::get_examples_3_H3_W2_VALID_nchw_relaxed());
 }
 
diff --git a/nn/runtime/test/generated/tests/conv_1_h3_w2_SAME.mod.py.cpp b/nn/runtime/test/generated/tests/conv_1_h3_w2_SAME.mod.py.cpp
index d46c9e7..936b62e 100644
--- a/nn/runtime/test/generated/tests/conv_1_h3_w2_SAME.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/conv_1_h3_w2_SAME.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, conv_1_h3_w2_SAME) {
     execute(conv_1_h3_w2_SAME::CreateModel,
             conv_1_h3_w2_SAME::is_ignored,
-            conv_1_h3_w2_SAME::examples);
+            conv_1_h3_w2_SAME::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/conv_1_h3_w2_SAME_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/conv_1_h3_w2_SAME_relaxed.mod.py.cpp
index 0a784ad..733fd5b 100644
--- a/nn/runtime/test/generated/tests/conv_1_h3_w2_SAME_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/conv_1_h3_w2_SAME_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, conv_1_h3_w2_SAME_relaxed) {
     execute(conv_1_h3_w2_SAME_relaxed::CreateModel,
             conv_1_h3_w2_SAME_relaxed::is_ignored,
-            conv_1_h3_w2_SAME_relaxed::examples);
+            conv_1_h3_w2_SAME_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/conv_1_h3_w2_VALID.mod.py.cpp b/nn/runtime/test/generated/tests/conv_1_h3_w2_VALID.mod.py.cpp
index 8538702..d53fbbf 100644
--- a/nn/runtime/test/generated/tests/conv_1_h3_w2_VALID.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/conv_1_h3_w2_VALID.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, conv_1_h3_w2_VALID) {
     execute(conv_1_h3_w2_VALID::CreateModel,
             conv_1_h3_w2_VALID::is_ignored,
-            conv_1_h3_w2_VALID::examples);
+            conv_1_h3_w2_VALID::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/conv_1_h3_w2_VALID_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/conv_1_h3_w2_VALID_relaxed.mod.py.cpp
index eea38e0..ab3b453 100644
--- a/nn/runtime/test/generated/tests/conv_1_h3_w2_VALID_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/conv_1_h3_w2_VALID_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, conv_1_h3_w2_VALID_relaxed) {
     execute(conv_1_h3_w2_VALID_relaxed::CreateModel,
             conv_1_h3_w2_VALID_relaxed::is_ignored,
-            conv_1_h3_w2_VALID_relaxed::examples);
+            conv_1_h3_w2_VALID_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/conv_3_h3_w2_SAME.mod.py.cpp b/nn/runtime/test/generated/tests/conv_3_h3_w2_SAME.mod.py.cpp
index d9c1e64..f940045 100644
--- a/nn/runtime/test/generated/tests/conv_3_h3_w2_SAME.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/conv_3_h3_w2_SAME.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, conv_3_h3_w2_SAME) {
     execute(conv_3_h3_w2_SAME::CreateModel,
             conv_3_h3_w2_SAME::is_ignored,
-            conv_3_h3_w2_SAME::examples);
+            conv_3_h3_w2_SAME::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/conv_3_h3_w2_SAME_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/conv_3_h3_w2_SAME_relaxed.mod.py.cpp
index a9d251a..91bc56b 100644
--- a/nn/runtime/test/generated/tests/conv_3_h3_w2_SAME_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/conv_3_h3_w2_SAME_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, conv_3_h3_w2_SAME_relaxed) {
     execute(conv_3_h3_w2_SAME_relaxed::CreateModel,
             conv_3_h3_w2_SAME_relaxed::is_ignored,
-            conv_3_h3_w2_SAME_relaxed::examples);
+            conv_3_h3_w2_SAME_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/conv_3_h3_w2_VALID.mod.py.cpp b/nn/runtime/test/generated/tests/conv_3_h3_w2_VALID.mod.py.cpp
index f2769a1..855838a 100644
--- a/nn/runtime/test/generated/tests/conv_3_h3_w2_VALID.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/conv_3_h3_w2_VALID.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, conv_3_h3_w2_VALID) {
     execute(conv_3_h3_w2_VALID::CreateModel,
             conv_3_h3_w2_VALID::is_ignored,
-            conv_3_h3_w2_VALID::examples);
+            conv_3_h3_w2_VALID::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/conv_3_h3_w2_VALID_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/conv_3_h3_w2_VALID_relaxed.mod.py.cpp
index 5440573..0f9bbbe 100644
--- a/nn/runtime/test/generated/tests/conv_3_h3_w2_VALID_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/conv_3_h3_w2_VALID_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, conv_3_h3_w2_VALID_relaxed) {
     execute(conv_3_h3_w2_VALID_relaxed::CreateModel,
             conv_3_h3_w2_VALID_relaxed::is_ignored,
-            conv_3_h3_w2_VALID_relaxed::examples);
+            conv_3_h3_w2_VALID_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/conv_float.mod.py.cpp b/nn/runtime/test/generated/tests/conv_float.mod.py.cpp
index ad9f038..2a5915f 100644
--- a/nn/runtime/test/generated/tests/conv_float.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/conv_float.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, conv_float) {
     execute(conv_float::CreateModel,
             conv_float::is_ignored,
-            conv_float::examples);
+            conv_float::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/conv_float_2.mod.py.cpp b/nn/runtime/test/generated/tests/conv_float_2.mod.py.cpp
index d7c18ad..4ad3482 100644
--- a/nn/runtime/test/generated/tests/conv_float_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/conv_float_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, conv_float_2) {
     execute(conv_float_2::CreateModel,
             conv_float_2::is_ignored,
-            conv_float_2::examples);
+            conv_float_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/conv_float_2_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/conv_float_2_relaxed.mod.py.cpp
index 0a9bf3d..e2f2624 100644
--- a/nn/runtime/test/generated/tests/conv_float_2_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/conv_float_2_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, conv_float_2_relaxed) {
     execute(conv_float_2_relaxed::CreateModel,
             conv_float_2_relaxed::is_ignored,
-            conv_float_2_relaxed::examples);
+            conv_float_2_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/conv_float_channels.mod.py.cpp b/nn/runtime/test/generated/tests/conv_float_channels.mod.py.cpp
index 75c48b4..90281fb 100644
--- a/nn/runtime/test/generated/tests/conv_float_channels.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/conv_float_channels.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, conv_float_channels) {
     execute(conv_float_channels::CreateModel,
             conv_float_channels::is_ignored,
-            conv_float_channels::examples);
+            conv_float_channels::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/conv_float_channels_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/conv_float_channels_relaxed.mod.py.cpp
index c360269..db131de 100644
--- a/nn/runtime/test/generated/tests/conv_float_channels_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/conv_float_channels_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, conv_float_channels_relaxed) {
     execute(conv_float_channels_relaxed::CreateModel,
             conv_float_channels_relaxed::is_ignored,
-            conv_float_channels_relaxed::examples);
+            conv_float_channels_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/conv_float_channels_weights_as_inputs.mod.py.cpp b/nn/runtime/test/generated/tests/conv_float_channels_weights_as_inputs.mod.py.cpp
index aa33ac8..bec1465 100644
--- a/nn/runtime/test/generated/tests/conv_float_channels_weights_as_inputs.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/conv_float_channels_weights_as_inputs.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, conv_float_channels_weights_as_inputs) {
     execute(conv_float_channels_weights_as_inputs::CreateModel,
             conv_float_channels_weights_as_inputs::is_ignored,
-            conv_float_channels_weights_as_inputs::examples);
+            conv_float_channels_weights_as_inputs::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/conv_float_channels_weights_as_inputs_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/conv_float_channels_weights_as_inputs_relaxed.mod.py.cpp
index 713b5ad..f44b450 100644
--- a/nn/runtime/test/generated/tests/conv_float_channels_weights_as_inputs_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/conv_float_channels_weights_as_inputs_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, conv_float_channels_weights_as_inputs_relaxed) {
     execute(conv_float_channels_weights_as_inputs_relaxed::CreateModel,
             conv_float_channels_weights_as_inputs_relaxed::is_ignored,
-            conv_float_channels_weights_as_inputs_relaxed::examples);
+            conv_float_channels_weights_as_inputs_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/conv_float_large.mod.py.cpp b/nn/runtime/test/generated/tests/conv_float_large.mod.py.cpp
index d4c8d96..e0bd2bd 100644
--- a/nn/runtime/test/generated/tests/conv_float_large.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/conv_float_large.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, conv_float_large) {
     execute(conv_float_large::CreateModel,
             conv_float_large::is_ignored,
-            conv_float_large::examples);
+            conv_float_large::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/conv_float_large_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/conv_float_large_relaxed.mod.py.cpp
index be79a56..f0e9bbe 100644
--- a/nn/runtime/test/generated/tests/conv_float_large_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/conv_float_large_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, conv_float_large_relaxed) {
     execute(conv_float_large_relaxed::CreateModel,
             conv_float_large_relaxed::is_ignored,
-            conv_float_large_relaxed::examples);
+            conv_float_large_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/conv_float_large_weights_as_inputs.mod.py.cpp b/nn/runtime/test/generated/tests/conv_float_large_weights_as_inputs.mod.py.cpp
index f53c302..ebda572 100644
--- a/nn/runtime/test/generated/tests/conv_float_large_weights_as_inputs.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/conv_float_large_weights_as_inputs.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, conv_float_large_weights_as_inputs) {
     execute(conv_float_large_weights_as_inputs::CreateModel,
             conv_float_large_weights_as_inputs::is_ignored,
-            conv_float_large_weights_as_inputs::examples);
+            conv_float_large_weights_as_inputs::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/conv_float_large_weights_as_inputs_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/conv_float_large_weights_as_inputs_relaxed.mod.py.cpp
index ecf8e34..333a3f6 100644
--- a/nn/runtime/test/generated/tests/conv_float_large_weights_as_inputs_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/conv_float_large_weights_as_inputs_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, conv_float_large_weights_as_inputs_relaxed) {
     execute(conv_float_large_weights_as_inputs_relaxed::CreateModel,
             conv_float_large_weights_as_inputs_relaxed::is_ignored,
-            conv_float_large_weights_as_inputs_relaxed::examples);
+            conv_float_large_weights_as_inputs_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/conv_float_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/conv_float_relaxed.mod.py.cpp
index cd4a38f..2b964d7 100644
--- a/nn/runtime/test/generated/tests/conv_float_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/conv_float_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, conv_float_relaxed) {
     execute(conv_float_relaxed::CreateModel,
             conv_float_relaxed::is_ignored,
-            conv_float_relaxed::examples);
+            conv_float_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/conv_float_weights_as_inputs.mod.py.cpp b/nn/runtime/test/generated/tests/conv_float_weights_as_inputs.mod.py.cpp
index 70a8250..e4c530f 100644
--- a/nn/runtime/test/generated/tests/conv_float_weights_as_inputs.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/conv_float_weights_as_inputs.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, conv_float_weights_as_inputs) {
     execute(conv_float_weights_as_inputs::CreateModel,
             conv_float_weights_as_inputs::is_ignored,
-            conv_float_weights_as_inputs::examples);
+            conv_float_weights_as_inputs::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/conv_float_weights_as_inputs_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/conv_float_weights_as_inputs_relaxed.mod.py.cpp
index de8106d..efe7209 100644
--- a/nn/runtime/test/generated/tests/conv_float_weights_as_inputs_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/conv_float_weights_as_inputs_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, conv_float_weights_as_inputs_relaxed) {
     execute(conv_float_weights_as_inputs_relaxed::CreateModel,
             conv_float_weights_as_inputs_relaxed::is_ignored,
-            conv_float_weights_as_inputs_relaxed::examples);
+            conv_float_weights_as_inputs_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/conv_quant8.mod.py.cpp b/nn/runtime/test/generated/tests/conv_quant8.mod.py.cpp
index c390c82..67c32f9 100644
--- a/nn/runtime/test/generated/tests/conv_quant8.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/conv_quant8.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, conv_quant8) {
     execute(conv_quant8::CreateModel,
             conv_quant8::is_ignored,
-            conv_quant8::examples);
+            conv_quant8::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/conv_quant8_2.mod.py.cpp b/nn/runtime/test/generated/tests/conv_quant8_2.mod.py.cpp
index 1431928..4c736ee 100644
--- a/nn/runtime/test/generated/tests/conv_quant8_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/conv_quant8_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, conv_quant8_2) {
     execute(conv_quant8_2::CreateModel,
             conv_quant8_2::is_ignored,
-            conv_quant8_2::examples);
+            conv_quant8_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/conv_quant8_channels.mod.py.cpp b/nn/runtime/test/generated/tests/conv_quant8_channels.mod.py.cpp
index 5746829..099012c 100644
--- a/nn/runtime/test/generated/tests/conv_quant8_channels.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/conv_quant8_channels.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, conv_quant8_channels) {
     execute(conv_quant8_channels::CreateModel,
             conv_quant8_channels::is_ignored,
-            conv_quant8_channels::examples);
+            conv_quant8_channels::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/conv_quant8_channels_weights_as_inputs.mod.py.cpp b/nn/runtime/test/generated/tests/conv_quant8_channels_weights_as_inputs.mod.py.cpp
index ec7e763..838fdca 100644
--- a/nn/runtime/test/generated/tests/conv_quant8_channels_weights_as_inputs.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/conv_quant8_channels_weights_as_inputs.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, conv_quant8_channels_weights_as_inputs) {
     execute(conv_quant8_channels_weights_as_inputs::CreateModel,
             conv_quant8_channels_weights_as_inputs::is_ignored,
-            conv_quant8_channels_weights_as_inputs::examples);
+            conv_quant8_channels_weights_as_inputs::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/conv_quant8_large.mod.py.cpp b/nn/runtime/test/generated/tests/conv_quant8_large.mod.py.cpp
index dc42d76..87529cf 100644
--- a/nn/runtime/test/generated/tests/conv_quant8_large.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/conv_quant8_large.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, conv_quant8_large) {
     execute(conv_quant8_large::CreateModel,
             conv_quant8_large::is_ignored,
-            conv_quant8_large::examples);
+            conv_quant8_large::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/conv_quant8_large_weights_as_inputs.mod.py.cpp b/nn/runtime/test/generated/tests/conv_quant8_large_weights_as_inputs.mod.py.cpp
index c088451..7872eca 100644
--- a/nn/runtime/test/generated/tests/conv_quant8_large_weights_as_inputs.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/conv_quant8_large_weights_as_inputs.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, conv_quant8_large_weights_as_inputs) {
     execute(conv_quant8_large_weights_as_inputs::CreateModel,
             conv_quant8_large_weights_as_inputs::is_ignored,
-            conv_quant8_large_weights_as_inputs::examples);
+            conv_quant8_large_weights_as_inputs::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/conv_quant8_overflow.mod.py.cpp b/nn/runtime/test/generated/tests/conv_quant8_overflow.mod.py.cpp
index bb14077..c345a8b 100644
--- a/nn/runtime/test/generated/tests/conv_quant8_overflow.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/conv_quant8_overflow.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, conv_quant8_overflow) {
     execute(conv_quant8_overflow::CreateModel,
             conv_quant8_overflow::is_ignored,
-            conv_quant8_overflow::examples);
+            conv_quant8_overflow::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/conv_quant8_overflow_weights_as_inputs.mod.py.cpp b/nn/runtime/test/generated/tests/conv_quant8_overflow_weights_as_inputs.mod.py.cpp
index bfd5cab..52865a4 100644
--- a/nn/runtime/test/generated/tests/conv_quant8_overflow_weights_as_inputs.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/conv_quant8_overflow_weights_as_inputs.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, conv_quant8_overflow_weights_as_inputs) {
     execute(conv_quant8_overflow_weights_as_inputs::CreateModel,
             conv_quant8_overflow_weights_as_inputs::is_ignored,
-            conv_quant8_overflow_weights_as_inputs::examples);
+            conv_quant8_overflow_weights_as_inputs::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/conv_quant8_weights_as_inputs.mod.py.cpp b/nn/runtime/test/generated/tests/conv_quant8_weights_as_inputs.mod.py.cpp
index 9641f93..93d8772 100644
--- a/nn/runtime/test/generated/tests/conv_quant8_weights_as_inputs.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/conv_quant8_weights_as_inputs.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, conv_quant8_weights_as_inputs) {
     execute(conv_quant8_weights_as_inputs::CreateModel,
             conv_quant8_weights_as_inputs::is_ignored,
-            conv_quant8_weights_as_inputs::examples);
+            conv_quant8_weights_as_inputs::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/depth_to_space_float_1.mod.py.cpp b/nn/runtime/test/generated/tests/depth_to_space_float_1.mod.py.cpp
index da3f956..55db680 100644
--- a/nn/runtime/test/generated/tests/depth_to_space_float_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/depth_to_space_float_1.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, depth_to_space_float_1) {
     execute(depth_to_space_float_1::CreateModel,
             depth_to_space_float_1::is_ignored,
-            depth_to_space_float_1::examples);
+            depth_to_space_float_1::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/depth_to_space_float_1_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/depth_to_space_float_1_relaxed.mod.py.cpp
index ba8b9e7..47890d4 100644
--- a/nn/runtime/test/generated/tests/depth_to_space_float_1_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/depth_to_space_float_1_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, depth_to_space_float_1_relaxed) {
     execute(depth_to_space_float_1_relaxed::CreateModel,
             depth_to_space_float_1_relaxed::is_ignored,
-            depth_to_space_float_1_relaxed::examples);
+            depth_to_space_float_1_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/depth_to_space_float_2.mod.py.cpp b/nn/runtime/test/generated/tests/depth_to_space_float_2.mod.py.cpp
index 2514e80..53eda6d 100644
--- a/nn/runtime/test/generated/tests/depth_to_space_float_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/depth_to_space_float_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, depth_to_space_float_2) {
     execute(depth_to_space_float_2::CreateModel,
             depth_to_space_float_2::is_ignored,
-            depth_to_space_float_2::examples);
+            depth_to_space_float_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/depth_to_space_float_2_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/depth_to_space_float_2_relaxed.mod.py.cpp
index 23b0015..125b70e 100644
--- a/nn/runtime/test/generated/tests/depth_to_space_float_2_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/depth_to_space_float_2_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, depth_to_space_float_2_relaxed) {
     execute(depth_to_space_float_2_relaxed::CreateModel,
             depth_to_space_float_2_relaxed::is_ignored,
-            depth_to_space_float_2_relaxed::examples);
+            depth_to_space_float_2_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/depth_to_space_float_3.mod.py.cpp b/nn/runtime/test/generated/tests/depth_to_space_float_3.mod.py.cpp
index 06384f9..b57cf3d 100644
--- a/nn/runtime/test/generated/tests/depth_to_space_float_3.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/depth_to_space_float_3.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, depth_to_space_float_3) {
     execute(depth_to_space_float_3::CreateModel,
             depth_to_space_float_3::is_ignored,
-            depth_to_space_float_3::examples);
+            depth_to_space_float_3::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/depth_to_space_float_3_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/depth_to_space_float_3_relaxed.mod.py.cpp
index 40ba8e2..c0c5424 100644
--- a/nn/runtime/test/generated/tests/depth_to_space_float_3_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/depth_to_space_float_3_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, depth_to_space_float_3_relaxed) {
     execute(depth_to_space_float_3_relaxed::CreateModel,
             depth_to_space_float_3_relaxed::is_ignored,
-            depth_to_space_float_3_relaxed::examples);
+            depth_to_space_float_3_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/depth_to_space_quant8_1.mod.py.cpp b/nn/runtime/test/generated/tests/depth_to_space_quant8_1.mod.py.cpp
index c412ab0..b082758 100644
--- a/nn/runtime/test/generated/tests/depth_to_space_quant8_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/depth_to_space_quant8_1.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, depth_to_space_quant8_1) {
     execute(depth_to_space_quant8_1::CreateModel,
             depth_to_space_quant8_1::is_ignored,
-            depth_to_space_quant8_1::examples);
+            depth_to_space_quant8_1::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/depth_to_space_quant8_2.mod.py.cpp b/nn/runtime/test/generated/tests/depth_to_space_quant8_2.mod.py.cpp
index e989470..a1c4538 100644
--- a/nn/runtime/test/generated/tests/depth_to_space_quant8_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/depth_to_space_quant8_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, depth_to_space_quant8_2) {
     execute(depth_to_space_quant8_2::CreateModel,
             depth_to_space_quant8_2::is_ignored,
-            depth_to_space_quant8_2::examples);
+            depth_to_space_quant8_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/depth_to_space_v1_2.mod.py.cpp b/nn/runtime/test/generated/tests/depth_to_space_v1_2.mod.py.cpp
index 93e6a41..55bd07c 100644
--- a/nn/runtime/test/generated/tests/depth_to_space_v1_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/depth_to_space_v1_2.mod.py.cpp
@@ -12,108 +12,144 @@
 TEST_F(GeneratedTests, depth_to_space_v1_2_nhwc) {
     execute(depth_to_space_v1_2::CreateModel_nhwc,
             depth_to_space_v1_2::is_ignored_nhwc,
-            depth_to_space_v1_2::examples_nhwc);
+            depth_to_space_v1_2::get_examples_nhwc());
 }
 
 TEST_F(GeneratedTests, depth_to_space_v1_2_nhwc_relaxed) {
     execute(depth_to_space_v1_2::CreateModel_nhwc_relaxed,
             depth_to_space_v1_2::is_ignored_nhwc_relaxed,
-            depth_to_space_v1_2::examples_nhwc_relaxed);
+            depth_to_space_v1_2::get_examples_nhwc_relaxed());
+}
+
+TEST_F(GeneratedTests, depth_to_space_v1_2_nhwc_float16) {
+    execute(depth_to_space_v1_2::CreateModel_nhwc_float16,
+            depth_to_space_v1_2::is_ignored_nhwc_float16,
+            depth_to_space_v1_2::get_examples_nhwc_float16());
 }
 
 TEST_F(GeneratedTests, depth_to_space_v1_2_nhwc_quant8) {
     execute(depth_to_space_v1_2::CreateModel_nhwc_quant8,
             depth_to_space_v1_2::is_ignored_nhwc_quant8,
-            depth_to_space_v1_2::examples_nhwc_quant8);
+            depth_to_space_v1_2::get_examples_nhwc_quant8());
 }
 
 TEST_F(GeneratedTests, depth_to_space_v1_2_nchw) {
     execute(depth_to_space_v1_2::CreateModel_nchw,
             depth_to_space_v1_2::is_ignored_nchw,
-            depth_to_space_v1_2::examples_nchw);
+            depth_to_space_v1_2::get_examples_nchw());
 }
 
 TEST_F(GeneratedTests, depth_to_space_v1_2_nchw_relaxed) {
     execute(depth_to_space_v1_2::CreateModel_nchw_relaxed,
             depth_to_space_v1_2::is_ignored_nchw_relaxed,
-            depth_to_space_v1_2::examples_nchw_relaxed);
+            depth_to_space_v1_2::get_examples_nchw_relaxed());
+}
+
+TEST_F(GeneratedTests, depth_to_space_v1_2_nchw_float16) {
+    execute(depth_to_space_v1_2::CreateModel_nchw_float16,
+            depth_to_space_v1_2::is_ignored_nchw_float16,
+            depth_to_space_v1_2::get_examples_nchw_float16());
 }
 
 TEST_F(GeneratedTests, depth_to_space_v1_2_nchw_quant8) {
     execute(depth_to_space_v1_2::CreateModel_nchw_quant8,
             depth_to_space_v1_2::is_ignored_nchw_quant8,
-            depth_to_space_v1_2::examples_nchw_quant8);
+            depth_to_space_v1_2::get_examples_nchw_quant8());
 }
 
 TEST_F(GeneratedTests, depth_to_space_v1_2_nhwc_2) {
     execute(depth_to_space_v1_2::CreateModel_nhwc_2,
             depth_to_space_v1_2::is_ignored_nhwc_2,
-            depth_to_space_v1_2::examples_nhwc_2);
+            depth_to_space_v1_2::get_examples_nhwc_2());
 }
 
 TEST_F(GeneratedTests, depth_to_space_v1_2_nhwc_relaxed_2) {
     execute(depth_to_space_v1_2::CreateModel_nhwc_relaxed_2,
             depth_to_space_v1_2::is_ignored_nhwc_relaxed_2,
-            depth_to_space_v1_2::examples_nhwc_relaxed_2);
+            depth_to_space_v1_2::get_examples_nhwc_relaxed_2());
+}
+
+TEST_F(GeneratedTests, depth_to_space_v1_2_nhwc_float16_2) {
+    execute(depth_to_space_v1_2::CreateModel_nhwc_float16_2,
+            depth_to_space_v1_2::is_ignored_nhwc_float16_2,
+            depth_to_space_v1_2::get_examples_nhwc_float16_2());
 }
 
 TEST_F(GeneratedTests, depth_to_space_v1_2_nhwc_quant8_2) {
     execute(depth_to_space_v1_2::CreateModel_nhwc_quant8_2,
             depth_to_space_v1_2::is_ignored_nhwc_quant8_2,
-            depth_to_space_v1_2::examples_nhwc_quant8_2);
+            depth_to_space_v1_2::get_examples_nhwc_quant8_2());
 }
 
 TEST_F(GeneratedTests, depth_to_space_v1_2_nchw_2) {
     execute(depth_to_space_v1_2::CreateModel_nchw_2,
             depth_to_space_v1_2::is_ignored_nchw_2,
-            depth_to_space_v1_2::examples_nchw_2);
+            depth_to_space_v1_2::get_examples_nchw_2());
 }
 
 TEST_F(GeneratedTests, depth_to_space_v1_2_nchw_relaxed_2) {
     execute(depth_to_space_v1_2::CreateModel_nchw_relaxed_2,
             depth_to_space_v1_2::is_ignored_nchw_relaxed_2,
-            depth_to_space_v1_2::examples_nchw_relaxed_2);
+            depth_to_space_v1_2::get_examples_nchw_relaxed_2());
+}
+
+TEST_F(GeneratedTests, depth_to_space_v1_2_nchw_float16_2) {
+    execute(depth_to_space_v1_2::CreateModel_nchw_float16_2,
+            depth_to_space_v1_2::is_ignored_nchw_float16_2,
+            depth_to_space_v1_2::get_examples_nchw_float16_2());
 }
 
 TEST_F(GeneratedTests, depth_to_space_v1_2_nchw_quant8_2) {
     execute(depth_to_space_v1_2::CreateModel_nchw_quant8_2,
             depth_to_space_v1_2::is_ignored_nchw_quant8_2,
-            depth_to_space_v1_2::examples_nchw_quant8_2);
+            depth_to_space_v1_2::get_examples_nchw_quant8_2());
 }
 
 TEST_F(GeneratedTests, depth_to_space_v1_2_nhwc_3) {
     execute(depth_to_space_v1_2::CreateModel_nhwc_3,
             depth_to_space_v1_2::is_ignored_nhwc_3,
-            depth_to_space_v1_2::examples_nhwc_3);
+            depth_to_space_v1_2::get_examples_nhwc_3());
 }
 
 TEST_F(GeneratedTests, depth_to_space_v1_2_nhwc_relaxed_3) {
     execute(depth_to_space_v1_2::CreateModel_nhwc_relaxed_3,
             depth_to_space_v1_2::is_ignored_nhwc_relaxed_3,
-            depth_to_space_v1_2::examples_nhwc_relaxed_3);
+            depth_to_space_v1_2::get_examples_nhwc_relaxed_3());
+}
+
+TEST_F(GeneratedTests, depth_to_space_v1_2_nhwc_float16_3) {
+    execute(depth_to_space_v1_2::CreateModel_nhwc_float16_3,
+            depth_to_space_v1_2::is_ignored_nhwc_float16_3,
+            depth_to_space_v1_2::get_examples_nhwc_float16_3());
 }
 
 TEST_F(GeneratedTests, depth_to_space_v1_2_nhwc_quant8_3) {
     execute(depth_to_space_v1_2::CreateModel_nhwc_quant8_3,
             depth_to_space_v1_2::is_ignored_nhwc_quant8_3,
-            depth_to_space_v1_2::examples_nhwc_quant8_3);
+            depth_to_space_v1_2::get_examples_nhwc_quant8_3());
 }
 
 TEST_F(GeneratedTests, depth_to_space_v1_2_nchw_3) {
     execute(depth_to_space_v1_2::CreateModel_nchw_3,
             depth_to_space_v1_2::is_ignored_nchw_3,
-            depth_to_space_v1_2::examples_nchw_3);
+            depth_to_space_v1_2::get_examples_nchw_3());
 }
 
 TEST_F(GeneratedTests, depth_to_space_v1_2_nchw_relaxed_3) {
     execute(depth_to_space_v1_2::CreateModel_nchw_relaxed_3,
             depth_to_space_v1_2::is_ignored_nchw_relaxed_3,
-            depth_to_space_v1_2::examples_nchw_relaxed_3);
+            depth_to_space_v1_2::get_examples_nchw_relaxed_3());
+}
+
+TEST_F(GeneratedTests, depth_to_space_v1_2_nchw_float16_3) {
+    execute(depth_to_space_v1_2::CreateModel_nchw_float16_3,
+            depth_to_space_v1_2::is_ignored_nchw_float16_3,
+            depth_to_space_v1_2::get_examples_nchw_float16_3());
 }
 
 TEST_F(GeneratedTests, depth_to_space_v1_2_nchw_quant8_3) {
     execute(depth_to_space_v1_2::CreateModel_nchw_quant8_3,
             depth_to_space_v1_2::is_ignored_nchw_quant8_3,
-            depth_to_space_v1_2::examples_nchw_quant8_3);
+            depth_to_space_v1_2::get_examples_nchw_quant8_3());
 }
 
diff --git a/nn/runtime/test/generated/tests/depthwise_conv.mod.py.cpp b/nn/runtime/test/generated/tests/depthwise_conv.mod.py.cpp
index 9ed8b98..bbe4699 100644
--- a/nn/runtime/test/generated/tests/depthwise_conv.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/depthwise_conv.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, depthwise_conv) {
     execute(depthwise_conv::CreateModel,
             depthwise_conv::is_ignored,
-            depthwise_conv::examples);
+            depthwise_conv::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/depthwise_conv2d_float.mod.py.cpp b/nn/runtime/test/generated/tests/depthwise_conv2d_float.mod.py.cpp
index 90c01bb..d45266f 100644
--- a/nn/runtime/test/generated/tests/depthwise_conv2d_float.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/depthwise_conv2d_float.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, depthwise_conv2d_float) {
     execute(depthwise_conv2d_float::CreateModel,
             depthwise_conv2d_float::is_ignored,
-            depthwise_conv2d_float::examples);
+            depthwise_conv2d_float::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/depthwise_conv2d_float_2.mod.py.cpp b/nn/runtime/test/generated/tests/depthwise_conv2d_float_2.mod.py.cpp
index 242744e..4d416ce 100644
--- a/nn/runtime/test/generated/tests/depthwise_conv2d_float_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/depthwise_conv2d_float_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, depthwise_conv2d_float_2) {
     execute(depthwise_conv2d_float_2::CreateModel,
             depthwise_conv2d_float_2::is_ignored,
-            depthwise_conv2d_float_2::examples);
+            depthwise_conv2d_float_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/depthwise_conv2d_float_2_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/depthwise_conv2d_float_2_relaxed.mod.py.cpp
index cdd8e40..f9acf07 100644
--- a/nn/runtime/test/generated/tests/depthwise_conv2d_float_2_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/depthwise_conv2d_float_2_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, depthwise_conv2d_float_2_relaxed) {
     execute(depthwise_conv2d_float_2_relaxed::CreateModel,
             depthwise_conv2d_float_2_relaxed::is_ignored,
-            depthwise_conv2d_float_2_relaxed::examples);
+            depthwise_conv2d_float_2_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/depthwise_conv2d_float_large.mod.py.cpp b/nn/runtime/test/generated/tests/depthwise_conv2d_float_large.mod.py.cpp
index 898b30a..dac43a2 100644
--- a/nn/runtime/test/generated/tests/depthwise_conv2d_float_large.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/depthwise_conv2d_float_large.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, depthwise_conv2d_float_large) {
     execute(depthwise_conv2d_float_large::CreateModel,
             depthwise_conv2d_float_large::is_ignored,
-            depthwise_conv2d_float_large::examples);
+            depthwise_conv2d_float_large::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/depthwise_conv2d_float_large_2.mod.py.cpp b/nn/runtime/test/generated/tests/depthwise_conv2d_float_large_2.mod.py.cpp
index 6f5e61c..46a7e1e 100644
--- a/nn/runtime/test/generated/tests/depthwise_conv2d_float_large_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/depthwise_conv2d_float_large_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, depthwise_conv2d_float_large_2) {
     execute(depthwise_conv2d_float_large_2::CreateModel,
             depthwise_conv2d_float_large_2::is_ignored,
-            depthwise_conv2d_float_large_2::examples);
+            depthwise_conv2d_float_large_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/depthwise_conv2d_float_large_2_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/depthwise_conv2d_float_large_2_relaxed.mod.py.cpp
index ca554a3..6f04298 100644
--- a/nn/runtime/test/generated/tests/depthwise_conv2d_float_large_2_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/depthwise_conv2d_float_large_2_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, depthwise_conv2d_float_large_2_relaxed) {
     execute(depthwise_conv2d_float_large_2_relaxed::CreateModel,
             depthwise_conv2d_float_large_2_relaxed::is_ignored,
-            depthwise_conv2d_float_large_2_relaxed::examples);
+            depthwise_conv2d_float_large_2_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/depthwise_conv2d_float_large_2_weights_as_inputs.mod.py.cpp b/nn/runtime/test/generated/tests/depthwise_conv2d_float_large_2_weights_as_inputs.mod.py.cpp
index 34a74ac..f7da4dd 100644
--- a/nn/runtime/test/generated/tests/depthwise_conv2d_float_large_2_weights_as_inputs.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/depthwise_conv2d_float_large_2_weights_as_inputs.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, depthwise_conv2d_float_large_2_weights_as_inputs) {
     execute(depthwise_conv2d_float_large_2_weights_as_inputs::CreateModel,
             depthwise_conv2d_float_large_2_weights_as_inputs::is_ignored,
-            depthwise_conv2d_float_large_2_weights_as_inputs::examples);
+            depthwise_conv2d_float_large_2_weights_as_inputs::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/depthwise_conv2d_float_large_2_weights_as_inputs_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/depthwise_conv2d_float_large_2_weights_as_inputs_relaxed.mod.py.cpp
index 57ca9e2..e727b70 100644
--- a/nn/runtime/test/generated/tests/depthwise_conv2d_float_large_2_weights_as_inputs_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/depthwise_conv2d_float_large_2_weights_as_inputs_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, depthwise_conv2d_float_large_2_weights_as_inputs_relaxed) {
     execute(depthwise_conv2d_float_large_2_weights_as_inputs_relaxed::CreateModel,
             depthwise_conv2d_float_large_2_weights_as_inputs_relaxed::is_ignored,
-            depthwise_conv2d_float_large_2_weights_as_inputs_relaxed::examples);
+            depthwise_conv2d_float_large_2_weights_as_inputs_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/depthwise_conv2d_float_large_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/depthwise_conv2d_float_large_relaxed.mod.py.cpp
index 1f45be5..12f5248 100644
--- a/nn/runtime/test/generated/tests/depthwise_conv2d_float_large_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/depthwise_conv2d_float_large_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, depthwise_conv2d_float_large_relaxed) {
     execute(depthwise_conv2d_float_large_relaxed::CreateModel,
             depthwise_conv2d_float_large_relaxed::is_ignored,
-            depthwise_conv2d_float_large_relaxed::examples);
+            depthwise_conv2d_float_large_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/depthwise_conv2d_float_large_weights_as_inputs.mod.py.cpp b/nn/runtime/test/generated/tests/depthwise_conv2d_float_large_weights_as_inputs.mod.py.cpp
index 0201026..f153aeb 100644
--- a/nn/runtime/test/generated/tests/depthwise_conv2d_float_large_weights_as_inputs.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/depthwise_conv2d_float_large_weights_as_inputs.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, depthwise_conv2d_float_large_weights_as_inputs) {
     execute(depthwise_conv2d_float_large_weights_as_inputs::CreateModel,
             depthwise_conv2d_float_large_weights_as_inputs::is_ignored,
-            depthwise_conv2d_float_large_weights_as_inputs::examples);
+            depthwise_conv2d_float_large_weights_as_inputs::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/depthwise_conv2d_float_large_weights_as_inputs_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/depthwise_conv2d_float_large_weights_as_inputs_relaxed.mod.py.cpp
index 7b8cc80..0d433b5 100644
--- a/nn/runtime/test/generated/tests/depthwise_conv2d_float_large_weights_as_inputs_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/depthwise_conv2d_float_large_weights_as_inputs_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, depthwise_conv2d_float_large_weights_as_inputs_relaxed) {
     execute(depthwise_conv2d_float_large_weights_as_inputs_relaxed::CreateModel,
             depthwise_conv2d_float_large_weights_as_inputs_relaxed::is_ignored,
-            depthwise_conv2d_float_large_weights_as_inputs_relaxed::examples);
+            depthwise_conv2d_float_large_weights_as_inputs_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/depthwise_conv2d_float_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/depthwise_conv2d_float_relaxed.mod.py.cpp
index 482ceff..a9af019 100644
--- a/nn/runtime/test/generated/tests/depthwise_conv2d_float_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/depthwise_conv2d_float_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, depthwise_conv2d_float_relaxed) {
     execute(depthwise_conv2d_float_relaxed::CreateModel,
             depthwise_conv2d_float_relaxed::is_ignored,
-            depthwise_conv2d_float_relaxed::examples);
+            depthwise_conv2d_float_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/depthwise_conv2d_float_weights_as_inputs.mod.py.cpp b/nn/runtime/test/generated/tests/depthwise_conv2d_float_weights_as_inputs.mod.py.cpp
index 8bd1088..8a37b14 100644
--- a/nn/runtime/test/generated/tests/depthwise_conv2d_float_weights_as_inputs.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/depthwise_conv2d_float_weights_as_inputs.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, depthwise_conv2d_float_weights_as_inputs) {
     execute(depthwise_conv2d_float_weights_as_inputs::CreateModel,
             depthwise_conv2d_float_weights_as_inputs::is_ignored,
-            depthwise_conv2d_float_weights_as_inputs::examples);
+            depthwise_conv2d_float_weights_as_inputs::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/depthwise_conv2d_float_weights_as_inputs_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/depthwise_conv2d_float_weights_as_inputs_relaxed.mod.py.cpp
index 96598dd..d017811 100644
--- a/nn/runtime/test/generated/tests/depthwise_conv2d_float_weights_as_inputs_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/depthwise_conv2d_float_weights_as_inputs_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, depthwise_conv2d_float_weights_as_inputs_relaxed) {
     execute(depthwise_conv2d_float_weights_as_inputs_relaxed::CreateModel,
             depthwise_conv2d_float_weights_as_inputs_relaxed::is_ignored,
-            depthwise_conv2d_float_weights_as_inputs_relaxed::examples);
+            depthwise_conv2d_float_weights_as_inputs_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/depthwise_conv2d_quant8.mod.py.cpp b/nn/runtime/test/generated/tests/depthwise_conv2d_quant8.mod.py.cpp
index 681fd75..43c4d9e 100644
--- a/nn/runtime/test/generated/tests/depthwise_conv2d_quant8.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/depthwise_conv2d_quant8.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, depthwise_conv2d_quant8) {
     execute(depthwise_conv2d_quant8::CreateModel,
             depthwise_conv2d_quant8::is_ignored,
-            depthwise_conv2d_quant8::examples);
+            depthwise_conv2d_quant8::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/depthwise_conv2d_quant8_2.mod.py.cpp b/nn/runtime/test/generated/tests/depthwise_conv2d_quant8_2.mod.py.cpp
index b526e37..8621474 100644
--- a/nn/runtime/test/generated/tests/depthwise_conv2d_quant8_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/depthwise_conv2d_quant8_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, depthwise_conv2d_quant8_2) {
     execute(depthwise_conv2d_quant8_2::CreateModel,
             depthwise_conv2d_quant8_2::is_ignored,
-            depthwise_conv2d_quant8_2::examples);
+            depthwise_conv2d_quant8_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/depthwise_conv2d_quant8_large.mod.py.cpp b/nn/runtime/test/generated/tests/depthwise_conv2d_quant8_large.mod.py.cpp
index 9da67db..c7d5b9c 100644
--- a/nn/runtime/test/generated/tests/depthwise_conv2d_quant8_large.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/depthwise_conv2d_quant8_large.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, depthwise_conv2d_quant8_large) {
     execute(depthwise_conv2d_quant8_large::CreateModel,
             depthwise_conv2d_quant8_large::is_ignored,
-            depthwise_conv2d_quant8_large::examples);
+            depthwise_conv2d_quant8_large::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/depthwise_conv2d_quant8_large_weights_as_inputs.mod.py.cpp b/nn/runtime/test/generated/tests/depthwise_conv2d_quant8_large_weights_as_inputs.mod.py.cpp
index a6cf1cb..fd21286 100644
--- a/nn/runtime/test/generated/tests/depthwise_conv2d_quant8_large_weights_as_inputs.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/depthwise_conv2d_quant8_large_weights_as_inputs.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, depthwise_conv2d_quant8_large_weights_as_inputs) {
     execute(depthwise_conv2d_quant8_large_weights_as_inputs::CreateModel,
             depthwise_conv2d_quant8_large_weights_as_inputs::is_ignored,
-            depthwise_conv2d_quant8_large_weights_as_inputs::examples);
+            depthwise_conv2d_quant8_large_weights_as_inputs::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/depthwise_conv2d_quant8_weights_as_inputs.mod.py.cpp b/nn/runtime/test/generated/tests/depthwise_conv2d_quant8_weights_as_inputs.mod.py.cpp
index 73cadf9..d2019a3 100644
--- a/nn/runtime/test/generated/tests/depthwise_conv2d_quant8_weights_as_inputs.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/depthwise_conv2d_quant8_weights_as_inputs.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, depthwise_conv2d_quant8_weights_as_inputs) {
     execute(depthwise_conv2d_quant8_weights_as_inputs::CreateModel,
             depthwise_conv2d_quant8_weights_as_inputs::is_ignored,
-            depthwise_conv2d_quant8_weights_as_inputs::examples);
+            depthwise_conv2d_quant8_weights_as_inputs::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/depthwise_conv2d_v1_2.mod.py.cpp b/nn/runtime/test/generated/tests/depthwise_conv2d_v1_2.mod.py.cpp
index 186f202..db44d60 100644
--- a/nn/runtime/test/generated/tests/depthwise_conv2d_v1_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/depthwise_conv2d_v1_2.mod.py.cpp
@@ -12,288 +12,384 @@
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_nhwc) {
     execute(depthwise_conv2d_v1_2::CreateModel_nhwc,
             depthwise_conv2d_v1_2::is_ignored_nhwc,
-            depthwise_conv2d_v1_2::examples_nhwc);
+            depthwise_conv2d_v1_2::get_examples_nhwc());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_nhwc_relaxed) {
     execute(depthwise_conv2d_v1_2::CreateModel_nhwc_relaxed,
             depthwise_conv2d_v1_2::is_ignored_nhwc_relaxed,
-            depthwise_conv2d_v1_2::examples_nhwc_relaxed);
+            depthwise_conv2d_v1_2::get_examples_nhwc_relaxed());
+}
+
+TEST_F(GeneratedTests, depthwise_conv2d_v1_2_nhwc_float16) {
+    execute(depthwise_conv2d_v1_2::CreateModel_nhwc_float16,
+            depthwise_conv2d_v1_2::is_ignored_nhwc_float16,
+            depthwise_conv2d_v1_2::get_examples_nhwc_float16());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_nhwc_quant8) {
     execute(depthwise_conv2d_v1_2::CreateModel_nhwc_quant8,
             depthwise_conv2d_v1_2::is_ignored_nhwc_quant8,
-            depthwise_conv2d_v1_2::examples_nhwc_quant8);
+            depthwise_conv2d_v1_2::get_examples_nhwc_quant8());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_nhwc_weight_as_input) {
     execute(depthwise_conv2d_v1_2::CreateModel_nhwc_weight_as_input,
             depthwise_conv2d_v1_2::is_ignored_nhwc_weight_as_input,
-            depthwise_conv2d_v1_2::examples_nhwc_weight_as_input);
+            depthwise_conv2d_v1_2::get_examples_nhwc_weight_as_input());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_nhwc_weight_as_input_relaxed) {
     execute(depthwise_conv2d_v1_2::CreateModel_nhwc_weight_as_input_relaxed,
             depthwise_conv2d_v1_2::is_ignored_nhwc_weight_as_input_relaxed,
-            depthwise_conv2d_v1_2::examples_nhwc_weight_as_input_relaxed);
+            depthwise_conv2d_v1_2::get_examples_nhwc_weight_as_input_relaxed());
+}
+
+TEST_F(GeneratedTests, depthwise_conv2d_v1_2_nhwc_weight_as_input_float16) {
+    execute(depthwise_conv2d_v1_2::CreateModel_nhwc_weight_as_input_float16,
+            depthwise_conv2d_v1_2::is_ignored_nhwc_weight_as_input_float16,
+            depthwise_conv2d_v1_2::get_examples_nhwc_weight_as_input_float16());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_nhwc_weight_as_input_quant8) {
     execute(depthwise_conv2d_v1_2::CreateModel_nhwc_weight_as_input_quant8,
             depthwise_conv2d_v1_2::is_ignored_nhwc_weight_as_input_quant8,
-            depthwise_conv2d_v1_2::examples_nhwc_weight_as_input_quant8);
+            depthwise_conv2d_v1_2::get_examples_nhwc_weight_as_input_quant8());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_nchw) {
     execute(depthwise_conv2d_v1_2::CreateModel_nchw,
             depthwise_conv2d_v1_2::is_ignored_nchw,
-            depthwise_conv2d_v1_2::examples_nchw);
+            depthwise_conv2d_v1_2::get_examples_nchw());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_nchw_relaxed) {
     execute(depthwise_conv2d_v1_2::CreateModel_nchw_relaxed,
             depthwise_conv2d_v1_2::is_ignored_nchw_relaxed,
-            depthwise_conv2d_v1_2::examples_nchw_relaxed);
+            depthwise_conv2d_v1_2::get_examples_nchw_relaxed());
+}
+
+TEST_F(GeneratedTests, depthwise_conv2d_v1_2_nchw_float16) {
+    execute(depthwise_conv2d_v1_2::CreateModel_nchw_float16,
+            depthwise_conv2d_v1_2::is_ignored_nchw_float16,
+            depthwise_conv2d_v1_2::get_examples_nchw_float16());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_nchw_quant8) {
     execute(depthwise_conv2d_v1_2::CreateModel_nchw_quant8,
             depthwise_conv2d_v1_2::is_ignored_nchw_quant8,
-            depthwise_conv2d_v1_2::examples_nchw_quant8);
+            depthwise_conv2d_v1_2::get_examples_nchw_quant8());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_nchw_weight_as_input) {
     execute(depthwise_conv2d_v1_2::CreateModel_nchw_weight_as_input,
             depthwise_conv2d_v1_2::is_ignored_nchw_weight_as_input,
-            depthwise_conv2d_v1_2::examples_nchw_weight_as_input);
+            depthwise_conv2d_v1_2::get_examples_nchw_weight_as_input());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_nchw_weight_as_input_relaxed) {
     execute(depthwise_conv2d_v1_2::CreateModel_nchw_weight_as_input_relaxed,
             depthwise_conv2d_v1_2::is_ignored_nchw_weight_as_input_relaxed,
-            depthwise_conv2d_v1_2::examples_nchw_weight_as_input_relaxed);
+            depthwise_conv2d_v1_2::get_examples_nchw_weight_as_input_relaxed());
+}
+
+TEST_F(GeneratedTests, depthwise_conv2d_v1_2_nchw_weight_as_input_float16) {
+    execute(depthwise_conv2d_v1_2::CreateModel_nchw_weight_as_input_float16,
+            depthwise_conv2d_v1_2::is_ignored_nchw_weight_as_input_float16,
+            depthwise_conv2d_v1_2::get_examples_nchw_weight_as_input_float16());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_nchw_weight_as_input_quant8) {
     execute(depthwise_conv2d_v1_2::CreateModel_nchw_weight_as_input_quant8,
             depthwise_conv2d_v1_2::is_ignored_nchw_weight_as_input_quant8,
-            depthwise_conv2d_v1_2::examples_nchw_weight_as_input_quant8);
+            depthwise_conv2d_v1_2::get_examples_nchw_weight_as_input_quant8());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_nhwc_2) {
     execute(depthwise_conv2d_v1_2::CreateModel_nhwc_2,
             depthwise_conv2d_v1_2::is_ignored_nhwc_2,
-            depthwise_conv2d_v1_2::examples_nhwc_2);
+            depthwise_conv2d_v1_2::get_examples_nhwc_2());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_nhwc_relaxed_2) {
     execute(depthwise_conv2d_v1_2::CreateModel_nhwc_relaxed_2,
             depthwise_conv2d_v1_2::is_ignored_nhwc_relaxed_2,
-            depthwise_conv2d_v1_2::examples_nhwc_relaxed_2);
+            depthwise_conv2d_v1_2::get_examples_nhwc_relaxed_2());
+}
+
+TEST_F(GeneratedTests, depthwise_conv2d_v1_2_nhwc_float16_2) {
+    execute(depthwise_conv2d_v1_2::CreateModel_nhwc_float16_2,
+            depthwise_conv2d_v1_2::is_ignored_nhwc_float16_2,
+            depthwise_conv2d_v1_2::get_examples_nhwc_float16_2());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_nhwc_quant8_2) {
     execute(depthwise_conv2d_v1_2::CreateModel_nhwc_quant8_2,
             depthwise_conv2d_v1_2::is_ignored_nhwc_quant8_2,
-            depthwise_conv2d_v1_2::examples_nhwc_quant8_2);
+            depthwise_conv2d_v1_2::get_examples_nhwc_quant8_2());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_nhwc_weight_as_input_2) {
     execute(depthwise_conv2d_v1_2::CreateModel_nhwc_weight_as_input_2,
             depthwise_conv2d_v1_2::is_ignored_nhwc_weight_as_input_2,
-            depthwise_conv2d_v1_2::examples_nhwc_weight_as_input_2);
+            depthwise_conv2d_v1_2::get_examples_nhwc_weight_as_input_2());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_nhwc_weight_as_input_relaxed_2) {
     execute(depthwise_conv2d_v1_2::CreateModel_nhwc_weight_as_input_relaxed_2,
             depthwise_conv2d_v1_2::is_ignored_nhwc_weight_as_input_relaxed_2,
-            depthwise_conv2d_v1_2::examples_nhwc_weight_as_input_relaxed_2);
+            depthwise_conv2d_v1_2::get_examples_nhwc_weight_as_input_relaxed_2());
+}
+
+TEST_F(GeneratedTests, depthwise_conv2d_v1_2_nhwc_weight_as_input_float16_2) {
+    execute(depthwise_conv2d_v1_2::CreateModel_nhwc_weight_as_input_float16_2,
+            depthwise_conv2d_v1_2::is_ignored_nhwc_weight_as_input_float16_2,
+            depthwise_conv2d_v1_2::get_examples_nhwc_weight_as_input_float16_2());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_nhwc_weight_as_input_quant8_2) {
     execute(depthwise_conv2d_v1_2::CreateModel_nhwc_weight_as_input_quant8_2,
             depthwise_conv2d_v1_2::is_ignored_nhwc_weight_as_input_quant8_2,
-            depthwise_conv2d_v1_2::examples_nhwc_weight_as_input_quant8_2);
+            depthwise_conv2d_v1_2::get_examples_nhwc_weight_as_input_quant8_2());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_nchw_2) {
     execute(depthwise_conv2d_v1_2::CreateModel_nchw_2,
             depthwise_conv2d_v1_2::is_ignored_nchw_2,
-            depthwise_conv2d_v1_2::examples_nchw_2);
+            depthwise_conv2d_v1_2::get_examples_nchw_2());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_nchw_relaxed_2) {
     execute(depthwise_conv2d_v1_2::CreateModel_nchw_relaxed_2,
             depthwise_conv2d_v1_2::is_ignored_nchw_relaxed_2,
-            depthwise_conv2d_v1_2::examples_nchw_relaxed_2);
+            depthwise_conv2d_v1_2::get_examples_nchw_relaxed_2());
+}
+
+TEST_F(GeneratedTests, depthwise_conv2d_v1_2_nchw_float16_2) {
+    execute(depthwise_conv2d_v1_2::CreateModel_nchw_float16_2,
+            depthwise_conv2d_v1_2::is_ignored_nchw_float16_2,
+            depthwise_conv2d_v1_2::get_examples_nchw_float16_2());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_nchw_quant8_2) {
     execute(depthwise_conv2d_v1_2::CreateModel_nchw_quant8_2,
             depthwise_conv2d_v1_2::is_ignored_nchw_quant8_2,
-            depthwise_conv2d_v1_2::examples_nchw_quant8_2);
+            depthwise_conv2d_v1_2::get_examples_nchw_quant8_2());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_nchw_weight_as_input_2) {
     execute(depthwise_conv2d_v1_2::CreateModel_nchw_weight_as_input_2,
             depthwise_conv2d_v1_2::is_ignored_nchw_weight_as_input_2,
-            depthwise_conv2d_v1_2::examples_nchw_weight_as_input_2);
+            depthwise_conv2d_v1_2::get_examples_nchw_weight_as_input_2());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_nchw_weight_as_input_relaxed_2) {
     execute(depthwise_conv2d_v1_2::CreateModel_nchw_weight_as_input_relaxed_2,
             depthwise_conv2d_v1_2::is_ignored_nchw_weight_as_input_relaxed_2,
-            depthwise_conv2d_v1_2::examples_nchw_weight_as_input_relaxed_2);
+            depthwise_conv2d_v1_2::get_examples_nchw_weight_as_input_relaxed_2());
+}
+
+TEST_F(GeneratedTests, depthwise_conv2d_v1_2_nchw_weight_as_input_float16_2) {
+    execute(depthwise_conv2d_v1_2::CreateModel_nchw_weight_as_input_float16_2,
+            depthwise_conv2d_v1_2::is_ignored_nchw_weight_as_input_float16_2,
+            depthwise_conv2d_v1_2::get_examples_nchw_weight_as_input_float16_2());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_nchw_weight_as_input_quant8_2) {
     execute(depthwise_conv2d_v1_2::CreateModel_nchw_weight_as_input_quant8_2,
             depthwise_conv2d_v1_2::is_ignored_nchw_weight_as_input_quant8_2,
-            depthwise_conv2d_v1_2::examples_nchw_weight_as_input_quant8_2);
+            depthwise_conv2d_v1_2::get_examples_nchw_weight_as_input_quant8_2());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_large_nhwc) {
     execute(depthwise_conv2d_v1_2::CreateModel_large_nhwc,
             depthwise_conv2d_v1_2::is_ignored_large_nhwc,
-            depthwise_conv2d_v1_2::examples_large_nhwc);
+            depthwise_conv2d_v1_2::get_examples_large_nhwc());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_large_nhwc_relaxed) {
     execute(depthwise_conv2d_v1_2::CreateModel_large_nhwc_relaxed,
             depthwise_conv2d_v1_2::is_ignored_large_nhwc_relaxed,
-            depthwise_conv2d_v1_2::examples_large_nhwc_relaxed);
+            depthwise_conv2d_v1_2::get_examples_large_nhwc_relaxed());
+}
+
+TEST_F(GeneratedTests, depthwise_conv2d_v1_2_large_nhwc_float16) {
+    execute(depthwise_conv2d_v1_2::CreateModel_large_nhwc_float16,
+            depthwise_conv2d_v1_2::is_ignored_large_nhwc_float16,
+            depthwise_conv2d_v1_2::get_examples_large_nhwc_float16());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_large_nhwc_quant8) {
     execute(depthwise_conv2d_v1_2::CreateModel_large_nhwc_quant8,
             depthwise_conv2d_v1_2::is_ignored_large_nhwc_quant8,
-            depthwise_conv2d_v1_2::examples_large_nhwc_quant8);
+            depthwise_conv2d_v1_2::get_examples_large_nhwc_quant8());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_large_nhwc_weight_as_input) {
     execute(depthwise_conv2d_v1_2::CreateModel_large_nhwc_weight_as_input,
             depthwise_conv2d_v1_2::is_ignored_large_nhwc_weight_as_input,
-            depthwise_conv2d_v1_2::examples_large_nhwc_weight_as_input);
+            depthwise_conv2d_v1_2::get_examples_large_nhwc_weight_as_input());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_large_nhwc_weight_as_input_relaxed) {
     execute(depthwise_conv2d_v1_2::CreateModel_large_nhwc_weight_as_input_relaxed,
             depthwise_conv2d_v1_2::is_ignored_large_nhwc_weight_as_input_relaxed,
-            depthwise_conv2d_v1_2::examples_large_nhwc_weight_as_input_relaxed);
+            depthwise_conv2d_v1_2::get_examples_large_nhwc_weight_as_input_relaxed());
+}
+
+TEST_F(GeneratedTests, depthwise_conv2d_v1_2_large_nhwc_weight_as_input_float16) {
+    execute(depthwise_conv2d_v1_2::CreateModel_large_nhwc_weight_as_input_float16,
+            depthwise_conv2d_v1_2::is_ignored_large_nhwc_weight_as_input_float16,
+            depthwise_conv2d_v1_2::get_examples_large_nhwc_weight_as_input_float16());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_large_nhwc_weight_as_input_quant8) {
     execute(depthwise_conv2d_v1_2::CreateModel_large_nhwc_weight_as_input_quant8,
             depthwise_conv2d_v1_2::is_ignored_large_nhwc_weight_as_input_quant8,
-            depthwise_conv2d_v1_2::examples_large_nhwc_weight_as_input_quant8);
+            depthwise_conv2d_v1_2::get_examples_large_nhwc_weight_as_input_quant8());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_large_nchw) {
     execute(depthwise_conv2d_v1_2::CreateModel_large_nchw,
             depthwise_conv2d_v1_2::is_ignored_large_nchw,
-            depthwise_conv2d_v1_2::examples_large_nchw);
+            depthwise_conv2d_v1_2::get_examples_large_nchw());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_large_nchw_relaxed) {
     execute(depthwise_conv2d_v1_2::CreateModel_large_nchw_relaxed,
             depthwise_conv2d_v1_2::is_ignored_large_nchw_relaxed,
-            depthwise_conv2d_v1_2::examples_large_nchw_relaxed);
+            depthwise_conv2d_v1_2::get_examples_large_nchw_relaxed());
+}
+
+TEST_F(GeneratedTests, depthwise_conv2d_v1_2_large_nchw_float16) {
+    execute(depthwise_conv2d_v1_2::CreateModel_large_nchw_float16,
+            depthwise_conv2d_v1_2::is_ignored_large_nchw_float16,
+            depthwise_conv2d_v1_2::get_examples_large_nchw_float16());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_large_nchw_quant8) {
     execute(depthwise_conv2d_v1_2::CreateModel_large_nchw_quant8,
             depthwise_conv2d_v1_2::is_ignored_large_nchw_quant8,
-            depthwise_conv2d_v1_2::examples_large_nchw_quant8);
+            depthwise_conv2d_v1_2::get_examples_large_nchw_quant8());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_large_nchw_weight_as_input) {
     execute(depthwise_conv2d_v1_2::CreateModel_large_nchw_weight_as_input,
             depthwise_conv2d_v1_2::is_ignored_large_nchw_weight_as_input,
-            depthwise_conv2d_v1_2::examples_large_nchw_weight_as_input);
+            depthwise_conv2d_v1_2::get_examples_large_nchw_weight_as_input());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_large_nchw_weight_as_input_relaxed) {
     execute(depthwise_conv2d_v1_2::CreateModel_large_nchw_weight_as_input_relaxed,
             depthwise_conv2d_v1_2::is_ignored_large_nchw_weight_as_input_relaxed,
-            depthwise_conv2d_v1_2::examples_large_nchw_weight_as_input_relaxed);
+            depthwise_conv2d_v1_2::get_examples_large_nchw_weight_as_input_relaxed());
+}
+
+TEST_F(GeneratedTests, depthwise_conv2d_v1_2_large_nchw_weight_as_input_float16) {
+    execute(depthwise_conv2d_v1_2::CreateModel_large_nchw_weight_as_input_float16,
+            depthwise_conv2d_v1_2::is_ignored_large_nchw_weight_as_input_float16,
+            depthwise_conv2d_v1_2::get_examples_large_nchw_weight_as_input_float16());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_large_nchw_weight_as_input_quant8) {
     execute(depthwise_conv2d_v1_2::CreateModel_large_nchw_weight_as_input_quant8,
             depthwise_conv2d_v1_2::is_ignored_large_nchw_weight_as_input_quant8,
-            depthwise_conv2d_v1_2::examples_large_nchw_weight_as_input_quant8);
+            depthwise_conv2d_v1_2::get_examples_large_nchw_weight_as_input_quant8());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_large_nhwc_2) {
     execute(depthwise_conv2d_v1_2::CreateModel_large_nhwc_2,
             depthwise_conv2d_v1_2::is_ignored_large_nhwc_2,
-            depthwise_conv2d_v1_2::examples_large_nhwc_2);
+            depthwise_conv2d_v1_2::get_examples_large_nhwc_2());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_large_nhwc_relaxed_2) {
     execute(depthwise_conv2d_v1_2::CreateModel_large_nhwc_relaxed_2,
             depthwise_conv2d_v1_2::is_ignored_large_nhwc_relaxed_2,
-            depthwise_conv2d_v1_2::examples_large_nhwc_relaxed_2);
+            depthwise_conv2d_v1_2::get_examples_large_nhwc_relaxed_2());
+}
+
+TEST_F(GeneratedTests, depthwise_conv2d_v1_2_large_nhwc_float16_2) {
+    execute(depthwise_conv2d_v1_2::CreateModel_large_nhwc_float16_2,
+            depthwise_conv2d_v1_2::is_ignored_large_nhwc_float16_2,
+            depthwise_conv2d_v1_2::get_examples_large_nhwc_float16_2());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_large_nhwc_quant8_2) {
     execute(depthwise_conv2d_v1_2::CreateModel_large_nhwc_quant8_2,
             depthwise_conv2d_v1_2::is_ignored_large_nhwc_quant8_2,
-            depthwise_conv2d_v1_2::examples_large_nhwc_quant8_2);
+            depthwise_conv2d_v1_2::get_examples_large_nhwc_quant8_2());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_large_nhwc_weight_as_input_2) {
     execute(depthwise_conv2d_v1_2::CreateModel_large_nhwc_weight_as_input_2,
             depthwise_conv2d_v1_2::is_ignored_large_nhwc_weight_as_input_2,
-            depthwise_conv2d_v1_2::examples_large_nhwc_weight_as_input_2);
+            depthwise_conv2d_v1_2::get_examples_large_nhwc_weight_as_input_2());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_large_nhwc_weight_as_input_relaxed_2) {
     execute(depthwise_conv2d_v1_2::CreateModel_large_nhwc_weight_as_input_relaxed_2,
             depthwise_conv2d_v1_2::is_ignored_large_nhwc_weight_as_input_relaxed_2,
-            depthwise_conv2d_v1_2::examples_large_nhwc_weight_as_input_relaxed_2);
+            depthwise_conv2d_v1_2::get_examples_large_nhwc_weight_as_input_relaxed_2());
+}
+
+TEST_F(GeneratedTests, depthwise_conv2d_v1_2_large_nhwc_weight_as_input_float16_2) {
+    execute(depthwise_conv2d_v1_2::CreateModel_large_nhwc_weight_as_input_float16_2,
+            depthwise_conv2d_v1_2::is_ignored_large_nhwc_weight_as_input_float16_2,
+            depthwise_conv2d_v1_2::get_examples_large_nhwc_weight_as_input_float16_2());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_large_nhwc_weight_as_input_quant8_2) {
     execute(depthwise_conv2d_v1_2::CreateModel_large_nhwc_weight_as_input_quant8_2,
             depthwise_conv2d_v1_2::is_ignored_large_nhwc_weight_as_input_quant8_2,
-            depthwise_conv2d_v1_2::examples_large_nhwc_weight_as_input_quant8_2);
+            depthwise_conv2d_v1_2::get_examples_large_nhwc_weight_as_input_quant8_2());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_large_nchw_2) {
     execute(depthwise_conv2d_v1_2::CreateModel_large_nchw_2,
             depthwise_conv2d_v1_2::is_ignored_large_nchw_2,
-            depthwise_conv2d_v1_2::examples_large_nchw_2);
+            depthwise_conv2d_v1_2::get_examples_large_nchw_2());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_large_nchw_relaxed_2) {
     execute(depthwise_conv2d_v1_2::CreateModel_large_nchw_relaxed_2,
             depthwise_conv2d_v1_2::is_ignored_large_nchw_relaxed_2,
-            depthwise_conv2d_v1_2::examples_large_nchw_relaxed_2);
+            depthwise_conv2d_v1_2::get_examples_large_nchw_relaxed_2());
+}
+
+TEST_F(GeneratedTests, depthwise_conv2d_v1_2_large_nchw_float16_2) {
+    execute(depthwise_conv2d_v1_2::CreateModel_large_nchw_float16_2,
+            depthwise_conv2d_v1_2::is_ignored_large_nchw_float16_2,
+            depthwise_conv2d_v1_2::get_examples_large_nchw_float16_2());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_large_nchw_quant8_2) {
     execute(depthwise_conv2d_v1_2::CreateModel_large_nchw_quant8_2,
             depthwise_conv2d_v1_2::is_ignored_large_nchw_quant8_2,
-            depthwise_conv2d_v1_2::examples_large_nchw_quant8_2);
+            depthwise_conv2d_v1_2::get_examples_large_nchw_quant8_2());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_large_nchw_weight_as_input_2) {
     execute(depthwise_conv2d_v1_2::CreateModel_large_nchw_weight_as_input_2,
             depthwise_conv2d_v1_2::is_ignored_large_nchw_weight_as_input_2,
-            depthwise_conv2d_v1_2::examples_large_nchw_weight_as_input_2);
+            depthwise_conv2d_v1_2::get_examples_large_nchw_weight_as_input_2());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_large_nchw_weight_as_input_relaxed_2) {
     execute(depthwise_conv2d_v1_2::CreateModel_large_nchw_weight_as_input_relaxed_2,
             depthwise_conv2d_v1_2::is_ignored_large_nchw_weight_as_input_relaxed_2,
-            depthwise_conv2d_v1_2::examples_large_nchw_weight_as_input_relaxed_2);
+            depthwise_conv2d_v1_2::get_examples_large_nchw_weight_as_input_relaxed_2());
+}
+
+TEST_F(GeneratedTests, depthwise_conv2d_v1_2_large_nchw_weight_as_input_float16_2) {
+    execute(depthwise_conv2d_v1_2::CreateModel_large_nchw_weight_as_input_float16_2,
+            depthwise_conv2d_v1_2::is_ignored_large_nchw_weight_as_input_float16_2,
+            depthwise_conv2d_v1_2::get_examples_large_nchw_weight_as_input_float16_2());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_large_nchw_weight_as_input_quant8_2) {
     execute(depthwise_conv2d_v1_2::CreateModel_large_nchw_weight_as_input_quant8_2,
             depthwise_conv2d_v1_2::is_ignored_large_nchw_weight_as_input_quant8_2,
-            depthwise_conv2d_v1_2::examples_large_nchw_weight_as_input_quant8_2);
+            depthwise_conv2d_v1_2::get_examples_large_nchw_weight_as_input_quant8_2());
 }
 
diff --git a/nn/runtime/test/generated/tests/depthwise_conv_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/depthwise_conv_relaxed.mod.py.cpp
index 381e765..dd20d9e 100644
--- a/nn/runtime/test/generated/tests/depthwise_conv_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/depthwise_conv_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, depthwise_conv_relaxed) {
     execute(depthwise_conv_relaxed::CreateModel,
             depthwise_conv_relaxed::is_ignored,
-            depthwise_conv_relaxed::examples);
+            depthwise_conv_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/dequantize.mod.py.cpp b/nn/runtime/test/generated/tests/dequantize.mod.py.cpp
index 030b228..6dc77a0 100644
--- a/nn/runtime/test/generated/tests/dequantize.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/dequantize.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, dequantize) {
     execute(dequantize::CreateModel,
             dequantize::is_ignored,
-            dequantize::examples);
+            dequantize::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/dequantize_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/dequantize_relaxed.mod.py.cpp
index f2b58ab..2c14fd4 100644
--- a/nn/runtime/test/generated/tests/dequantize_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/dequantize_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, dequantize_relaxed) {
     execute(dequantize_relaxed::CreateModel,
             dequantize_relaxed::is_ignored,
-            dequantize_relaxed::examples);
+            dequantize_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/div.mod.py.cpp b/nn/runtime/test/generated/tests/div.mod.py.cpp
index e70a442..9e7383b 100644
--- a/nn/runtime/test/generated/tests/div.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/div.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, div) {
     execute(div::CreateModel,
             div::is_ignored,
-            div::examples);
+            div::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/div_broadcast_float.mod.py.cpp b/nn/runtime/test/generated/tests/div_broadcast_float.mod.py.cpp
index d7bfb9f..3a0c0c4 100644
--- a/nn/runtime/test/generated/tests/div_broadcast_float.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/div_broadcast_float.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, div_broadcast_float) {
     execute(div_broadcast_float::CreateModel,
             div_broadcast_float::is_ignored,
-            div_broadcast_float::examples);
+            div_broadcast_float::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/div_broadcast_float16.mod.py.cpp b/nn/runtime/test/generated/tests/div_broadcast_float16.mod.py.cpp
index 07bd719..b79e834 100644
--- a/nn/runtime/test/generated/tests/div_broadcast_float16.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/div_broadcast_float16.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, div_broadcast_float16) {
     execute(div_broadcast_float16::CreateModel,
             div_broadcast_float16::is_ignored,
-            div_broadcast_float16::examples);
+            div_broadcast_float16::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/div_broadcast_float_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/div_broadcast_float_relaxed.mod.py.cpp
index e9c6d26..e2319ae 100644
--- a/nn/runtime/test/generated/tests/div_broadcast_float_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/div_broadcast_float_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, div_broadcast_float_relaxed) {
     execute(div_broadcast_float_relaxed::CreateModel,
             div_broadcast_float_relaxed::is_ignored,
-            div_broadcast_float_relaxed::examples);
+            div_broadcast_float_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/div_float16.mod.py.cpp b/nn/runtime/test/generated/tests/div_float16.mod.py.cpp
index 00007bc..9dcadcf 100644
--- a/nn/runtime/test/generated/tests/div_float16.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/div_float16.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, div_float16) {
     execute(div_float16::CreateModel,
             div_float16::is_ignored,
-            div_float16::examples);
+            div_float16::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/div_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/div_relaxed.mod.py.cpp
index 324cdc3..c3f4f6e 100644
--- a/nn/runtime/test/generated/tests/div_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/div_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, div_relaxed) {
     execute(div_relaxed::CreateModel,
             div_relaxed::is_ignored,
-            div_relaxed::examples);
+            div_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/embedding_lookup.mod.py.cpp b/nn/runtime/test/generated/tests/embedding_lookup.mod.py.cpp
index e5c03c6..e145bc5 100644
--- a/nn/runtime/test/generated/tests/embedding_lookup.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/embedding_lookup.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, embedding_lookup) {
     execute(embedding_lookup::CreateModel,
             embedding_lookup::is_ignored,
-            embedding_lookup::examples);
+            embedding_lookup::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/embedding_lookup_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/embedding_lookup_relaxed.mod.py.cpp
index 649ee76..1d09c37 100644
--- a/nn/runtime/test/generated/tests/embedding_lookup_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/embedding_lookup_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, embedding_lookup_relaxed) {
     execute(embedding_lookup_relaxed::CreateModel,
             embedding_lookup_relaxed::is_ignored,
-            embedding_lookup_relaxed::examples);
+            embedding_lookup_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/expand_dims.mod.py.cpp b/nn/runtime/test/generated/tests/expand_dims.mod.py.cpp
index ece1b2f..d77620d 100644
--- a/nn/runtime/test/generated/tests/expand_dims.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/expand_dims.mod.py.cpp
@@ -12,96 +12,96 @@
 TEST_F(GeneratedTests, expand_dims) {
     execute(expand_dims::CreateModel,
             expand_dims::is_ignored,
-            expand_dims::examples);
+            expand_dims::get_examples());
 }
 
 TEST_F(GeneratedTests, expand_dims_relaxed) {
     execute(expand_dims::CreateModel_relaxed,
             expand_dims::is_ignored_relaxed,
-            expand_dims::examples_relaxed);
+            expand_dims::get_examples_relaxed());
 }
 
 TEST_F(GeneratedTests, expand_dims_quant8) {
     execute(expand_dims::CreateModel_quant8,
             expand_dims::is_ignored_quant8,
-            expand_dims::examples_quant8);
+            expand_dims::get_examples_quant8());
 }
 
 TEST_F(GeneratedTests, expand_dims_int32) {
     execute(expand_dims::CreateModel_int32,
             expand_dims::is_ignored_int32,
-            expand_dims::examples_int32);
+            expand_dims::get_examples_int32());
 }
 
 TEST_F(GeneratedTests, expand_dims_2) {
     execute(expand_dims::CreateModel_2,
             expand_dims::is_ignored_2,
-            expand_dims::examples_2);
+            expand_dims::get_examples_2());
 }
 
 TEST_F(GeneratedTests, expand_dims_relaxed_2) {
     execute(expand_dims::CreateModel_relaxed_2,
             expand_dims::is_ignored_relaxed_2,
-            expand_dims::examples_relaxed_2);
+            expand_dims::get_examples_relaxed_2());
 }
 
 TEST_F(GeneratedTests, expand_dims_quant8_2) {
     execute(expand_dims::CreateModel_quant8_2,
             expand_dims::is_ignored_quant8_2,
-            expand_dims::examples_quant8_2);
+            expand_dims::get_examples_quant8_2());
 }
 
 TEST_F(GeneratedTests, expand_dims_int32_2) {
     execute(expand_dims::CreateModel_int32_2,
             expand_dims::is_ignored_int32_2,
-            expand_dims::examples_int32_2);
+            expand_dims::get_examples_int32_2());
 }
 
 TEST_F(GeneratedTests, expand_dims_3) {
     execute(expand_dims::CreateModel_3,
             expand_dims::is_ignored_3,
-            expand_dims::examples_3);
+            expand_dims::get_examples_3());
 }
 
 TEST_F(GeneratedTests, expand_dims_relaxed_3) {
     execute(expand_dims::CreateModel_relaxed_3,
             expand_dims::is_ignored_relaxed_3,
-            expand_dims::examples_relaxed_3);
+            expand_dims::get_examples_relaxed_3());
 }
 
 TEST_F(GeneratedTests, expand_dims_quant8_3) {
     execute(expand_dims::CreateModel_quant8_3,
             expand_dims::is_ignored_quant8_3,
-            expand_dims::examples_quant8_3);
+            expand_dims::get_examples_quant8_3());
 }
 
 TEST_F(GeneratedTests, expand_dims_int32_3) {
     execute(expand_dims::CreateModel_int32_3,
             expand_dims::is_ignored_int32_3,
-            expand_dims::examples_int32_3);
+            expand_dims::get_examples_int32_3());
 }
 
 TEST_F(GeneratedTests, expand_dims_4) {
     execute(expand_dims::CreateModel_4,
             expand_dims::is_ignored_4,
-            expand_dims::examples_4);
+            expand_dims::get_examples_4());
 }
 
 TEST_F(GeneratedTests, expand_dims_relaxed_4) {
     execute(expand_dims::CreateModel_relaxed_4,
             expand_dims::is_ignored_relaxed_4,
-            expand_dims::examples_relaxed_4);
+            expand_dims::get_examples_relaxed_4());
 }
 
 TEST_F(GeneratedTests, expand_dims_quant8_4) {
     execute(expand_dims::CreateModel_quant8_4,
             expand_dims::is_ignored_quant8_4,
-            expand_dims::examples_quant8_4);
+            expand_dims::get_examples_quant8_4());
 }
 
 TEST_F(GeneratedTests, expand_dims_int32_4) {
     execute(expand_dims::CreateModel_int32_4,
             expand_dims::is_ignored_int32_4,
-            expand_dims::examples_int32_4);
+            expand_dims::get_examples_int32_4());
 }
 
diff --git a/nn/runtime/test/generated/tests/floor.mod.py.cpp b/nn/runtime/test/generated/tests/floor.mod.py.cpp
index 0621fd6..85fa666 100644
--- a/nn/runtime/test/generated/tests/floor.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/floor.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, floor) {
     execute(floor::CreateModel,
             floor::is_ignored,
-            floor::examples);
+            floor::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/floor_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/floor_relaxed.mod.py.cpp
index 90dea42..407ad8e 100644
--- a/nn/runtime/test/generated/tests/floor_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/floor_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, floor_relaxed) {
     execute(floor_relaxed::CreateModel,
             floor_relaxed::is_ignored,
-            floor_relaxed::examples);
+            floor_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/fully_connected_float.mod.py.cpp b/nn/runtime/test/generated/tests/fully_connected_float.mod.py.cpp
index c36bebc..087be5a 100644
--- a/nn/runtime/test/generated/tests/fully_connected_float.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/fully_connected_float.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, fully_connected_float) {
     execute(fully_connected_float::CreateModel,
             fully_connected_float::is_ignored,
-            fully_connected_float::examples);
+            fully_connected_float::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/fully_connected_float_2.mod.py.cpp b/nn/runtime/test/generated/tests/fully_connected_float_2.mod.py.cpp
index 2452310..0d36de4 100644
--- a/nn/runtime/test/generated/tests/fully_connected_float_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/fully_connected_float_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, fully_connected_float_2) {
     execute(fully_connected_float_2::CreateModel,
             fully_connected_float_2::is_ignored,
-            fully_connected_float_2::examples);
+            fully_connected_float_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/fully_connected_float_2_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/fully_connected_float_2_relaxed.mod.py.cpp
index 0bb39ad..2564313 100644
--- a/nn/runtime/test/generated/tests/fully_connected_float_2_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/fully_connected_float_2_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, fully_connected_float_2_relaxed) {
     execute(fully_connected_float_2_relaxed::CreateModel,
             fully_connected_float_2_relaxed::is_ignored,
-            fully_connected_float_2_relaxed::examples);
+            fully_connected_float_2_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/fully_connected_float_3.mod.py.cpp b/nn/runtime/test/generated/tests/fully_connected_float_3.mod.py.cpp
index 8646cd1..2aabc05 100644
--- a/nn/runtime/test/generated/tests/fully_connected_float_3.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/fully_connected_float_3.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, fully_connected_float_3) {
     execute(fully_connected_float_3::CreateModel,
             fully_connected_float_3::is_ignored,
-            fully_connected_float_3::examples);
+            fully_connected_float_3::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/fully_connected_float_4d_simple.mod.py.cpp b/nn/runtime/test/generated/tests/fully_connected_float_4d_simple.mod.py.cpp
index 0bfa2c3..27753f2 100644
--- a/nn/runtime/test/generated/tests/fully_connected_float_4d_simple.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/fully_connected_float_4d_simple.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, fully_connected_float_4d_simple) {
     execute(fully_connected_float_4d_simple::CreateModel,
             fully_connected_float_4d_simple::is_ignored,
-            fully_connected_float_4d_simple::examples);
+            fully_connected_float_4d_simple::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/fully_connected_float_4d_simple_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/fully_connected_float_4d_simple_relaxed.mod.py.cpp
index 4fe494b..d266230 100644
--- a/nn/runtime/test/generated/tests/fully_connected_float_4d_simple_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/fully_connected_float_4d_simple_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, fully_connected_float_4d_simple_relaxed) {
     execute(fully_connected_float_4d_simple_relaxed::CreateModel,
             fully_connected_float_4d_simple_relaxed::is_ignored,
-            fully_connected_float_4d_simple_relaxed::examples);
+            fully_connected_float_4d_simple_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/fully_connected_float_large.mod.py.cpp b/nn/runtime/test/generated/tests/fully_connected_float_large.mod.py.cpp
index 9a4c393..a2a2365 100644
--- a/nn/runtime/test/generated/tests/fully_connected_float_large.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/fully_connected_float_large.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, fully_connected_float_large) {
     execute(fully_connected_float_large::CreateModel,
             fully_connected_float_large::is_ignored,
-            fully_connected_float_large::examples);
+            fully_connected_float_large::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/fully_connected_float_large_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/fully_connected_float_large_relaxed.mod.py.cpp
index 8e05f82..5db5a3b 100644
--- a/nn/runtime/test/generated/tests/fully_connected_float_large_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/fully_connected_float_large_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, fully_connected_float_large_relaxed) {
     execute(fully_connected_float_large_relaxed::CreateModel,
             fully_connected_float_large_relaxed::is_ignored,
-            fully_connected_float_large_relaxed::examples);
+            fully_connected_float_large_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/fully_connected_float_large_weights_as_inputs.mod.py.cpp b/nn/runtime/test/generated/tests/fully_connected_float_large_weights_as_inputs.mod.py.cpp
index 930c0de..f9b00e8 100644
--- a/nn/runtime/test/generated/tests/fully_connected_float_large_weights_as_inputs.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/fully_connected_float_large_weights_as_inputs.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, fully_connected_float_large_weights_as_inputs) {
     execute(fully_connected_float_large_weights_as_inputs::CreateModel,
             fully_connected_float_large_weights_as_inputs::is_ignored,
-            fully_connected_float_large_weights_as_inputs::examples);
+            fully_connected_float_large_weights_as_inputs::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/fully_connected_float_large_weights_as_inputs_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/fully_connected_float_large_weights_as_inputs_relaxed.mod.py.cpp
index 6b4ccae..a38ccd7 100644
--- a/nn/runtime/test/generated/tests/fully_connected_float_large_weights_as_inputs_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/fully_connected_float_large_weights_as_inputs_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, fully_connected_float_large_weights_as_inputs_relaxed) {
     execute(fully_connected_float_large_weights_as_inputs_relaxed::CreateModel,
             fully_connected_float_large_weights_as_inputs_relaxed::is_ignored,
-            fully_connected_float_large_weights_as_inputs_relaxed::examples);
+            fully_connected_float_large_weights_as_inputs_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/fully_connected_float_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/fully_connected_float_relaxed.mod.py.cpp
index 3a1663b..169f398 100644
--- a/nn/runtime/test/generated/tests/fully_connected_float_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/fully_connected_float_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, fully_connected_float_relaxed) {
     execute(fully_connected_float_relaxed::CreateModel,
             fully_connected_float_relaxed::is_ignored,
-            fully_connected_float_relaxed::examples);
+            fully_connected_float_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/fully_connected_float_weights_as_inputs.mod.py.cpp b/nn/runtime/test/generated/tests/fully_connected_float_weights_as_inputs.mod.py.cpp
index bb206a5..7a67532 100644
--- a/nn/runtime/test/generated/tests/fully_connected_float_weights_as_inputs.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/fully_connected_float_weights_as_inputs.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, fully_connected_float_weights_as_inputs) {
     execute(fully_connected_float_weights_as_inputs::CreateModel,
             fully_connected_float_weights_as_inputs::is_ignored,
-            fully_connected_float_weights_as_inputs::examples);
+            fully_connected_float_weights_as_inputs::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/fully_connected_float_weights_as_inputs_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/fully_connected_float_weights_as_inputs_relaxed.mod.py.cpp
index 08eb68e..f25cdc8 100644
--- a/nn/runtime/test/generated/tests/fully_connected_float_weights_as_inputs_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/fully_connected_float_weights_as_inputs_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, fully_connected_float_weights_as_inputs_relaxed) {
     execute(fully_connected_float_weights_as_inputs_relaxed::CreateModel,
             fully_connected_float_weights_as_inputs_relaxed::is_ignored,
-            fully_connected_float_weights_as_inputs_relaxed::examples);
+            fully_connected_float_weights_as_inputs_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/fully_connected_quant8.mod.py.cpp b/nn/runtime/test/generated/tests/fully_connected_quant8.mod.py.cpp
index 84f60a5..ac18b8a 100644
--- a/nn/runtime/test/generated/tests/fully_connected_quant8.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/fully_connected_quant8.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, fully_connected_quant8) {
     execute(fully_connected_quant8::CreateModel,
             fully_connected_quant8::is_ignored,
-            fully_connected_quant8::examples);
+            fully_connected_quant8::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/fully_connected_quant8_2.mod.py.cpp b/nn/runtime/test/generated/tests/fully_connected_quant8_2.mod.py.cpp
index 39404f5..5615a78 100644
--- a/nn/runtime/test/generated/tests/fully_connected_quant8_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/fully_connected_quant8_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, fully_connected_quant8_2) {
     execute(fully_connected_quant8_2::CreateModel,
             fully_connected_quant8_2::is_ignored,
-            fully_connected_quant8_2::examples);
+            fully_connected_quant8_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/fully_connected_quant8_large.mod.py.cpp b/nn/runtime/test/generated/tests/fully_connected_quant8_large.mod.py.cpp
index 08c6b86..5f7b9e8 100644
--- a/nn/runtime/test/generated/tests/fully_connected_quant8_large.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/fully_connected_quant8_large.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, fully_connected_quant8_large) {
     execute(fully_connected_quant8_large::CreateModel,
             fully_connected_quant8_large::is_ignored,
-            fully_connected_quant8_large::examples);
+            fully_connected_quant8_large::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/fully_connected_quant8_large_weights_as_inputs.mod.py.cpp b/nn/runtime/test/generated/tests/fully_connected_quant8_large_weights_as_inputs.mod.py.cpp
index b041517..796c3ba 100644
--- a/nn/runtime/test/generated/tests/fully_connected_quant8_large_weights_as_inputs.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/fully_connected_quant8_large_weights_as_inputs.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, fully_connected_quant8_large_weights_as_inputs) {
     execute(fully_connected_quant8_large_weights_as_inputs::CreateModel,
             fully_connected_quant8_large_weights_as_inputs::is_ignored,
-            fully_connected_quant8_large_weights_as_inputs::examples);
+            fully_connected_quant8_large_weights_as_inputs::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/fully_connected_quant8_weights_as_inputs.mod.py.cpp b/nn/runtime/test/generated/tests/fully_connected_quant8_weights_as_inputs.mod.py.cpp
index d5c2294..51da1f6 100644
--- a/nn/runtime/test/generated/tests/fully_connected_quant8_weights_as_inputs.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/fully_connected_quant8_weights_as_inputs.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, fully_connected_quant8_weights_as_inputs) {
     execute(fully_connected_quant8_weights_as_inputs::CreateModel,
             fully_connected_quant8_weights_as_inputs::is_ignored,
-            fully_connected_quant8_weights_as_inputs::examples);
+            fully_connected_quant8_weights_as_inputs::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/gather.mod.py.cpp b/nn/runtime/test/generated/tests/gather.mod.py.cpp
index 2f9e8f9..f1f89bb 100644
--- a/nn/runtime/test/generated/tests/gather.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/gather.mod.py.cpp
@@ -12,240 +12,240 @@
 TEST_F(GeneratedTests, gather) {
     execute(gather::CreateModel,
             gather::is_ignored,
-            gather::examples);
+            gather::get_examples());
 }
 
 TEST_F(GeneratedTests, gather_relaxed) {
     execute(gather::CreateModel_relaxed,
             gather::is_ignored_relaxed,
-            gather::examples_relaxed);
+            gather::get_examples_relaxed());
 }
 
 TEST_F(GeneratedTests, gather_quant8) {
     execute(gather::CreateModel_quant8,
             gather::is_ignored_quant8,
-            gather::examples_quant8);
+            gather::get_examples_quant8());
 }
 
 TEST_F(GeneratedTests, gather_int32) {
     execute(gather::CreateModel_int32,
             gather::is_ignored_int32,
-            gather::examples_int32);
+            gather::get_examples_int32());
 }
 
 TEST_F(GeneratedTests, gather_float16) {
     execute(gather::CreateModel_float16,
             gather::is_ignored_float16,
-            gather::examples_float16);
+            gather::get_examples_float16());
 }
 
 TEST_F(GeneratedTests, gather_2) {
     execute(gather::CreateModel_2,
             gather::is_ignored_2,
-            gather::examples_2);
+            gather::get_examples_2());
 }
 
 TEST_F(GeneratedTests, gather_relaxed_2) {
     execute(gather::CreateModel_relaxed_2,
             gather::is_ignored_relaxed_2,
-            gather::examples_relaxed_2);
+            gather::get_examples_relaxed_2());
 }
 
 TEST_F(GeneratedTests, gather_quant8_2) {
     execute(gather::CreateModel_quant8_2,
             gather::is_ignored_quant8_2,
-            gather::examples_quant8_2);
+            gather::get_examples_quant8_2());
 }
 
 TEST_F(GeneratedTests, gather_int32_2) {
     execute(gather::CreateModel_int32_2,
             gather::is_ignored_int32_2,
-            gather::examples_int32_2);
+            gather::get_examples_int32_2());
 }
 
 TEST_F(GeneratedTests, gather_float16_2) {
     execute(gather::CreateModel_float16_2,
             gather::is_ignored_float16_2,
-            gather::examples_float16_2);
+            gather::get_examples_float16_2());
 }
 
 TEST_F(GeneratedTests, gather_3) {
     execute(gather::CreateModel_3,
             gather::is_ignored_3,
-            gather::examples_3);
+            gather::get_examples_3());
 }
 
 TEST_F(GeneratedTests, gather_relaxed_3) {
     execute(gather::CreateModel_relaxed_3,
             gather::is_ignored_relaxed_3,
-            gather::examples_relaxed_3);
+            gather::get_examples_relaxed_3());
 }
 
 TEST_F(GeneratedTests, gather_quant8_3) {
     execute(gather::CreateModel_quant8_3,
             gather::is_ignored_quant8_3,
-            gather::examples_quant8_3);
+            gather::get_examples_quant8_3());
 }
 
 TEST_F(GeneratedTests, gather_int32_3) {
     execute(gather::CreateModel_int32_3,
             gather::is_ignored_int32_3,
-            gather::examples_int32_3);
+            gather::get_examples_int32_3());
 }
 
 TEST_F(GeneratedTests, gather_float16_3) {
     execute(gather::CreateModel_float16_3,
             gather::is_ignored_float16_3,
-            gather::examples_float16_3);
+            gather::get_examples_float16_3());
 }
 
 TEST_F(GeneratedTests, gather_4) {
     execute(gather::CreateModel_4,
             gather::is_ignored_4,
-            gather::examples_4);
+            gather::get_examples_4());
 }
 
 TEST_F(GeneratedTests, gather_relaxed_4) {
     execute(gather::CreateModel_relaxed_4,
             gather::is_ignored_relaxed_4,
-            gather::examples_relaxed_4);
+            gather::get_examples_relaxed_4());
 }
 
 TEST_F(GeneratedTests, gather_quant8_4) {
     execute(gather::CreateModel_quant8_4,
             gather::is_ignored_quant8_4,
-            gather::examples_quant8_4);
+            gather::get_examples_quant8_4());
 }
 
 TEST_F(GeneratedTests, gather_int32_4) {
     execute(gather::CreateModel_int32_4,
             gather::is_ignored_int32_4,
-            gather::examples_int32_4);
+            gather::get_examples_int32_4());
 }
 
 TEST_F(GeneratedTests, gather_float16_4) {
     execute(gather::CreateModel_float16_4,
             gather::is_ignored_float16_4,
-            gather::examples_float16_4);
+            gather::get_examples_float16_4());
 }
 
 TEST_F(GeneratedTests, gather_5) {
     execute(gather::CreateModel_5,
             gather::is_ignored_5,
-            gather::examples_5);
+            gather::get_examples_5());
 }
 
 TEST_F(GeneratedTests, gather_relaxed_5) {
     execute(gather::CreateModel_relaxed_5,
             gather::is_ignored_relaxed_5,
-            gather::examples_relaxed_5);
+            gather::get_examples_relaxed_5());
 }
 
 TEST_F(GeneratedTests, gather_quant8_5) {
     execute(gather::CreateModel_quant8_5,
             gather::is_ignored_quant8_5,
-            gather::examples_quant8_5);
+            gather::get_examples_quant8_5());
 }
 
 TEST_F(GeneratedTests, gather_int32_5) {
     execute(gather::CreateModel_int32_5,
             gather::is_ignored_int32_5,
-            gather::examples_int32_5);
+            gather::get_examples_int32_5());
 }
 
 TEST_F(GeneratedTests, gather_float16_5) {
     execute(gather::CreateModel_float16_5,
             gather::is_ignored_float16_5,
-            gather::examples_float16_5);
+            gather::get_examples_float16_5());
 }
 
 TEST_F(GeneratedTests, gather_6) {
     execute(gather::CreateModel_6,
             gather::is_ignored_6,
-            gather::examples_6);
+            gather::get_examples_6());
 }
 
 TEST_F(GeneratedTests, gather_relaxed_6) {
     execute(gather::CreateModel_relaxed_6,
             gather::is_ignored_relaxed_6,
-            gather::examples_relaxed_6);
+            gather::get_examples_relaxed_6());
 }
 
 TEST_F(GeneratedTests, gather_quant8_6) {
     execute(gather::CreateModel_quant8_6,
             gather::is_ignored_quant8_6,
-            gather::examples_quant8_6);
+            gather::get_examples_quant8_6());
 }
 
 TEST_F(GeneratedTests, gather_int32_6) {
     execute(gather::CreateModel_int32_6,
             gather::is_ignored_int32_6,
-            gather::examples_int32_6);
+            gather::get_examples_int32_6());
 }
 
 TEST_F(GeneratedTests, gather_float16_6) {
     execute(gather::CreateModel_float16_6,
             gather::is_ignored_float16_6,
-            gather::examples_float16_6);
+            gather::get_examples_float16_6());
 }
 
 TEST_F(GeneratedTests, gather_7) {
     execute(gather::CreateModel_7,
             gather::is_ignored_7,
-            gather::examples_7);
+            gather::get_examples_7());
 }
 
 TEST_F(GeneratedTests, gather_relaxed_7) {
     execute(gather::CreateModel_relaxed_7,
             gather::is_ignored_relaxed_7,
-            gather::examples_relaxed_7);
+            gather::get_examples_relaxed_7());
 }
 
 TEST_F(GeneratedTests, gather_quant8_7) {
     execute(gather::CreateModel_quant8_7,
             gather::is_ignored_quant8_7,
-            gather::examples_quant8_7);
+            gather::get_examples_quant8_7());
 }
 
 TEST_F(GeneratedTests, gather_int32_7) {
     execute(gather::CreateModel_int32_7,
             gather::is_ignored_int32_7,
-            gather::examples_int32_7);
+            gather::get_examples_int32_7());
 }
 
 TEST_F(GeneratedTests, gather_float16_7) {
     execute(gather::CreateModel_float16_7,
             gather::is_ignored_float16_7,
-            gather::examples_float16_7);
+            gather::get_examples_float16_7());
 }
 
 TEST_F(GeneratedTests, gather_8) {
     execute(gather::CreateModel_8,
             gather::is_ignored_8,
-            gather::examples_8);
+            gather::get_examples_8());
 }
 
 TEST_F(GeneratedTests, gather_relaxed_8) {
     execute(gather::CreateModel_relaxed_8,
             gather::is_ignored_relaxed_8,
-            gather::examples_relaxed_8);
+            gather::get_examples_relaxed_8());
 }
 
 TEST_F(GeneratedTests, gather_quant8_8) {
     execute(gather::CreateModel_quant8_8,
             gather::is_ignored_quant8_8,
-            gather::examples_quant8_8);
+            gather::get_examples_quant8_8());
 }
 
 TEST_F(GeneratedTests, gather_int32_8) {
     execute(gather::CreateModel_int32_8,
             gather::is_ignored_int32_8,
-            gather::examples_int32_8);
+            gather::get_examples_int32_8());
 }
 
 TEST_F(GeneratedTests, gather_float16_8) {
     execute(gather::CreateModel_float16_8,
             gather::is_ignored_float16_8,
-            gather::examples_float16_8);
+            gather::get_examples_float16_8());
 }
 
diff --git a/nn/runtime/test/generated/tests/gather_higher_rank.mod.py.cpp b/nn/runtime/test/generated/tests/gather_higher_rank.mod.py.cpp
index 5673322..0edc083 100644
--- a/nn/runtime/test/generated/tests/gather_higher_rank.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/gather_higher_rank.mod.py.cpp
@@ -12,24 +12,24 @@
 TEST_F(GeneratedTests, gather_higher_rank) {
     execute(gather_higher_rank::CreateModel,
             gather_higher_rank::is_ignored,
-            gather_higher_rank::examples);
+            gather_higher_rank::get_examples());
 }
 
 TEST_F(GeneratedTests, gather_higher_rank_relaxed) {
     execute(gather_higher_rank::CreateModel_relaxed,
             gather_higher_rank::is_ignored_relaxed,
-            gather_higher_rank::examples_relaxed);
+            gather_higher_rank::get_examples_relaxed());
 }
 
 TEST_F(GeneratedTests, gather_higher_rank_quant8) {
     execute(gather_higher_rank::CreateModel_quant8,
             gather_higher_rank::is_ignored_quant8,
-            gather_higher_rank::examples_quant8);
+            gather_higher_rank::get_examples_quant8());
 }
 
 TEST_F(GeneratedTests, gather_higher_rank_int32) {
     execute(gather_higher_rank::CreateModel_int32,
             gather_higher_rank::is_ignored_int32,
-            gather_higher_rank::examples_int32);
+            gather_higher_rank::get_examples_int32());
 }
 
diff --git a/nn/runtime/test/generated/tests/grouped_conv2d.mod.py.cpp b/nn/runtime/test/generated/tests/grouped_conv2d.mod.py.cpp
index b2bb071..16ae0a6 100644
--- a/nn/runtime/test/generated/tests/grouped_conv2d.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/grouped_conv2d.mod.py.cpp
@@ -12,432 +12,432 @@
 TEST_F(GeneratedTests, grouped_conv2d_nhwc_none) {
     execute(grouped_conv2d::CreateModel_nhwc_none,
             grouped_conv2d::is_ignored_nhwc_none,
-            grouped_conv2d::examples_nhwc_none);
+            grouped_conv2d::get_examples_nhwc_none());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nhwc_none_weight_as_input) {
     execute(grouped_conv2d::CreateModel_nhwc_none_weight_as_input,
             grouped_conv2d::is_ignored_nhwc_none_weight_as_input,
-            grouped_conv2d::examples_nhwc_none_weight_as_input);
+            grouped_conv2d::get_examples_nhwc_none_weight_as_input());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nhwc_none_relaxed) {
     execute(grouped_conv2d::CreateModel_nhwc_none_relaxed,
             grouped_conv2d::is_ignored_nhwc_none_relaxed,
-            grouped_conv2d::examples_nhwc_none_relaxed);
+            grouped_conv2d::get_examples_nhwc_none_relaxed());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nhwc_none_relaxed_weight_as_input) {
     execute(grouped_conv2d::CreateModel_nhwc_none_relaxed_weight_as_input,
             grouped_conv2d::is_ignored_nhwc_none_relaxed_weight_as_input,
-            grouped_conv2d::examples_nhwc_none_relaxed_weight_as_input);
+            grouped_conv2d::get_examples_nhwc_none_relaxed_weight_as_input());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nhwc_none_quant8) {
     execute(grouped_conv2d::CreateModel_nhwc_none_quant8,
             grouped_conv2d::is_ignored_nhwc_none_quant8,
-            grouped_conv2d::examples_nhwc_none_quant8);
+            grouped_conv2d::get_examples_nhwc_none_quant8());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nhwc_none_quant8_weight_as_input) {
     execute(grouped_conv2d::CreateModel_nhwc_none_quant8_weight_as_input,
             grouped_conv2d::is_ignored_nhwc_none_quant8_weight_as_input,
-            grouped_conv2d::examples_nhwc_none_quant8_weight_as_input);
+            grouped_conv2d::get_examples_nhwc_none_quant8_weight_as_input());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nhwc_relu) {
     execute(grouped_conv2d::CreateModel_nhwc_relu,
             grouped_conv2d::is_ignored_nhwc_relu,
-            grouped_conv2d::examples_nhwc_relu);
+            grouped_conv2d::get_examples_nhwc_relu());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nhwc_relu_weight_as_input) {
     execute(grouped_conv2d::CreateModel_nhwc_relu_weight_as_input,
             grouped_conv2d::is_ignored_nhwc_relu_weight_as_input,
-            grouped_conv2d::examples_nhwc_relu_weight_as_input);
+            grouped_conv2d::get_examples_nhwc_relu_weight_as_input());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nhwc_relu_relaxed) {
     execute(grouped_conv2d::CreateModel_nhwc_relu_relaxed,
             grouped_conv2d::is_ignored_nhwc_relu_relaxed,
-            grouped_conv2d::examples_nhwc_relu_relaxed);
+            grouped_conv2d::get_examples_nhwc_relu_relaxed());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nhwc_relu_relaxed_weight_as_input) {
     execute(grouped_conv2d::CreateModel_nhwc_relu_relaxed_weight_as_input,
             grouped_conv2d::is_ignored_nhwc_relu_relaxed_weight_as_input,
-            grouped_conv2d::examples_nhwc_relu_relaxed_weight_as_input);
+            grouped_conv2d::get_examples_nhwc_relu_relaxed_weight_as_input());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nhwc_relu_quant8) {
     execute(grouped_conv2d::CreateModel_nhwc_relu_quant8,
             grouped_conv2d::is_ignored_nhwc_relu_quant8,
-            grouped_conv2d::examples_nhwc_relu_quant8);
+            grouped_conv2d::get_examples_nhwc_relu_quant8());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nhwc_relu_quant8_weight_as_input) {
     execute(grouped_conv2d::CreateModel_nhwc_relu_quant8_weight_as_input,
             grouped_conv2d::is_ignored_nhwc_relu_quant8_weight_as_input,
-            grouped_conv2d::examples_nhwc_relu_quant8_weight_as_input);
+            grouped_conv2d::get_examples_nhwc_relu_quant8_weight_as_input());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nhwc_relu1) {
     execute(grouped_conv2d::CreateModel_nhwc_relu1,
             grouped_conv2d::is_ignored_nhwc_relu1,
-            grouped_conv2d::examples_nhwc_relu1);
+            grouped_conv2d::get_examples_nhwc_relu1());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nhwc_relu1_weight_as_input) {
     execute(grouped_conv2d::CreateModel_nhwc_relu1_weight_as_input,
             grouped_conv2d::is_ignored_nhwc_relu1_weight_as_input,
-            grouped_conv2d::examples_nhwc_relu1_weight_as_input);
+            grouped_conv2d::get_examples_nhwc_relu1_weight_as_input());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nhwc_relu1_relaxed) {
     execute(grouped_conv2d::CreateModel_nhwc_relu1_relaxed,
             grouped_conv2d::is_ignored_nhwc_relu1_relaxed,
-            grouped_conv2d::examples_nhwc_relu1_relaxed);
+            grouped_conv2d::get_examples_nhwc_relu1_relaxed());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nhwc_relu1_relaxed_weight_as_input) {
     execute(grouped_conv2d::CreateModel_nhwc_relu1_relaxed_weight_as_input,
             grouped_conv2d::is_ignored_nhwc_relu1_relaxed_weight_as_input,
-            grouped_conv2d::examples_nhwc_relu1_relaxed_weight_as_input);
+            grouped_conv2d::get_examples_nhwc_relu1_relaxed_weight_as_input());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nhwc_relu1_quant8) {
     execute(grouped_conv2d::CreateModel_nhwc_relu1_quant8,
             grouped_conv2d::is_ignored_nhwc_relu1_quant8,
-            grouped_conv2d::examples_nhwc_relu1_quant8);
+            grouped_conv2d::get_examples_nhwc_relu1_quant8());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nhwc_relu1_quant8_weight_as_input) {
     execute(grouped_conv2d::CreateModel_nhwc_relu1_quant8_weight_as_input,
             grouped_conv2d::is_ignored_nhwc_relu1_quant8_weight_as_input,
-            grouped_conv2d::examples_nhwc_relu1_quant8_weight_as_input);
+            grouped_conv2d::get_examples_nhwc_relu1_quant8_weight_as_input());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nhwc_relu6) {
     execute(grouped_conv2d::CreateModel_nhwc_relu6,
             grouped_conv2d::is_ignored_nhwc_relu6,
-            grouped_conv2d::examples_nhwc_relu6);
+            grouped_conv2d::get_examples_nhwc_relu6());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nhwc_relu6_weight_as_input) {
     execute(grouped_conv2d::CreateModel_nhwc_relu6_weight_as_input,
             grouped_conv2d::is_ignored_nhwc_relu6_weight_as_input,
-            grouped_conv2d::examples_nhwc_relu6_weight_as_input);
+            grouped_conv2d::get_examples_nhwc_relu6_weight_as_input());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nhwc_relu6_relaxed) {
     execute(grouped_conv2d::CreateModel_nhwc_relu6_relaxed,
             grouped_conv2d::is_ignored_nhwc_relu6_relaxed,
-            grouped_conv2d::examples_nhwc_relu6_relaxed);
+            grouped_conv2d::get_examples_nhwc_relu6_relaxed());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nhwc_relu6_relaxed_weight_as_input) {
     execute(grouped_conv2d::CreateModel_nhwc_relu6_relaxed_weight_as_input,
             grouped_conv2d::is_ignored_nhwc_relu6_relaxed_weight_as_input,
-            grouped_conv2d::examples_nhwc_relu6_relaxed_weight_as_input);
+            grouped_conv2d::get_examples_nhwc_relu6_relaxed_weight_as_input());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nhwc_relu6_quant8) {
     execute(grouped_conv2d::CreateModel_nhwc_relu6_quant8,
             grouped_conv2d::is_ignored_nhwc_relu6_quant8,
-            grouped_conv2d::examples_nhwc_relu6_quant8);
+            grouped_conv2d::get_examples_nhwc_relu6_quant8());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nhwc_relu6_quant8_weight_as_input) {
     execute(grouped_conv2d::CreateModel_nhwc_relu6_quant8_weight_as_input,
             grouped_conv2d::is_ignored_nhwc_relu6_quant8_weight_as_input,
-            grouped_conv2d::examples_nhwc_relu6_quant8_weight_as_input);
+            grouped_conv2d::get_examples_nhwc_relu6_quant8_weight_as_input());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nchw_none) {
     execute(grouped_conv2d::CreateModel_nchw_none,
             grouped_conv2d::is_ignored_nchw_none,
-            grouped_conv2d::examples_nchw_none);
+            grouped_conv2d::get_examples_nchw_none());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nchw_none_weight_as_input) {
     execute(grouped_conv2d::CreateModel_nchw_none_weight_as_input,
             grouped_conv2d::is_ignored_nchw_none_weight_as_input,
-            grouped_conv2d::examples_nchw_none_weight_as_input);
+            grouped_conv2d::get_examples_nchw_none_weight_as_input());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nchw_none_relaxed) {
     execute(grouped_conv2d::CreateModel_nchw_none_relaxed,
             grouped_conv2d::is_ignored_nchw_none_relaxed,
-            grouped_conv2d::examples_nchw_none_relaxed);
+            grouped_conv2d::get_examples_nchw_none_relaxed());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nchw_none_relaxed_weight_as_input) {
     execute(grouped_conv2d::CreateModel_nchw_none_relaxed_weight_as_input,
             grouped_conv2d::is_ignored_nchw_none_relaxed_weight_as_input,
-            grouped_conv2d::examples_nchw_none_relaxed_weight_as_input);
+            grouped_conv2d::get_examples_nchw_none_relaxed_weight_as_input());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nchw_none_quant8) {
     execute(grouped_conv2d::CreateModel_nchw_none_quant8,
             grouped_conv2d::is_ignored_nchw_none_quant8,
-            grouped_conv2d::examples_nchw_none_quant8);
+            grouped_conv2d::get_examples_nchw_none_quant8());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nchw_none_quant8_weight_as_input) {
     execute(grouped_conv2d::CreateModel_nchw_none_quant8_weight_as_input,
             grouped_conv2d::is_ignored_nchw_none_quant8_weight_as_input,
-            grouped_conv2d::examples_nchw_none_quant8_weight_as_input);
+            grouped_conv2d::get_examples_nchw_none_quant8_weight_as_input());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nchw_relu) {
     execute(grouped_conv2d::CreateModel_nchw_relu,
             grouped_conv2d::is_ignored_nchw_relu,
-            grouped_conv2d::examples_nchw_relu);
+            grouped_conv2d::get_examples_nchw_relu());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nchw_relu_weight_as_input) {
     execute(grouped_conv2d::CreateModel_nchw_relu_weight_as_input,
             grouped_conv2d::is_ignored_nchw_relu_weight_as_input,
-            grouped_conv2d::examples_nchw_relu_weight_as_input);
+            grouped_conv2d::get_examples_nchw_relu_weight_as_input());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nchw_relu_relaxed) {
     execute(grouped_conv2d::CreateModel_nchw_relu_relaxed,
             grouped_conv2d::is_ignored_nchw_relu_relaxed,
-            grouped_conv2d::examples_nchw_relu_relaxed);
+            grouped_conv2d::get_examples_nchw_relu_relaxed());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nchw_relu_relaxed_weight_as_input) {
     execute(grouped_conv2d::CreateModel_nchw_relu_relaxed_weight_as_input,
             grouped_conv2d::is_ignored_nchw_relu_relaxed_weight_as_input,
-            grouped_conv2d::examples_nchw_relu_relaxed_weight_as_input);
+            grouped_conv2d::get_examples_nchw_relu_relaxed_weight_as_input());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nchw_relu_quant8) {
     execute(grouped_conv2d::CreateModel_nchw_relu_quant8,
             grouped_conv2d::is_ignored_nchw_relu_quant8,
-            grouped_conv2d::examples_nchw_relu_quant8);
+            grouped_conv2d::get_examples_nchw_relu_quant8());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nchw_relu_quant8_weight_as_input) {
     execute(grouped_conv2d::CreateModel_nchw_relu_quant8_weight_as_input,
             grouped_conv2d::is_ignored_nchw_relu_quant8_weight_as_input,
-            grouped_conv2d::examples_nchw_relu_quant8_weight_as_input);
+            grouped_conv2d::get_examples_nchw_relu_quant8_weight_as_input());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nchw_relu1) {
     execute(grouped_conv2d::CreateModel_nchw_relu1,
             grouped_conv2d::is_ignored_nchw_relu1,
-            grouped_conv2d::examples_nchw_relu1);
+            grouped_conv2d::get_examples_nchw_relu1());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nchw_relu1_weight_as_input) {
     execute(grouped_conv2d::CreateModel_nchw_relu1_weight_as_input,
             grouped_conv2d::is_ignored_nchw_relu1_weight_as_input,
-            grouped_conv2d::examples_nchw_relu1_weight_as_input);
+            grouped_conv2d::get_examples_nchw_relu1_weight_as_input());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nchw_relu1_relaxed) {
     execute(grouped_conv2d::CreateModel_nchw_relu1_relaxed,
             grouped_conv2d::is_ignored_nchw_relu1_relaxed,
-            grouped_conv2d::examples_nchw_relu1_relaxed);
+            grouped_conv2d::get_examples_nchw_relu1_relaxed());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nchw_relu1_relaxed_weight_as_input) {
     execute(grouped_conv2d::CreateModel_nchw_relu1_relaxed_weight_as_input,
             grouped_conv2d::is_ignored_nchw_relu1_relaxed_weight_as_input,
-            grouped_conv2d::examples_nchw_relu1_relaxed_weight_as_input);
+            grouped_conv2d::get_examples_nchw_relu1_relaxed_weight_as_input());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nchw_relu1_quant8) {
     execute(grouped_conv2d::CreateModel_nchw_relu1_quant8,
             grouped_conv2d::is_ignored_nchw_relu1_quant8,
-            grouped_conv2d::examples_nchw_relu1_quant8);
+            grouped_conv2d::get_examples_nchw_relu1_quant8());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nchw_relu1_quant8_weight_as_input) {
     execute(grouped_conv2d::CreateModel_nchw_relu1_quant8_weight_as_input,
             grouped_conv2d::is_ignored_nchw_relu1_quant8_weight_as_input,
-            grouped_conv2d::examples_nchw_relu1_quant8_weight_as_input);
+            grouped_conv2d::get_examples_nchw_relu1_quant8_weight_as_input());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nchw_relu6) {
     execute(grouped_conv2d::CreateModel_nchw_relu6,
             grouped_conv2d::is_ignored_nchw_relu6,
-            grouped_conv2d::examples_nchw_relu6);
+            grouped_conv2d::get_examples_nchw_relu6());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nchw_relu6_weight_as_input) {
     execute(grouped_conv2d::CreateModel_nchw_relu6_weight_as_input,
             grouped_conv2d::is_ignored_nchw_relu6_weight_as_input,
-            grouped_conv2d::examples_nchw_relu6_weight_as_input);
+            grouped_conv2d::get_examples_nchw_relu6_weight_as_input());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nchw_relu6_relaxed) {
     execute(grouped_conv2d::CreateModel_nchw_relu6_relaxed,
             grouped_conv2d::is_ignored_nchw_relu6_relaxed,
-            grouped_conv2d::examples_nchw_relu6_relaxed);
+            grouped_conv2d::get_examples_nchw_relu6_relaxed());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nchw_relu6_relaxed_weight_as_input) {
     execute(grouped_conv2d::CreateModel_nchw_relu6_relaxed_weight_as_input,
             grouped_conv2d::is_ignored_nchw_relu6_relaxed_weight_as_input,
-            grouped_conv2d::examples_nchw_relu6_relaxed_weight_as_input);
+            grouped_conv2d::get_examples_nchw_relu6_relaxed_weight_as_input());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nchw_relu6_quant8) {
     execute(grouped_conv2d::CreateModel_nchw_relu6_quant8,
             grouped_conv2d::is_ignored_nchw_relu6_quant8,
-            grouped_conv2d::examples_nchw_relu6_quant8);
+            grouped_conv2d::get_examples_nchw_relu6_quant8());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_nchw_relu6_quant8_weight_as_input) {
     execute(grouped_conv2d::CreateModel_nchw_relu6_quant8_weight_as_input,
             grouped_conv2d::is_ignored_nchw_relu6_quant8_weight_as_input,
-            grouped_conv2d::examples_nchw_relu6_quant8_weight_as_input);
+            grouped_conv2d::get_examples_nchw_relu6_quant8_weight_as_input());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_large_nhwc) {
     execute(grouped_conv2d::CreateModel_large_nhwc,
             grouped_conv2d::is_ignored_large_nhwc,
-            grouped_conv2d::examples_large_nhwc);
+            grouped_conv2d::get_examples_large_nhwc());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_large_nhwc_weight_as_input) {
     execute(grouped_conv2d::CreateModel_large_nhwc_weight_as_input,
             grouped_conv2d::is_ignored_large_nhwc_weight_as_input,
-            grouped_conv2d::examples_large_nhwc_weight_as_input);
+            grouped_conv2d::get_examples_large_nhwc_weight_as_input());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_large_nhwc_relaxed) {
     execute(grouped_conv2d::CreateModel_large_nhwc_relaxed,
             grouped_conv2d::is_ignored_large_nhwc_relaxed,
-            grouped_conv2d::examples_large_nhwc_relaxed);
+            grouped_conv2d::get_examples_large_nhwc_relaxed());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_large_nhwc_relaxed_weight_as_input) {
     execute(grouped_conv2d::CreateModel_large_nhwc_relaxed_weight_as_input,
             grouped_conv2d::is_ignored_large_nhwc_relaxed_weight_as_input,
-            grouped_conv2d::examples_large_nhwc_relaxed_weight_as_input);
+            grouped_conv2d::get_examples_large_nhwc_relaxed_weight_as_input());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_large_nhwc_quant8) {
     execute(grouped_conv2d::CreateModel_large_nhwc_quant8,
             grouped_conv2d::is_ignored_large_nhwc_quant8,
-            grouped_conv2d::examples_large_nhwc_quant8);
+            grouped_conv2d::get_examples_large_nhwc_quant8());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_large_nhwc_quant8_weight_as_input) {
     execute(grouped_conv2d::CreateModel_large_nhwc_quant8_weight_as_input,
             grouped_conv2d::is_ignored_large_nhwc_quant8_weight_as_input,
-            grouped_conv2d::examples_large_nhwc_quant8_weight_as_input);
+            grouped_conv2d::get_examples_large_nhwc_quant8_weight_as_input());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_large_nchw) {
     execute(grouped_conv2d::CreateModel_large_nchw,
             grouped_conv2d::is_ignored_large_nchw,
-            grouped_conv2d::examples_large_nchw);
+            grouped_conv2d::get_examples_large_nchw());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_large_nchw_weight_as_input) {
     execute(grouped_conv2d::CreateModel_large_nchw_weight_as_input,
             grouped_conv2d::is_ignored_large_nchw_weight_as_input,
-            grouped_conv2d::examples_large_nchw_weight_as_input);
+            grouped_conv2d::get_examples_large_nchw_weight_as_input());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_large_nchw_relaxed) {
     execute(grouped_conv2d::CreateModel_large_nchw_relaxed,
             grouped_conv2d::is_ignored_large_nchw_relaxed,
-            grouped_conv2d::examples_large_nchw_relaxed);
+            grouped_conv2d::get_examples_large_nchw_relaxed());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_large_nchw_relaxed_weight_as_input) {
     execute(grouped_conv2d::CreateModel_large_nchw_relaxed_weight_as_input,
             grouped_conv2d::is_ignored_large_nchw_relaxed_weight_as_input,
-            grouped_conv2d::examples_large_nchw_relaxed_weight_as_input);
+            grouped_conv2d::get_examples_large_nchw_relaxed_weight_as_input());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_large_nchw_quant8) {
     execute(grouped_conv2d::CreateModel_large_nchw_quant8,
             grouped_conv2d::is_ignored_large_nchw_quant8,
-            grouped_conv2d::examples_large_nchw_quant8);
+            grouped_conv2d::get_examples_large_nchw_quant8());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_large_nchw_quant8_weight_as_input) {
     execute(grouped_conv2d::CreateModel_large_nchw_quant8_weight_as_input,
             grouped_conv2d::is_ignored_large_nchw_quant8_weight_as_input,
-            grouped_conv2d::examples_large_nchw_quant8_weight_as_input);
+            grouped_conv2d::get_examples_large_nchw_quant8_weight_as_input());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_channel_nhwc) {
     execute(grouped_conv2d::CreateModel_channel_nhwc,
             grouped_conv2d::is_ignored_channel_nhwc,
-            grouped_conv2d::examples_channel_nhwc);
+            grouped_conv2d::get_examples_channel_nhwc());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_channel_nhwc_weight_as_input) {
     execute(grouped_conv2d::CreateModel_channel_nhwc_weight_as_input,
             grouped_conv2d::is_ignored_channel_nhwc_weight_as_input,
-            grouped_conv2d::examples_channel_nhwc_weight_as_input);
+            grouped_conv2d::get_examples_channel_nhwc_weight_as_input());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_channel_nhwc_relaxed) {
     execute(grouped_conv2d::CreateModel_channel_nhwc_relaxed,
             grouped_conv2d::is_ignored_channel_nhwc_relaxed,
-            grouped_conv2d::examples_channel_nhwc_relaxed);
+            grouped_conv2d::get_examples_channel_nhwc_relaxed());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_channel_nhwc_relaxed_weight_as_input) {
     execute(grouped_conv2d::CreateModel_channel_nhwc_relaxed_weight_as_input,
             grouped_conv2d::is_ignored_channel_nhwc_relaxed_weight_as_input,
-            grouped_conv2d::examples_channel_nhwc_relaxed_weight_as_input);
+            grouped_conv2d::get_examples_channel_nhwc_relaxed_weight_as_input());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_channel_nhwc_quant8) {
     execute(grouped_conv2d::CreateModel_channel_nhwc_quant8,
             grouped_conv2d::is_ignored_channel_nhwc_quant8,
-            grouped_conv2d::examples_channel_nhwc_quant8);
+            grouped_conv2d::get_examples_channel_nhwc_quant8());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_channel_nhwc_quant8_weight_as_input) {
     execute(grouped_conv2d::CreateModel_channel_nhwc_quant8_weight_as_input,
             grouped_conv2d::is_ignored_channel_nhwc_quant8_weight_as_input,
-            grouped_conv2d::examples_channel_nhwc_quant8_weight_as_input);
+            grouped_conv2d::get_examples_channel_nhwc_quant8_weight_as_input());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_channel_nchw) {
     execute(grouped_conv2d::CreateModel_channel_nchw,
             grouped_conv2d::is_ignored_channel_nchw,
-            grouped_conv2d::examples_channel_nchw);
+            grouped_conv2d::get_examples_channel_nchw());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_channel_nchw_weight_as_input) {
     execute(grouped_conv2d::CreateModel_channel_nchw_weight_as_input,
             grouped_conv2d::is_ignored_channel_nchw_weight_as_input,
-            grouped_conv2d::examples_channel_nchw_weight_as_input);
+            grouped_conv2d::get_examples_channel_nchw_weight_as_input());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_channel_nchw_relaxed) {
     execute(grouped_conv2d::CreateModel_channel_nchw_relaxed,
             grouped_conv2d::is_ignored_channel_nchw_relaxed,
-            grouped_conv2d::examples_channel_nchw_relaxed);
+            grouped_conv2d::get_examples_channel_nchw_relaxed());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_channel_nchw_relaxed_weight_as_input) {
     execute(grouped_conv2d::CreateModel_channel_nchw_relaxed_weight_as_input,
             grouped_conv2d::is_ignored_channel_nchw_relaxed_weight_as_input,
-            grouped_conv2d::examples_channel_nchw_relaxed_weight_as_input);
+            grouped_conv2d::get_examples_channel_nchw_relaxed_weight_as_input());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_channel_nchw_quant8) {
     execute(grouped_conv2d::CreateModel_channel_nchw_quant8,
             grouped_conv2d::is_ignored_channel_nchw_quant8,
-            grouped_conv2d::examples_channel_nchw_quant8);
+            grouped_conv2d::get_examples_channel_nchw_quant8());
 }
 
 TEST_F(GeneratedTests, grouped_conv2d_channel_nchw_quant8_weight_as_input) {
     execute(grouped_conv2d::CreateModel_channel_nchw_quant8_weight_as_input,
             grouped_conv2d::is_ignored_channel_nchw_quant8_weight_as_input,
-            grouped_conv2d::examples_channel_nchw_quant8_weight_as_input);
+            grouped_conv2d::get_examples_channel_nchw_quant8_weight_as_input());
 }
 
diff --git a/nn/runtime/test/generated/tests/hashtable_lookup_float.mod.py.cpp b/nn/runtime/test/generated/tests/hashtable_lookup_float.mod.py.cpp
index 41b620d..9acd20f 100644
--- a/nn/runtime/test/generated/tests/hashtable_lookup_float.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/hashtable_lookup_float.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, hashtable_lookup_float) {
     execute(hashtable_lookup_float::CreateModel,
             hashtable_lookup_float::is_ignored,
-            hashtable_lookup_float::examples);
+            hashtable_lookup_float::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/hashtable_lookup_float_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/hashtable_lookup_float_relaxed.mod.py.cpp
index 7d7356e..ddebee9 100644
--- a/nn/runtime/test/generated/tests/hashtable_lookup_float_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/hashtable_lookup_float_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, hashtable_lookup_float_relaxed) {
     execute(hashtable_lookup_float_relaxed::CreateModel,
             hashtable_lookup_float_relaxed::is_ignored,
-            hashtable_lookup_float_relaxed::examples);
+            hashtable_lookup_float_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/hashtable_lookup_quant8.mod.py.cpp b/nn/runtime/test/generated/tests/hashtable_lookup_quant8.mod.py.cpp
index 5bced5b..3b6b36c 100644
--- a/nn/runtime/test/generated/tests/hashtable_lookup_quant8.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/hashtable_lookup_quant8.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, hashtable_lookup_quant8) {
     execute(hashtable_lookup_quant8::CreateModel,
             hashtable_lookup_quant8::is_ignored,
-            hashtable_lookup_quant8::examples);
+            hashtable_lookup_quant8::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/heatmap_max_keypoint.mod.py.cpp b/nn/runtime/test/generated/tests/heatmap_max_keypoint.mod.py.cpp
index 128d6ac..ea686bb 100644
--- a/nn/runtime/test/generated/tests/heatmap_max_keypoint.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/heatmap_max_keypoint.mod.py.cpp
@@ -12,48 +12,48 @@
 TEST_F(GeneratedTests, heatmap_max_keypoint_nhwc) {
     execute(heatmap_max_keypoint::CreateModel_nhwc,
             heatmap_max_keypoint::is_ignored_nhwc,
-            heatmap_max_keypoint::examples_nhwc);
+            heatmap_max_keypoint::get_examples_nhwc());
 }
 
 TEST_F(GeneratedTests, heatmap_max_keypoint_nhwc_relaxed) {
     execute(heatmap_max_keypoint::CreateModel_nhwc_relaxed,
             heatmap_max_keypoint::is_ignored_nhwc_relaxed,
-            heatmap_max_keypoint::examples_nhwc_relaxed);
+            heatmap_max_keypoint::get_examples_nhwc_relaxed());
 }
 
 TEST_F(GeneratedTests, heatmap_max_keypoint_nchw) {
     execute(heatmap_max_keypoint::CreateModel_nchw,
             heatmap_max_keypoint::is_ignored_nchw,
-            heatmap_max_keypoint::examples_nchw);
+            heatmap_max_keypoint::get_examples_nchw());
 }
 
 TEST_F(GeneratedTests, heatmap_max_keypoint_nchw_relaxed) {
     execute(heatmap_max_keypoint::CreateModel_nchw_relaxed,
             heatmap_max_keypoint::is_ignored_nchw_relaxed,
-            heatmap_max_keypoint::examples_nchw_relaxed);
+            heatmap_max_keypoint::get_examples_nchw_relaxed());
 }
 
 TEST_F(GeneratedTests, heatmap_max_keypoint_nhwc_2) {
     execute(heatmap_max_keypoint::CreateModel_nhwc_2,
             heatmap_max_keypoint::is_ignored_nhwc_2,
-            heatmap_max_keypoint::examples_nhwc_2);
+            heatmap_max_keypoint::get_examples_nhwc_2());
 }
 
 TEST_F(GeneratedTests, heatmap_max_keypoint_nhwc_relaxed_2) {
     execute(heatmap_max_keypoint::CreateModel_nhwc_relaxed_2,
             heatmap_max_keypoint::is_ignored_nhwc_relaxed_2,
-            heatmap_max_keypoint::examples_nhwc_relaxed_2);
+            heatmap_max_keypoint::get_examples_nhwc_relaxed_2());
 }
 
 TEST_F(GeneratedTests, heatmap_max_keypoint_nchw_2) {
     execute(heatmap_max_keypoint::CreateModel_nchw_2,
             heatmap_max_keypoint::is_ignored_nchw_2,
-            heatmap_max_keypoint::examples_nchw_2);
+            heatmap_max_keypoint::get_examples_nchw_2());
 }
 
 TEST_F(GeneratedTests, heatmap_max_keypoint_nchw_relaxed_2) {
     execute(heatmap_max_keypoint::CreateModel_nchw_relaxed_2,
             heatmap_max_keypoint::is_ignored_nchw_relaxed_2,
-            heatmap_max_keypoint::examples_nchw_relaxed_2);
+            heatmap_max_keypoint::get_examples_nchw_relaxed_2());
 }
 
diff --git a/nn/runtime/test/generated/tests/l2_normalization.mod.py.cpp b/nn/runtime/test/generated/tests/l2_normalization.mod.py.cpp
index 1fc8c6c..7807515 100644
--- a/nn/runtime/test/generated/tests/l2_normalization.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/l2_normalization.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, l2_normalization) {
     execute(l2_normalization::CreateModel,
             l2_normalization::is_ignored,
-            l2_normalization::examples);
+            l2_normalization::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/l2_normalization_2.mod.py.cpp b/nn/runtime/test/generated/tests/l2_normalization_2.mod.py.cpp
index 760d5b1..75af663 100644
--- a/nn/runtime/test/generated/tests/l2_normalization_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/l2_normalization_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, l2_normalization_2) {
     execute(l2_normalization_2::CreateModel,
             l2_normalization_2::is_ignored,
-            l2_normalization_2::examples);
+            l2_normalization_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/l2_normalization_2_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/l2_normalization_2_relaxed.mod.py.cpp
index 3b16fc0..c105178 100644
--- a/nn/runtime/test/generated/tests/l2_normalization_2_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/l2_normalization_2_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, l2_normalization_2_relaxed) {
     execute(l2_normalization_2_relaxed::CreateModel,
             l2_normalization_2_relaxed::is_ignored,
-            l2_normalization_2_relaxed::examples);
+            l2_normalization_2_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/l2_normalization_large.mod.py.cpp b/nn/runtime/test/generated/tests/l2_normalization_large.mod.py.cpp
index 9bc329a..2e8a276 100644
--- a/nn/runtime/test/generated/tests/l2_normalization_large.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/l2_normalization_large.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, l2_normalization_large) {
     execute(l2_normalization_large::CreateModel,
             l2_normalization_large::is_ignored,
-            l2_normalization_large::examples);
+            l2_normalization_large::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/l2_normalization_large_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/l2_normalization_large_relaxed.mod.py.cpp
index 1e60c42..b95dcb3 100644
--- a/nn/runtime/test/generated/tests/l2_normalization_large_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/l2_normalization_large_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, l2_normalization_large_relaxed) {
     execute(l2_normalization_large_relaxed::CreateModel,
             l2_normalization_large_relaxed::is_ignored,
-            l2_normalization_large_relaxed::examples);
+            l2_normalization_large_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/l2_normalization_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/l2_normalization_relaxed.mod.py.cpp
index cd12a58..d3d9039 100644
--- a/nn/runtime/test/generated/tests/l2_normalization_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/l2_normalization_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, l2_normalization_relaxed) {
     execute(l2_normalization_relaxed::CreateModel,
             l2_normalization_relaxed::is_ignored,
-            l2_normalization_relaxed::examples);
+            l2_normalization_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/l2_normalization_v1_2.mod.py.cpp b/nn/runtime/test/generated/tests/l2_normalization_v1_2.mod.py.cpp
index c160ca7..1de3469 100644
--- a/nn/runtime/test/generated/tests/l2_normalization_v1_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/l2_normalization_v1_2.mod.py.cpp
@@ -12,276 +12,276 @@
 TEST_F(GeneratedTests, l2_normalization_v1_2_dim1_axis0) {
     execute(l2_normalization_v1_2::CreateModel_dim1_axis0,
             l2_normalization_v1_2::is_ignored_dim1_axis0,
-            l2_normalization_v1_2::examples_dim1_axis0);
+            l2_normalization_v1_2::get_examples_dim1_axis0());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_dim2_axis1) {
     execute(l2_normalization_v1_2::CreateModel_dim2_axis1,
             l2_normalization_v1_2::is_ignored_dim2_axis1,
-            l2_normalization_v1_2::examples_dim2_axis1);
+            l2_normalization_v1_2::get_examples_dim2_axis1());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_dim3_axis2) {
     execute(l2_normalization_v1_2::CreateModel_dim3_axis2,
             l2_normalization_v1_2::is_ignored_dim3_axis2,
-            l2_normalization_v1_2::examples_dim3_axis2);
+            l2_normalization_v1_2::get_examples_dim3_axis2());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_relaxed_dim1_axis0) {
     execute(l2_normalization_v1_2::CreateModel_relaxed_dim1_axis0,
             l2_normalization_v1_2::is_ignored_relaxed_dim1_axis0,
-            l2_normalization_v1_2::examples_relaxed_dim1_axis0);
+            l2_normalization_v1_2::get_examples_relaxed_dim1_axis0());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_relaxed_dim2_axis1) {
     execute(l2_normalization_v1_2::CreateModel_relaxed_dim2_axis1,
             l2_normalization_v1_2::is_ignored_relaxed_dim2_axis1,
-            l2_normalization_v1_2::examples_relaxed_dim2_axis1);
+            l2_normalization_v1_2::get_examples_relaxed_dim2_axis1());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_relaxed_dim3_axis2) {
     execute(l2_normalization_v1_2::CreateModel_relaxed_dim3_axis2,
             l2_normalization_v1_2::is_ignored_relaxed_dim3_axis2,
-            l2_normalization_v1_2::examples_relaxed_dim3_axis2);
+            l2_normalization_v1_2::get_examples_relaxed_dim3_axis2());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_dim4_axis0) {
     execute(l2_normalization_v1_2::CreateModel_axis_dim4_axis0,
             l2_normalization_v1_2::is_ignored_axis_dim4_axis0,
-            l2_normalization_v1_2::examples_axis_dim4_axis0);
+            l2_normalization_v1_2::get_examples_axis_dim4_axis0());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_dim4_axis0_neg) {
     execute(l2_normalization_v1_2::CreateModel_axis_dim4_axis0_neg,
             l2_normalization_v1_2::is_ignored_axis_dim4_axis0_neg,
-            l2_normalization_v1_2::examples_axis_dim4_axis0_neg);
+            l2_normalization_v1_2::get_examples_axis_dim4_axis0_neg());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_dim4_axis1) {
     execute(l2_normalization_v1_2::CreateModel_axis_dim4_axis1,
             l2_normalization_v1_2::is_ignored_axis_dim4_axis1,
-            l2_normalization_v1_2::examples_axis_dim4_axis1);
+            l2_normalization_v1_2::get_examples_axis_dim4_axis1());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_dim4_axis1_neg) {
     execute(l2_normalization_v1_2::CreateModel_axis_dim4_axis1_neg,
             l2_normalization_v1_2::is_ignored_axis_dim4_axis1_neg,
-            l2_normalization_v1_2::examples_axis_dim4_axis1_neg);
+            l2_normalization_v1_2::get_examples_axis_dim4_axis1_neg());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_dim4_axis2) {
     execute(l2_normalization_v1_2::CreateModel_axis_dim4_axis2,
             l2_normalization_v1_2::is_ignored_axis_dim4_axis2,
-            l2_normalization_v1_2::examples_axis_dim4_axis2);
+            l2_normalization_v1_2::get_examples_axis_dim4_axis2());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_dim4_axis2_neg) {
     execute(l2_normalization_v1_2::CreateModel_axis_dim4_axis2_neg,
             l2_normalization_v1_2::is_ignored_axis_dim4_axis2_neg,
-            l2_normalization_v1_2::examples_axis_dim4_axis2_neg);
+            l2_normalization_v1_2::get_examples_axis_dim4_axis2_neg());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_dim4_axis3) {
     execute(l2_normalization_v1_2::CreateModel_axis_dim4_axis3,
             l2_normalization_v1_2::is_ignored_axis_dim4_axis3,
-            l2_normalization_v1_2::examples_axis_dim4_axis3);
+            l2_normalization_v1_2::get_examples_axis_dim4_axis3());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_dim4_axis3_neg) {
     execute(l2_normalization_v1_2::CreateModel_axis_dim4_axis3_neg,
             l2_normalization_v1_2::is_ignored_axis_dim4_axis3_neg,
-            l2_normalization_v1_2::examples_axis_dim4_axis3_neg);
+            l2_normalization_v1_2::get_examples_axis_dim4_axis3_neg());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_dim3_axis0) {
     execute(l2_normalization_v1_2::CreateModel_axis_dim3_axis0,
             l2_normalization_v1_2::is_ignored_axis_dim3_axis0,
-            l2_normalization_v1_2::examples_axis_dim3_axis0);
+            l2_normalization_v1_2::get_examples_axis_dim3_axis0());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_dim3_axis0_neg) {
     execute(l2_normalization_v1_2::CreateModel_axis_dim3_axis0_neg,
             l2_normalization_v1_2::is_ignored_axis_dim3_axis0_neg,
-            l2_normalization_v1_2::examples_axis_dim3_axis0_neg);
+            l2_normalization_v1_2::get_examples_axis_dim3_axis0_neg());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_dim3_axis1) {
     execute(l2_normalization_v1_2::CreateModel_axis_dim3_axis1,
             l2_normalization_v1_2::is_ignored_axis_dim3_axis1,
-            l2_normalization_v1_2::examples_axis_dim3_axis1);
+            l2_normalization_v1_2::get_examples_axis_dim3_axis1());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_dim3_axis1_neg) {
     execute(l2_normalization_v1_2::CreateModel_axis_dim3_axis1_neg,
             l2_normalization_v1_2::is_ignored_axis_dim3_axis1_neg,
-            l2_normalization_v1_2::examples_axis_dim3_axis1_neg);
+            l2_normalization_v1_2::get_examples_axis_dim3_axis1_neg());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_dim3_axis2) {
     execute(l2_normalization_v1_2::CreateModel_axis_dim3_axis2,
             l2_normalization_v1_2::is_ignored_axis_dim3_axis2,
-            l2_normalization_v1_2::examples_axis_dim3_axis2);
+            l2_normalization_v1_2::get_examples_axis_dim3_axis2());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_dim3_axis2_neg) {
     execute(l2_normalization_v1_2::CreateModel_axis_dim3_axis2_neg,
             l2_normalization_v1_2::is_ignored_axis_dim3_axis2_neg,
-            l2_normalization_v1_2::examples_axis_dim3_axis2_neg);
+            l2_normalization_v1_2::get_examples_axis_dim3_axis2_neg());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_dim2_axis0) {
     execute(l2_normalization_v1_2::CreateModel_axis_dim2_axis0,
             l2_normalization_v1_2::is_ignored_axis_dim2_axis0,
-            l2_normalization_v1_2::examples_axis_dim2_axis0);
+            l2_normalization_v1_2::get_examples_axis_dim2_axis0());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_dim2_axis0_neg) {
     execute(l2_normalization_v1_2::CreateModel_axis_dim2_axis0_neg,
             l2_normalization_v1_2::is_ignored_axis_dim2_axis0_neg,
-            l2_normalization_v1_2::examples_axis_dim2_axis0_neg);
+            l2_normalization_v1_2::get_examples_axis_dim2_axis0_neg());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_dim2_axis1) {
     execute(l2_normalization_v1_2::CreateModel_axis_dim2_axis1,
             l2_normalization_v1_2::is_ignored_axis_dim2_axis1,
-            l2_normalization_v1_2::examples_axis_dim2_axis1);
+            l2_normalization_v1_2::get_examples_axis_dim2_axis1());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_dim2_axis1_neg) {
     execute(l2_normalization_v1_2::CreateModel_axis_dim2_axis1_neg,
             l2_normalization_v1_2::is_ignored_axis_dim2_axis1_neg,
-            l2_normalization_v1_2::examples_axis_dim2_axis1_neg);
+            l2_normalization_v1_2::get_examples_axis_dim2_axis1_neg());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_dim1_axis0) {
     execute(l2_normalization_v1_2::CreateModel_axis_dim1_axis0,
             l2_normalization_v1_2::is_ignored_axis_dim1_axis0,
-            l2_normalization_v1_2::examples_axis_dim1_axis0);
+            l2_normalization_v1_2::get_examples_axis_dim1_axis0());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_dim1_axis0_neg) {
     execute(l2_normalization_v1_2::CreateModel_axis_dim1_axis0_neg,
             l2_normalization_v1_2::is_ignored_axis_dim1_axis0_neg,
-            l2_normalization_v1_2::examples_axis_dim1_axis0_neg);
+            l2_normalization_v1_2::get_examples_axis_dim1_axis0_neg());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_relaxed_dim4_axis0) {
     execute(l2_normalization_v1_2::CreateModel_axis_relaxed_dim4_axis0,
             l2_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis0,
-            l2_normalization_v1_2::examples_axis_relaxed_dim4_axis0);
+            l2_normalization_v1_2::get_examples_axis_relaxed_dim4_axis0());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_relaxed_dim4_axis0_neg) {
     execute(l2_normalization_v1_2::CreateModel_axis_relaxed_dim4_axis0_neg,
             l2_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis0_neg,
-            l2_normalization_v1_2::examples_axis_relaxed_dim4_axis0_neg);
+            l2_normalization_v1_2::get_examples_axis_relaxed_dim4_axis0_neg());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_relaxed_dim4_axis1) {
     execute(l2_normalization_v1_2::CreateModel_axis_relaxed_dim4_axis1,
             l2_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis1,
-            l2_normalization_v1_2::examples_axis_relaxed_dim4_axis1);
+            l2_normalization_v1_2::get_examples_axis_relaxed_dim4_axis1());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_relaxed_dim4_axis1_neg) {
     execute(l2_normalization_v1_2::CreateModel_axis_relaxed_dim4_axis1_neg,
             l2_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis1_neg,
-            l2_normalization_v1_2::examples_axis_relaxed_dim4_axis1_neg);
+            l2_normalization_v1_2::get_examples_axis_relaxed_dim4_axis1_neg());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_relaxed_dim4_axis2) {
     execute(l2_normalization_v1_2::CreateModel_axis_relaxed_dim4_axis2,
             l2_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis2,
-            l2_normalization_v1_2::examples_axis_relaxed_dim4_axis2);
+            l2_normalization_v1_2::get_examples_axis_relaxed_dim4_axis2());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_relaxed_dim4_axis2_neg) {
     execute(l2_normalization_v1_2::CreateModel_axis_relaxed_dim4_axis2_neg,
             l2_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis2_neg,
-            l2_normalization_v1_2::examples_axis_relaxed_dim4_axis2_neg);
+            l2_normalization_v1_2::get_examples_axis_relaxed_dim4_axis2_neg());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_relaxed_dim4_axis3) {
     execute(l2_normalization_v1_2::CreateModel_axis_relaxed_dim4_axis3,
             l2_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis3,
-            l2_normalization_v1_2::examples_axis_relaxed_dim4_axis3);
+            l2_normalization_v1_2::get_examples_axis_relaxed_dim4_axis3());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_relaxed_dim4_axis3_neg) {
     execute(l2_normalization_v1_2::CreateModel_axis_relaxed_dim4_axis3_neg,
             l2_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis3_neg,
-            l2_normalization_v1_2::examples_axis_relaxed_dim4_axis3_neg);
+            l2_normalization_v1_2::get_examples_axis_relaxed_dim4_axis3_neg());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_relaxed_dim3_axis0) {
     execute(l2_normalization_v1_2::CreateModel_axis_relaxed_dim3_axis0,
             l2_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis0,
-            l2_normalization_v1_2::examples_axis_relaxed_dim3_axis0);
+            l2_normalization_v1_2::get_examples_axis_relaxed_dim3_axis0());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_relaxed_dim3_axis0_neg) {
     execute(l2_normalization_v1_2::CreateModel_axis_relaxed_dim3_axis0_neg,
             l2_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis0_neg,
-            l2_normalization_v1_2::examples_axis_relaxed_dim3_axis0_neg);
+            l2_normalization_v1_2::get_examples_axis_relaxed_dim3_axis0_neg());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_relaxed_dim3_axis1) {
     execute(l2_normalization_v1_2::CreateModel_axis_relaxed_dim3_axis1,
             l2_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis1,
-            l2_normalization_v1_2::examples_axis_relaxed_dim3_axis1);
+            l2_normalization_v1_2::get_examples_axis_relaxed_dim3_axis1());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_relaxed_dim3_axis1_neg) {
     execute(l2_normalization_v1_2::CreateModel_axis_relaxed_dim3_axis1_neg,
             l2_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis1_neg,
-            l2_normalization_v1_2::examples_axis_relaxed_dim3_axis1_neg);
+            l2_normalization_v1_2::get_examples_axis_relaxed_dim3_axis1_neg());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_relaxed_dim3_axis2) {
     execute(l2_normalization_v1_2::CreateModel_axis_relaxed_dim3_axis2,
             l2_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis2,
-            l2_normalization_v1_2::examples_axis_relaxed_dim3_axis2);
+            l2_normalization_v1_2::get_examples_axis_relaxed_dim3_axis2());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_relaxed_dim3_axis2_neg) {
     execute(l2_normalization_v1_2::CreateModel_axis_relaxed_dim3_axis2_neg,
             l2_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis2_neg,
-            l2_normalization_v1_2::examples_axis_relaxed_dim3_axis2_neg);
+            l2_normalization_v1_2::get_examples_axis_relaxed_dim3_axis2_neg());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_relaxed_dim2_axis0) {
     execute(l2_normalization_v1_2::CreateModel_axis_relaxed_dim2_axis0,
             l2_normalization_v1_2::is_ignored_axis_relaxed_dim2_axis0,
-            l2_normalization_v1_2::examples_axis_relaxed_dim2_axis0);
+            l2_normalization_v1_2::get_examples_axis_relaxed_dim2_axis0());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_relaxed_dim2_axis0_neg) {
     execute(l2_normalization_v1_2::CreateModel_axis_relaxed_dim2_axis0_neg,
             l2_normalization_v1_2::is_ignored_axis_relaxed_dim2_axis0_neg,
-            l2_normalization_v1_2::examples_axis_relaxed_dim2_axis0_neg);
+            l2_normalization_v1_2::get_examples_axis_relaxed_dim2_axis0_neg());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_relaxed_dim2_axis1) {
     execute(l2_normalization_v1_2::CreateModel_axis_relaxed_dim2_axis1,
             l2_normalization_v1_2::is_ignored_axis_relaxed_dim2_axis1,
-            l2_normalization_v1_2::examples_axis_relaxed_dim2_axis1);
+            l2_normalization_v1_2::get_examples_axis_relaxed_dim2_axis1());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_relaxed_dim2_axis1_neg) {
     execute(l2_normalization_v1_2::CreateModel_axis_relaxed_dim2_axis1_neg,
             l2_normalization_v1_2::is_ignored_axis_relaxed_dim2_axis1_neg,
-            l2_normalization_v1_2::examples_axis_relaxed_dim2_axis1_neg);
+            l2_normalization_v1_2::get_examples_axis_relaxed_dim2_axis1_neg());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_relaxed_dim1_axis0) {
     execute(l2_normalization_v1_2::CreateModel_axis_relaxed_dim1_axis0,
             l2_normalization_v1_2::is_ignored_axis_relaxed_dim1_axis0,
-            l2_normalization_v1_2::examples_axis_relaxed_dim1_axis0);
+            l2_normalization_v1_2::get_examples_axis_relaxed_dim1_axis0());
 }
 
 TEST_F(GeneratedTests, l2_normalization_v1_2_axis_relaxed_dim1_axis0_neg) {
     execute(l2_normalization_v1_2::CreateModel_axis_relaxed_dim1_axis0_neg,
             l2_normalization_v1_2::is_ignored_axis_relaxed_dim1_axis0_neg,
-            l2_normalization_v1_2::examples_axis_relaxed_dim1_axis0_neg);
+            l2_normalization_v1_2::get_examples_axis_relaxed_dim1_axis0_neg());
 }
 
diff --git a/nn/runtime/test/generated/tests/l2_pool_float.mod.py.cpp b/nn/runtime/test/generated/tests/l2_pool_float.mod.py.cpp
index fe8dfb9..a9b6ec0 100644
--- a/nn/runtime/test/generated/tests/l2_pool_float.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/l2_pool_float.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, l2_pool_float) {
     execute(l2_pool_float::CreateModel,
             l2_pool_float::is_ignored,
-            l2_pool_float::examples);
+            l2_pool_float::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/l2_pool_float_2.mod.py.cpp b/nn/runtime/test/generated/tests/l2_pool_float_2.mod.py.cpp
index 5032a2f..03b7cbd 100644
--- a/nn/runtime/test/generated/tests/l2_pool_float_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/l2_pool_float_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, l2_pool_float_2) {
     execute(l2_pool_float_2::CreateModel,
             l2_pool_float_2::is_ignored,
-            l2_pool_float_2::examples);
+            l2_pool_float_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/l2_pool_float_2_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/l2_pool_float_2_relaxed.mod.py.cpp
index bc7c9b3..67c8edb 100644
--- a/nn/runtime/test/generated/tests/l2_pool_float_2_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/l2_pool_float_2_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, l2_pool_float_2_relaxed) {
     execute(l2_pool_float_2_relaxed::CreateModel,
             l2_pool_float_2_relaxed::is_ignored,
-            l2_pool_float_2_relaxed::examples);
+            l2_pool_float_2_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/l2_pool_float_large.mod.py.cpp b/nn/runtime/test/generated/tests/l2_pool_float_large.mod.py.cpp
index 3396ba7..7bc9aa3 100644
--- a/nn/runtime/test/generated/tests/l2_pool_float_large.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/l2_pool_float_large.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, l2_pool_float_large) {
     execute(l2_pool_float_large::CreateModel,
             l2_pool_float_large::is_ignored,
-            l2_pool_float_large::examples);
+            l2_pool_float_large::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/l2_pool_float_large_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/l2_pool_float_large_relaxed.mod.py.cpp
index f243d8b..eb36ce5 100644
--- a/nn/runtime/test/generated/tests/l2_pool_float_large_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/l2_pool_float_large_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, l2_pool_float_large_relaxed) {
     execute(l2_pool_float_large_relaxed::CreateModel,
             l2_pool_float_large_relaxed::is_ignored,
-            l2_pool_float_large_relaxed::examples);
+            l2_pool_float_large_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/l2_pool_float_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/l2_pool_float_relaxed.mod.py.cpp
index c631dd1..3a0de8c 100644
--- a/nn/runtime/test/generated/tests/l2_pool_float_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/l2_pool_float_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, l2_pool_float_relaxed) {
     execute(l2_pool_float_relaxed::CreateModel,
             l2_pool_float_relaxed::is_ignored,
-            l2_pool_float_relaxed::examples);
+            l2_pool_float_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/l2_pool_v1_2.mod.py.cpp b/nn/runtime/test/generated/tests/l2_pool_v1_2.mod.py.cpp
index 263087d..1a5ee95 100644
--- a/nn/runtime/test/generated/tests/l2_pool_v1_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/l2_pool_v1_2.mod.py.cpp
@@ -12,72 +12,72 @@
 TEST_F(GeneratedTests, l2_pool_v1_2_nhwc) {
     execute(l2_pool_v1_2::CreateModel_nhwc,
             l2_pool_v1_2::is_ignored_nhwc,
-            l2_pool_v1_2::examples_nhwc);
+            l2_pool_v1_2::get_examples_nhwc());
 }
 
 TEST_F(GeneratedTests, l2_pool_v1_2_nhwc_relaxed) {
     execute(l2_pool_v1_2::CreateModel_nhwc_relaxed,
             l2_pool_v1_2::is_ignored_nhwc_relaxed,
-            l2_pool_v1_2::examples_nhwc_relaxed);
+            l2_pool_v1_2::get_examples_nhwc_relaxed());
 }
 
 TEST_F(GeneratedTests, l2_pool_v1_2_nchw) {
     execute(l2_pool_v1_2::CreateModel_nchw,
             l2_pool_v1_2::is_ignored_nchw,
-            l2_pool_v1_2::examples_nchw);
+            l2_pool_v1_2::get_examples_nchw());
 }
 
 TEST_F(GeneratedTests, l2_pool_v1_2_nchw_relaxed) {
     execute(l2_pool_v1_2::CreateModel_nchw_relaxed,
             l2_pool_v1_2::is_ignored_nchw_relaxed,
-            l2_pool_v1_2::examples_nchw_relaxed);
+            l2_pool_v1_2::get_examples_nchw_relaxed());
 }
 
 TEST_F(GeneratedTests, l2_pool_v1_2_nhwc_2) {
     execute(l2_pool_v1_2::CreateModel_nhwc_2,
             l2_pool_v1_2::is_ignored_nhwc_2,
-            l2_pool_v1_2::examples_nhwc_2);
+            l2_pool_v1_2::get_examples_nhwc_2());
 }
 
 TEST_F(GeneratedTests, l2_pool_v1_2_nhwc_relaxed_2) {
     execute(l2_pool_v1_2::CreateModel_nhwc_relaxed_2,
             l2_pool_v1_2::is_ignored_nhwc_relaxed_2,
-            l2_pool_v1_2::examples_nhwc_relaxed_2);
+            l2_pool_v1_2::get_examples_nhwc_relaxed_2());
 }
 
 TEST_F(GeneratedTests, l2_pool_v1_2_nchw_2) {
     execute(l2_pool_v1_2::CreateModel_nchw_2,
             l2_pool_v1_2::is_ignored_nchw_2,
-            l2_pool_v1_2::examples_nchw_2);
+            l2_pool_v1_2::get_examples_nchw_2());
 }
 
 TEST_F(GeneratedTests, l2_pool_v1_2_nchw_relaxed_2) {
     execute(l2_pool_v1_2::CreateModel_nchw_relaxed_2,
             l2_pool_v1_2::is_ignored_nchw_relaxed_2,
-            l2_pool_v1_2::examples_nchw_relaxed_2);
+            l2_pool_v1_2::get_examples_nchw_relaxed_2());
 }
 
 TEST_F(GeneratedTests, l2_pool_v1_2_large_nhwc) {
     execute(l2_pool_v1_2::CreateModel_large_nhwc,
             l2_pool_v1_2::is_ignored_large_nhwc,
-            l2_pool_v1_2::examples_large_nhwc);
+            l2_pool_v1_2::get_examples_large_nhwc());
 }
 
 TEST_F(GeneratedTests, l2_pool_v1_2_large_nhwc_relaxed) {
     execute(l2_pool_v1_2::CreateModel_large_nhwc_relaxed,
             l2_pool_v1_2::is_ignored_large_nhwc_relaxed,
-            l2_pool_v1_2::examples_large_nhwc_relaxed);
+            l2_pool_v1_2::get_examples_large_nhwc_relaxed());
 }
 
 TEST_F(GeneratedTests, l2_pool_v1_2_large_nchw) {
     execute(l2_pool_v1_2::CreateModel_large_nchw,
             l2_pool_v1_2::is_ignored_large_nchw,
-            l2_pool_v1_2::examples_large_nchw);
+            l2_pool_v1_2::get_examples_large_nchw());
 }
 
 TEST_F(GeneratedTests, l2_pool_v1_2_large_nchw_relaxed) {
     execute(l2_pool_v1_2::CreateModel_large_nchw_relaxed,
             l2_pool_v1_2::is_ignored_large_nchw_relaxed,
-            l2_pool_v1_2::examples_large_nchw_relaxed);
+            l2_pool_v1_2::get_examples_large_nchw_relaxed());
 }
 
diff --git a/nn/runtime/test/generated/tests/layer_norm_lstm.mod.py.cpp b/nn/runtime/test/generated/tests/layer_norm_lstm.mod.py.cpp
index 5cc89db..710f3ce 100644
--- a/nn/runtime/test/generated/tests/layer_norm_lstm.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/layer_norm_lstm.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, layer_norm_lstm) {
     execute(layer_norm_lstm::CreateModel,
             layer_norm_lstm::is_ignored,
-            layer_norm_lstm::examples);
+            layer_norm_lstm::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/local_response_norm_float_1.mod.py.cpp b/nn/runtime/test/generated/tests/local_response_norm_float_1.mod.py.cpp
index 7282ccc..1bd8fc9 100644
--- a/nn/runtime/test/generated/tests/local_response_norm_float_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/local_response_norm_float_1.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, local_response_norm_float_1) {
     execute(local_response_norm_float_1::CreateModel,
             local_response_norm_float_1::is_ignored,
-            local_response_norm_float_1::examples);
+            local_response_norm_float_1::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/local_response_norm_float_1_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/local_response_norm_float_1_relaxed.mod.py.cpp
index fd213fa..4762bda 100644
--- a/nn/runtime/test/generated/tests/local_response_norm_float_1_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/local_response_norm_float_1_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, local_response_norm_float_1_relaxed) {
     execute(local_response_norm_float_1_relaxed::CreateModel,
             local_response_norm_float_1_relaxed::is_ignored,
-            local_response_norm_float_1_relaxed::examples);
+            local_response_norm_float_1_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/local_response_norm_float_2.mod.py.cpp b/nn/runtime/test/generated/tests/local_response_norm_float_2.mod.py.cpp
index 091585f..b83b98d 100644
--- a/nn/runtime/test/generated/tests/local_response_norm_float_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/local_response_norm_float_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, local_response_norm_float_2) {
     execute(local_response_norm_float_2::CreateModel,
             local_response_norm_float_2::is_ignored,
-            local_response_norm_float_2::examples);
+            local_response_norm_float_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/local_response_norm_float_2_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/local_response_norm_float_2_relaxed.mod.py.cpp
index cab8b5c..c0898b7 100644
--- a/nn/runtime/test/generated/tests/local_response_norm_float_2_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/local_response_norm_float_2_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, local_response_norm_float_2_relaxed) {
     execute(local_response_norm_float_2_relaxed::CreateModel,
             local_response_norm_float_2_relaxed::is_ignored,
-            local_response_norm_float_2_relaxed::examples);
+            local_response_norm_float_2_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/local_response_norm_float_3.mod.py.cpp b/nn/runtime/test/generated/tests/local_response_norm_float_3.mod.py.cpp
index 08f8c87..3c017bc 100644
--- a/nn/runtime/test/generated/tests/local_response_norm_float_3.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/local_response_norm_float_3.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, local_response_norm_float_3) {
     execute(local_response_norm_float_3::CreateModel,
             local_response_norm_float_3::is_ignored,
-            local_response_norm_float_3::examples);
+            local_response_norm_float_3::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/local_response_norm_float_3_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/local_response_norm_float_3_relaxed.mod.py.cpp
index b799b34..a17c156 100644
--- a/nn/runtime/test/generated/tests/local_response_norm_float_3_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/local_response_norm_float_3_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, local_response_norm_float_3_relaxed) {
     execute(local_response_norm_float_3_relaxed::CreateModel,
             local_response_norm_float_3_relaxed::is_ignored,
-            local_response_norm_float_3_relaxed::examples);
+            local_response_norm_float_3_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/local_response_norm_float_4.mod.py.cpp b/nn/runtime/test/generated/tests/local_response_norm_float_4.mod.py.cpp
index 4bb303b..21d4357 100644
--- a/nn/runtime/test/generated/tests/local_response_norm_float_4.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/local_response_norm_float_4.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, local_response_norm_float_4) {
     execute(local_response_norm_float_4::CreateModel,
             local_response_norm_float_4::is_ignored,
-            local_response_norm_float_4::examples);
+            local_response_norm_float_4::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/local_response_norm_float_4_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/local_response_norm_float_4_relaxed.mod.py.cpp
index 6481470..30febe1 100644
--- a/nn/runtime/test/generated/tests/local_response_norm_float_4_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/local_response_norm_float_4_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, local_response_norm_float_4_relaxed) {
     execute(local_response_norm_float_4_relaxed::CreateModel,
             local_response_norm_float_4_relaxed::is_ignored,
-            local_response_norm_float_4_relaxed::examples);
+            local_response_norm_float_4_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/local_response_normalization_v1_2.mod.py.cpp b/nn/runtime/test/generated/tests/local_response_normalization_v1_2.mod.py.cpp
index eef6f86..160da4c 100644
--- a/nn/runtime/test/generated/tests/local_response_normalization_v1_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/local_response_normalization_v1_2.mod.py.cpp
@@ -12,756 +12,756 @@
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim4_axis0) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim4_axis0,
             local_response_normalization_v1_2::is_ignored_axis_dim4_axis0,
-            local_response_normalization_v1_2::examples_axis_dim4_axis0);
+            local_response_normalization_v1_2::get_examples_axis_dim4_axis0());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim4_axis0_neg) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim4_axis0_neg,
             local_response_normalization_v1_2::is_ignored_axis_dim4_axis0_neg,
-            local_response_normalization_v1_2::examples_axis_dim4_axis0_neg);
+            local_response_normalization_v1_2::get_examples_axis_dim4_axis0_neg());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim4_axis1) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim4_axis1,
             local_response_normalization_v1_2::is_ignored_axis_dim4_axis1,
-            local_response_normalization_v1_2::examples_axis_dim4_axis1);
+            local_response_normalization_v1_2::get_examples_axis_dim4_axis1());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim4_axis1_neg) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim4_axis1_neg,
             local_response_normalization_v1_2::is_ignored_axis_dim4_axis1_neg,
-            local_response_normalization_v1_2::examples_axis_dim4_axis1_neg);
+            local_response_normalization_v1_2::get_examples_axis_dim4_axis1_neg());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim4_axis2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim4_axis2,
             local_response_normalization_v1_2::is_ignored_axis_dim4_axis2,
-            local_response_normalization_v1_2::examples_axis_dim4_axis2);
+            local_response_normalization_v1_2::get_examples_axis_dim4_axis2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim4_axis2_neg) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim4_axis2_neg,
             local_response_normalization_v1_2::is_ignored_axis_dim4_axis2_neg,
-            local_response_normalization_v1_2::examples_axis_dim4_axis2_neg);
+            local_response_normalization_v1_2::get_examples_axis_dim4_axis2_neg());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim4_axis3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim4_axis3,
             local_response_normalization_v1_2::is_ignored_axis_dim4_axis3,
-            local_response_normalization_v1_2::examples_axis_dim4_axis3);
+            local_response_normalization_v1_2::get_examples_axis_dim4_axis3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim4_axis3_neg) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim4_axis3_neg,
             local_response_normalization_v1_2::is_ignored_axis_dim4_axis3_neg,
-            local_response_normalization_v1_2::examples_axis_dim4_axis3_neg);
+            local_response_normalization_v1_2::get_examples_axis_dim4_axis3_neg());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim3_axis0) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim3_axis0,
             local_response_normalization_v1_2::is_ignored_axis_dim3_axis0,
-            local_response_normalization_v1_2::examples_axis_dim3_axis0);
+            local_response_normalization_v1_2::get_examples_axis_dim3_axis0());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim3_axis0_neg) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim3_axis0_neg,
             local_response_normalization_v1_2::is_ignored_axis_dim3_axis0_neg,
-            local_response_normalization_v1_2::examples_axis_dim3_axis0_neg);
+            local_response_normalization_v1_2::get_examples_axis_dim3_axis0_neg());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim3_axis1) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim3_axis1,
             local_response_normalization_v1_2::is_ignored_axis_dim3_axis1,
-            local_response_normalization_v1_2::examples_axis_dim3_axis1);
+            local_response_normalization_v1_2::get_examples_axis_dim3_axis1());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim3_axis1_neg) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim3_axis1_neg,
             local_response_normalization_v1_2::is_ignored_axis_dim3_axis1_neg,
-            local_response_normalization_v1_2::examples_axis_dim3_axis1_neg);
+            local_response_normalization_v1_2::get_examples_axis_dim3_axis1_neg());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim3_axis2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim3_axis2,
             local_response_normalization_v1_2::is_ignored_axis_dim3_axis2,
-            local_response_normalization_v1_2::examples_axis_dim3_axis2);
+            local_response_normalization_v1_2::get_examples_axis_dim3_axis2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim3_axis2_neg) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim3_axis2_neg,
             local_response_normalization_v1_2::is_ignored_axis_dim3_axis2_neg,
-            local_response_normalization_v1_2::examples_axis_dim3_axis2_neg);
+            local_response_normalization_v1_2::get_examples_axis_dim3_axis2_neg());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim2_axis0) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim2_axis0,
             local_response_normalization_v1_2::is_ignored_axis_dim2_axis0,
-            local_response_normalization_v1_2::examples_axis_dim2_axis0);
+            local_response_normalization_v1_2::get_examples_axis_dim2_axis0());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim2_axis0_neg) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim2_axis0_neg,
             local_response_normalization_v1_2::is_ignored_axis_dim2_axis0_neg,
-            local_response_normalization_v1_2::examples_axis_dim2_axis0_neg);
+            local_response_normalization_v1_2::get_examples_axis_dim2_axis0_neg());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim2_axis1) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim2_axis1,
             local_response_normalization_v1_2::is_ignored_axis_dim2_axis1,
-            local_response_normalization_v1_2::examples_axis_dim2_axis1);
+            local_response_normalization_v1_2::get_examples_axis_dim2_axis1());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim2_axis1_neg) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim2_axis1_neg,
             local_response_normalization_v1_2::is_ignored_axis_dim2_axis1_neg,
-            local_response_normalization_v1_2::examples_axis_dim2_axis1_neg);
+            local_response_normalization_v1_2::get_examples_axis_dim2_axis1_neg());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim1_axis0) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim1_axis0,
             local_response_normalization_v1_2::is_ignored_axis_dim1_axis0,
-            local_response_normalization_v1_2::examples_axis_dim1_axis0);
+            local_response_normalization_v1_2::get_examples_axis_dim1_axis0());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim1_axis0_neg) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim1_axis0_neg,
             local_response_normalization_v1_2::is_ignored_axis_dim1_axis0_neg,
-            local_response_normalization_v1_2::examples_axis_dim1_axis0_neg);
+            local_response_normalization_v1_2::get_examples_axis_dim1_axis0_neg());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim4_axis0) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim4_axis0,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis0,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis0);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis0());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim4_axis0_neg) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim4_axis0_neg,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis0_neg,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis0_neg);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis0_neg());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim4_axis1) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim4_axis1,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis1,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis1);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis1());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim4_axis1_neg) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim4_axis1_neg,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis1_neg,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis1_neg);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis1_neg());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim4_axis2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim4_axis2,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis2,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis2);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim4_axis2_neg) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim4_axis2_neg,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis2_neg,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis2_neg);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis2_neg());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim4_axis3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim4_axis3,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis3,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis3);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim4_axis3_neg) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim4_axis3_neg,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis3_neg,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis3_neg);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis3_neg());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim3_axis0) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim3_axis0,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis0,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis0);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis0());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim3_axis0_neg) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim3_axis0_neg,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis0_neg,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis0_neg);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis0_neg());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim3_axis1) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim3_axis1,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis1,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis1);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis1());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim3_axis1_neg) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim3_axis1_neg,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis1_neg,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis1_neg);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis1_neg());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim3_axis2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim3_axis2,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis2,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis2);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim3_axis2_neg) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim3_axis2_neg,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis2_neg,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis2_neg);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis2_neg());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim2_axis0) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim2_axis0,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim2_axis0,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim2_axis0);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim2_axis0());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim2_axis0_neg) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim2_axis0_neg,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim2_axis0_neg,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim2_axis0_neg);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim2_axis0_neg());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim2_axis1) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim2_axis1,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim2_axis1,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim2_axis1);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim2_axis1());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim2_axis1_neg) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim2_axis1_neg,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim2_axis1_neg,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim2_axis1_neg);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim2_axis1_neg());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim1_axis0) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim1_axis0,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim1_axis0,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim1_axis0);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim1_axis0());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim1_axis0_neg) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim1_axis0_neg,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim1_axis0_neg,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim1_axis0_neg);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim1_axis0_neg());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim4_axis0_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim4_axis0_2,
             local_response_normalization_v1_2::is_ignored_axis_dim4_axis0_2,
-            local_response_normalization_v1_2::examples_axis_dim4_axis0_2);
+            local_response_normalization_v1_2::get_examples_axis_dim4_axis0_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim4_axis0_neg_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim4_axis0_neg_2,
             local_response_normalization_v1_2::is_ignored_axis_dim4_axis0_neg_2,
-            local_response_normalization_v1_2::examples_axis_dim4_axis0_neg_2);
+            local_response_normalization_v1_2::get_examples_axis_dim4_axis0_neg_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim4_axis1_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim4_axis1_2,
             local_response_normalization_v1_2::is_ignored_axis_dim4_axis1_2,
-            local_response_normalization_v1_2::examples_axis_dim4_axis1_2);
+            local_response_normalization_v1_2::get_examples_axis_dim4_axis1_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim4_axis1_neg_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim4_axis1_neg_2,
             local_response_normalization_v1_2::is_ignored_axis_dim4_axis1_neg_2,
-            local_response_normalization_v1_2::examples_axis_dim4_axis1_neg_2);
+            local_response_normalization_v1_2::get_examples_axis_dim4_axis1_neg_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim4_axis2_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim4_axis2_2,
             local_response_normalization_v1_2::is_ignored_axis_dim4_axis2_2,
-            local_response_normalization_v1_2::examples_axis_dim4_axis2_2);
+            local_response_normalization_v1_2::get_examples_axis_dim4_axis2_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim4_axis2_neg_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim4_axis2_neg_2,
             local_response_normalization_v1_2::is_ignored_axis_dim4_axis2_neg_2,
-            local_response_normalization_v1_2::examples_axis_dim4_axis2_neg_2);
+            local_response_normalization_v1_2::get_examples_axis_dim4_axis2_neg_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim4_axis3_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim4_axis3_2,
             local_response_normalization_v1_2::is_ignored_axis_dim4_axis3_2,
-            local_response_normalization_v1_2::examples_axis_dim4_axis3_2);
+            local_response_normalization_v1_2::get_examples_axis_dim4_axis3_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim4_axis3_neg_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim4_axis3_neg_2,
             local_response_normalization_v1_2::is_ignored_axis_dim4_axis3_neg_2,
-            local_response_normalization_v1_2::examples_axis_dim4_axis3_neg_2);
+            local_response_normalization_v1_2::get_examples_axis_dim4_axis3_neg_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim3_axis0_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim3_axis0_2,
             local_response_normalization_v1_2::is_ignored_axis_dim3_axis0_2,
-            local_response_normalization_v1_2::examples_axis_dim3_axis0_2);
+            local_response_normalization_v1_2::get_examples_axis_dim3_axis0_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim3_axis0_neg_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim3_axis0_neg_2,
             local_response_normalization_v1_2::is_ignored_axis_dim3_axis0_neg_2,
-            local_response_normalization_v1_2::examples_axis_dim3_axis0_neg_2);
+            local_response_normalization_v1_2::get_examples_axis_dim3_axis0_neg_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim3_axis1_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim3_axis1_2,
             local_response_normalization_v1_2::is_ignored_axis_dim3_axis1_2,
-            local_response_normalization_v1_2::examples_axis_dim3_axis1_2);
+            local_response_normalization_v1_2::get_examples_axis_dim3_axis1_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim3_axis1_neg_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim3_axis1_neg_2,
             local_response_normalization_v1_2::is_ignored_axis_dim3_axis1_neg_2,
-            local_response_normalization_v1_2::examples_axis_dim3_axis1_neg_2);
+            local_response_normalization_v1_2::get_examples_axis_dim3_axis1_neg_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim3_axis2_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim3_axis2_2,
             local_response_normalization_v1_2::is_ignored_axis_dim3_axis2_2,
-            local_response_normalization_v1_2::examples_axis_dim3_axis2_2);
+            local_response_normalization_v1_2::get_examples_axis_dim3_axis2_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim3_axis2_neg_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim3_axis2_neg_2,
             local_response_normalization_v1_2::is_ignored_axis_dim3_axis2_neg_2,
-            local_response_normalization_v1_2::examples_axis_dim3_axis2_neg_2);
+            local_response_normalization_v1_2::get_examples_axis_dim3_axis2_neg_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim2_axis0_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim2_axis0_2,
             local_response_normalization_v1_2::is_ignored_axis_dim2_axis0_2,
-            local_response_normalization_v1_2::examples_axis_dim2_axis0_2);
+            local_response_normalization_v1_2::get_examples_axis_dim2_axis0_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim2_axis0_neg_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim2_axis0_neg_2,
             local_response_normalization_v1_2::is_ignored_axis_dim2_axis0_neg_2,
-            local_response_normalization_v1_2::examples_axis_dim2_axis0_neg_2);
+            local_response_normalization_v1_2::get_examples_axis_dim2_axis0_neg_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim2_axis1_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim2_axis1_2,
             local_response_normalization_v1_2::is_ignored_axis_dim2_axis1_2,
-            local_response_normalization_v1_2::examples_axis_dim2_axis1_2);
+            local_response_normalization_v1_2::get_examples_axis_dim2_axis1_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim2_axis1_neg_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim2_axis1_neg_2,
             local_response_normalization_v1_2::is_ignored_axis_dim2_axis1_neg_2,
-            local_response_normalization_v1_2::examples_axis_dim2_axis1_neg_2);
+            local_response_normalization_v1_2::get_examples_axis_dim2_axis1_neg_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim1_axis0_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim1_axis0_2,
             local_response_normalization_v1_2::is_ignored_axis_dim1_axis0_2,
-            local_response_normalization_v1_2::examples_axis_dim1_axis0_2);
+            local_response_normalization_v1_2::get_examples_axis_dim1_axis0_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim1_axis0_neg_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim1_axis0_neg_2,
             local_response_normalization_v1_2::is_ignored_axis_dim1_axis0_neg_2,
-            local_response_normalization_v1_2::examples_axis_dim1_axis0_neg_2);
+            local_response_normalization_v1_2::get_examples_axis_dim1_axis0_neg_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim4_axis0_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim4_axis0_2,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis0_2,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis0_2);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis0_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim4_axis0_neg_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim4_axis0_neg_2,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis0_neg_2,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis0_neg_2);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis0_neg_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim4_axis1_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim4_axis1_2,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis1_2,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis1_2);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis1_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim4_axis1_neg_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim4_axis1_neg_2,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis1_neg_2,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis1_neg_2);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis1_neg_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim4_axis2_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim4_axis2_2,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis2_2,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis2_2);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis2_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim4_axis2_neg_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim4_axis2_neg_2,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis2_neg_2,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis2_neg_2);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis2_neg_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim4_axis3_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim4_axis3_2,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis3_2,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis3_2);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis3_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim4_axis3_neg_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim4_axis3_neg_2,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis3_neg_2,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis3_neg_2);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis3_neg_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim3_axis0_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim3_axis0_2,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis0_2,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis0_2);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis0_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim3_axis0_neg_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim3_axis0_neg_2,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis0_neg_2,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis0_neg_2);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis0_neg_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim3_axis1_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim3_axis1_2,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis1_2,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis1_2);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis1_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim3_axis1_neg_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim3_axis1_neg_2,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis1_neg_2,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis1_neg_2);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis1_neg_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim3_axis2_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim3_axis2_2,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis2_2,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis2_2);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis2_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim3_axis2_neg_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim3_axis2_neg_2,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis2_neg_2,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis2_neg_2);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis2_neg_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim2_axis0_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim2_axis0_2,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim2_axis0_2,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim2_axis0_2);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim2_axis0_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim2_axis0_neg_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim2_axis0_neg_2,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim2_axis0_neg_2,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim2_axis0_neg_2);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim2_axis0_neg_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim2_axis1_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim2_axis1_2,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim2_axis1_2,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim2_axis1_2);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim2_axis1_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim2_axis1_neg_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim2_axis1_neg_2,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim2_axis1_neg_2,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim2_axis1_neg_2);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim2_axis1_neg_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim1_axis0_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim1_axis0_2,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim1_axis0_2,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim1_axis0_2);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim1_axis0_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim1_axis0_neg_2) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim1_axis0_neg_2,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim1_axis0_neg_2,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim1_axis0_neg_2);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim1_axis0_neg_2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim4_axis0_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim4_axis0_3,
             local_response_normalization_v1_2::is_ignored_axis_dim4_axis0_3,
-            local_response_normalization_v1_2::examples_axis_dim4_axis0_3);
+            local_response_normalization_v1_2::get_examples_axis_dim4_axis0_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim4_axis0_neg_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim4_axis0_neg_3,
             local_response_normalization_v1_2::is_ignored_axis_dim4_axis0_neg_3,
-            local_response_normalization_v1_2::examples_axis_dim4_axis0_neg_3);
+            local_response_normalization_v1_2::get_examples_axis_dim4_axis0_neg_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim4_axis1_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim4_axis1_3,
             local_response_normalization_v1_2::is_ignored_axis_dim4_axis1_3,
-            local_response_normalization_v1_2::examples_axis_dim4_axis1_3);
+            local_response_normalization_v1_2::get_examples_axis_dim4_axis1_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim4_axis1_neg_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim4_axis1_neg_3,
             local_response_normalization_v1_2::is_ignored_axis_dim4_axis1_neg_3,
-            local_response_normalization_v1_2::examples_axis_dim4_axis1_neg_3);
+            local_response_normalization_v1_2::get_examples_axis_dim4_axis1_neg_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim4_axis2_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim4_axis2_3,
             local_response_normalization_v1_2::is_ignored_axis_dim4_axis2_3,
-            local_response_normalization_v1_2::examples_axis_dim4_axis2_3);
+            local_response_normalization_v1_2::get_examples_axis_dim4_axis2_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim4_axis2_neg_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim4_axis2_neg_3,
             local_response_normalization_v1_2::is_ignored_axis_dim4_axis2_neg_3,
-            local_response_normalization_v1_2::examples_axis_dim4_axis2_neg_3);
+            local_response_normalization_v1_2::get_examples_axis_dim4_axis2_neg_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim4_axis3_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim4_axis3_3,
             local_response_normalization_v1_2::is_ignored_axis_dim4_axis3_3,
-            local_response_normalization_v1_2::examples_axis_dim4_axis3_3);
+            local_response_normalization_v1_2::get_examples_axis_dim4_axis3_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim4_axis3_neg_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim4_axis3_neg_3,
             local_response_normalization_v1_2::is_ignored_axis_dim4_axis3_neg_3,
-            local_response_normalization_v1_2::examples_axis_dim4_axis3_neg_3);
+            local_response_normalization_v1_2::get_examples_axis_dim4_axis3_neg_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim3_axis0_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim3_axis0_3,
             local_response_normalization_v1_2::is_ignored_axis_dim3_axis0_3,
-            local_response_normalization_v1_2::examples_axis_dim3_axis0_3);
+            local_response_normalization_v1_2::get_examples_axis_dim3_axis0_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim3_axis0_neg_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim3_axis0_neg_3,
             local_response_normalization_v1_2::is_ignored_axis_dim3_axis0_neg_3,
-            local_response_normalization_v1_2::examples_axis_dim3_axis0_neg_3);
+            local_response_normalization_v1_2::get_examples_axis_dim3_axis0_neg_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim3_axis1_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim3_axis1_3,
             local_response_normalization_v1_2::is_ignored_axis_dim3_axis1_3,
-            local_response_normalization_v1_2::examples_axis_dim3_axis1_3);
+            local_response_normalization_v1_2::get_examples_axis_dim3_axis1_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim3_axis1_neg_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim3_axis1_neg_3,
             local_response_normalization_v1_2::is_ignored_axis_dim3_axis1_neg_3,
-            local_response_normalization_v1_2::examples_axis_dim3_axis1_neg_3);
+            local_response_normalization_v1_2::get_examples_axis_dim3_axis1_neg_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim3_axis2_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim3_axis2_3,
             local_response_normalization_v1_2::is_ignored_axis_dim3_axis2_3,
-            local_response_normalization_v1_2::examples_axis_dim3_axis2_3);
+            local_response_normalization_v1_2::get_examples_axis_dim3_axis2_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim3_axis2_neg_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim3_axis2_neg_3,
             local_response_normalization_v1_2::is_ignored_axis_dim3_axis2_neg_3,
-            local_response_normalization_v1_2::examples_axis_dim3_axis2_neg_3);
+            local_response_normalization_v1_2::get_examples_axis_dim3_axis2_neg_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim2_axis0_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim2_axis0_3,
             local_response_normalization_v1_2::is_ignored_axis_dim2_axis0_3,
-            local_response_normalization_v1_2::examples_axis_dim2_axis0_3);
+            local_response_normalization_v1_2::get_examples_axis_dim2_axis0_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim2_axis0_neg_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim2_axis0_neg_3,
             local_response_normalization_v1_2::is_ignored_axis_dim2_axis0_neg_3,
-            local_response_normalization_v1_2::examples_axis_dim2_axis0_neg_3);
+            local_response_normalization_v1_2::get_examples_axis_dim2_axis0_neg_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim2_axis1_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim2_axis1_3,
             local_response_normalization_v1_2::is_ignored_axis_dim2_axis1_3,
-            local_response_normalization_v1_2::examples_axis_dim2_axis1_3);
+            local_response_normalization_v1_2::get_examples_axis_dim2_axis1_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim2_axis1_neg_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim2_axis1_neg_3,
             local_response_normalization_v1_2::is_ignored_axis_dim2_axis1_neg_3,
-            local_response_normalization_v1_2::examples_axis_dim2_axis1_neg_3);
+            local_response_normalization_v1_2::get_examples_axis_dim2_axis1_neg_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim1_axis0_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim1_axis0_3,
             local_response_normalization_v1_2::is_ignored_axis_dim1_axis0_3,
-            local_response_normalization_v1_2::examples_axis_dim1_axis0_3);
+            local_response_normalization_v1_2::get_examples_axis_dim1_axis0_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_dim1_axis0_neg_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_dim1_axis0_neg_3,
             local_response_normalization_v1_2::is_ignored_axis_dim1_axis0_neg_3,
-            local_response_normalization_v1_2::examples_axis_dim1_axis0_neg_3);
+            local_response_normalization_v1_2::get_examples_axis_dim1_axis0_neg_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim4_axis0_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim4_axis0_3,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis0_3,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis0_3);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis0_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim4_axis0_neg_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim4_axis0_neg_3,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis0_neg_3,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis0_neg_3);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis0_neg_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim4_axis1_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim4_axis1_3,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis1_3,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis1_3);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis1_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim4_axis1_neg_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim4_axis1_neg_3,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis1_neg_3,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis1_neg_3);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis1_neg_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim4_axis2_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim4_axis2_3,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis2_3,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis2_3);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis2_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim4_axis2_neg_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim4_axis2_neg_3,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis2_neg_3,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis2_neg_3);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis2_neg_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim4_axis3_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim4_axis3_3,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis3_3,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis3_3);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis3_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim4_axis3_neg_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim4_axis3_neg_3,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim4_axis3_neg_3,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim4_axis3_neg_3);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim4_axis3_neg_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim3_axis0_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim3_axis0_3,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis0_3,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis0_3);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis0_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim3_axis0_neg_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim3_axis0_neg_3,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis0_neg_3,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis0_neg_3);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis0_neg_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim3_axis1_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim3_axis1_3,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis1_3,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis1_3);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis1_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim3_axis1_neg_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim3_axis1_neg_3,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis1_neg_3,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis1_neg_3);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis1_neg_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim3_axis2_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim3_axis2_3,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis2_3,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis2_3);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis2_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim3_axis2_neg_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim3_axis2_neg_3,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim3_axis2_neg_3,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim3_axis2_neg_3);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim3_axis2_neg_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim2_axis0_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim2_axis0_3,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim2_axis0_3,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim2_axis0_3);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim2_axis0_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim2_axis0_neg_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim2_axis0_neg_3,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim2_axis0_neg_3,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim2_axis0_neg_3);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim2_axis0_neg_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim2_axis1_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim2_axis1_3,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim2_axis1_3,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim2_axis1_3);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim2_axis1_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim2_axis1_neg_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim2_axis1_neg_3,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim2_axis1_neg_3,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim2_axis1_neg_3);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim2_axis1_neg_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim1_axis0_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim1_axis0_3,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim1_axis0_3,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim1_axis0_3);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim1_axis0_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_axis_relaxed_dim1_axis0_neg_3) {
     execute(local_response_normalization_v1_2::CreateModel_axis_relaxed_dim1_axis0_neg_3,
             local_response_normalization_v1_2::is_ignored_axis_relaxed_dim1_axis0_neg_3,
-            local_response_normalization_v1_2::examples_axis_relaxed_dim1_axis0_neg_3);
+            local_response_normalization_v1_2::get_examples_axis_relaxed_dim1_axis0_neg_3());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_dim1_axis0) {
     execute(local_response_normalization_v1_2::CreateModel_dim1_axis0,
             local_response_normalization_v1_2::is_ignored_dim1_axis0,
-            local_response_normalization_v1_2::examples_dim1_axis0);
+            local_response_normalization_v1_2::get_examples_dim1_axis0());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_dim2_axis1) {
     execute(local_response_normalization_v1_2::CreateModel_dim2_axis1,
             local_response_normalization_v1_2::is_ignored_dim2_axis1,
-            local_response_normalization_v1_2::examples_dim2_axis1);
+            local_response_normalization_v1_2::get_examples_dim2_axis1());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_dim3_axis2) {
     execute(local_response_normalization_v1_2::CreateModel_dim3_axis2,
             local_response_normalization_v1_2::is_ignored_dim3_axis2,
-            local_response_normalization_v1_2::examples_dim3_axis2);
+            local_response_normalization_v1_2::get_examples_dim3_axis2());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_relaxed_dim1_axis0) {
     execute(local_response_normalization_v1_2::CreateModel_relaxed_dim1_axis0,
             local_response_normalization_v1_2::is_ignored_relaxed_dim1_axis0,
-            local_response_normalization_v1_2::examples_relaxed_dim1_axis0);
+            local_response_normalization_v1_2::get_examples_relaxed_dim1_axis0());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_relaxed_dim2_axis1) {
     execute(local_response_normalization_v1_2::CreateModel_relaxed_dim2_axis1,
             local_response_normalization_v1_2::is_ignored_relaxed_dim2_axis1,
-            local_response_normalization_v1_2::examples_relaxed_dim2_axis1);
+            local_response_normalization_v1_2::get_examples_relaxed_dim2_axis1());
 }
 
 TEST_F(GeneratedTests, local_response_normalization_v1_2_relaxed_dim3_axis2) {
     execute(local_response_normalization_v1_2::CreateModel_relaxed_dim3_axis2,
             local_response_normalization_v1_2::is_ignored_relaxed_dim3_axis2,
-            local_response_normalization_v1_2::examples_relaxed_dim3_axis2);
+            local_response_normalization_v1_2::get_examples_relaxed_dim3_axis2());
 }
 
diff --git a/nn/runtime/test/generated/tests/logistic_float16_1.mod.py.cpp b/nn/runtime/test/generated/tests/logistic_float16_1.mod.py.cpp
index ad4fe96..7ddf368 100644
--- a/nn/runtime/test/generated/tests/logistic_float16_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/logistic_float16_1.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, logistic_float16_1) {
     execute(logistic_float16_1::CreateModel,
             logistic_float16_1::is_ignored,
-            logistic_float16_1::examples);
+            logistic_float16_1::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/logistic_float16_2.mod.py.cpp b/nn/runtime/test/generated/tests/logistic_float16_2.mod.py.cpp
index c798434..f4b34f4 100644
--- a/nn/runtime/test/generated/tests/logistic_float16_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/logistic_float16_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, logistic_float16_2) {
     execute(logistic_float16_2::CreateModel,
             logistic_float16_2::is_ignored,
-            logistic_float16_2::examples);
+            logistic_float16_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/logistic_float_1.mod.py.cpp b/nn/runtime/test/generated/tests/logistic_float_1.mod.py.cpp
index 850d348..f4950da9 100644
--- a/nn/runtime/test/generated/tests/logistic_float_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/logistic_float_1.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, logistic_float_1) {
     execute(logistic_float_1::CreateModel,
             logistic_float_1::is_ignored,
-            logistic_float_1::examples);
+            logistic_float_1::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/logistic_float_1_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/logistic_float_1_relaxed.mod.py.cpp
index c12d6d0..859836b 100644
--- a/nn/runtime/test/generated/tests/logistic_float_1_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/logistic_float_1_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, logistic_float_1_relaxed) {
     execute(logistic_float_1_relaxed::CreateModel,
             logistic_float_1_relaxed::is_ignored,
-            logistic_float_1_relaxed::examples);
+            logistic_float_1_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/logistic_float_2.mod.py.cpp b/nn/runtime/test/generated/tests/logistic_float_2.mod.py.cpp
index 9a1ce4a..5a53b7a 100644
--- a/nn/runtime/test/generated/tests/logistic_float_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/logistic_float_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, logistic_float_2) {
     execute(logistic_float_2::CreateModel,
             logistic_float_2::is_ignored,
-            logistic_float_2::examples);
+            logistic_float_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/logistic_float_2_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/logistic_float_2_relaxed.mod.py.cpp
index ed26f56..fdcd5cb 100644
--- a/nn/runtime/test/generated/tests/logistic_float_2_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/logistic_float_2_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, logistic_float_2_relaxed) {
     execute(logistic_float_2_relaxed::CreateModel,
             logistic_float_2_relaxed::is_ignored,
-            logistic_float_2_relaxed::examples);
+            logistic_float_2_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/logistic_quant8_1.mod.py.cpp b/nn/runtime/test/generated/tests/logistic_quant8_1.mod.py.cpp
index f87765c..d63096b 100644
--- a/nn/runtime/test/generated/tests/logistic_quant8_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/logistic_quant8_1.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, logistic_quant8_1) {
     execute(logistic_quant8_1::CreateModel,
             logistic_quant8_1::is_ignored,
-            logistic_quant8_1::examples);
+            logistic_quant8_1::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/logistic_quant8_2.mod.py.cpp b/nn/runtime/test/generated/tests/logistic_quant8_2.mod.py.cpp
index 35311c6..26dbc91 100644
--- a/nn/runtime/test/generated/tests/logistic_quant8_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/logistic_quant8_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, logistic_quant8_2) {
     execute(logistic_quant8_2::CreateModel,
             logistic_quant8_2::is_ignored,
-            logistic_quant8_2::examples);
+            logistic_quant8_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/lsh_projection.mod.py.cpp b/nn/runtime/test/generated/tests/lsh_projection.mod.py.cpp
index 779f149..5165367 100644
--- a/nn/runtime/test/generated/tests/lsh_projection.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/lsh_projection.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, lsh_projection) {
     execute(lsh_projection::CreateModel,
             lsh_projection::is_ignored,
-            lsh_projection::examples);
+            lsh_projection::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/lsh_projection_2.mod.py.cpp b/nn/runtime/test/generated/tests/lsh_projection_2.mod.py.cpp
index 0d08f10..96ccfe3 100644
--- a/nn/runtime/test/generated/tests/lsh_projection_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/lsh_projection_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, lsh_projection_2) {
     execute(lsh_projection_2::CreateModel,
             lsh_projection_2::is_ignored,
-            lsh_projection_2::examples);
+            lsh_projection_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/lsh_projection_2_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/lsh_projection_2_relaxed.mod.py.cpp
index 5c58b54..be27afd 100644
--- a/nn/runtime/test/generated/tests/lsh_projection_2_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/lsh_projection_2_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, lsh_projection_2_relaxed) {
     execute(lsh_projection_2_relaxed::CreateModel,
             lsh_projection_2_relaxed::is_ignored,
-            lsh_projection_2_relaxed::examples);
+            lsh_projection_2_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/lsh_projection_3_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/lsh_projection_3_relaxed.mod.py.cpp
index f2edc46..d7ec09a 100644
--- a/nn/runtime/test/generated/tests/lsh_projection_3_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/lsh_projection_3_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, lsh_projection_3_relaxed) {
     execute(lsh_projection_3_relaxed::CreateModel,
             lsh_projection_3_relaxed::is_ignored,
-            lsh_projection_3_relaxed::examples);
+            lsh_projection_3_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/lsh_projection_4_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/lsh_projection_4_relaxed.mod.py.cpp
index b394778..9adcb93 100644
--- a/nn/runtime/test/generated/tests/lsh_projection_4_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/lsh_projection_4_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, lsh_projection_4_relaxed) {
     execute(lsh_projection_4_relaxed::CreateModel,
             lsh_projection_4_relaxed::is_ignored,
-            lsh_projection_4_relaxed::examples);
+            lsh_projection_4_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/lsh_projection_deprecated.mod.py.cpp b/nn/runtime/test/generated/tests/lsh_projection_deprecated.mod.py.cpp
index ee67ec8..0bef1c0 100644
--- a/nn/runtime/test/generated/tests/lsh_projection_deprecated.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/lsh_projection_deprecated.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, lsh_projection_deprecated) {
     execute(lsh_projection_deprecated::CreateModel,
             lsh_projection_deprecated::is_ignored,
-            lsh_projection_deprecated::examples);
+            lsh_projection_deprecated::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/lsh_projection_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/lsh_projection_relaxed.mod.py.cpp
index 27d6498..e9fc9d7 100644
--- a/nn/runtime/test/generated/tests/lsh_projection_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/lsh_projection_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, lsh_projection_relaxed) {
     execute(lsh_projection_relaxed::CreateModel,
             lsh_projection_relaxed::is_ignored,
-            lsh_projection_relaxed::examples);
+            lsh_projection_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/lsh_projection_weights_as_inputs.mod.py.cpp b/nn/runtime/test/generated/tests/lsh_projection_weights_as_inputs.mod.py.cpp
index c3c0131..bf60df2 100644
--- a/nn/runtime/test/generated/tests/lsh_projection_weights_as_inputs.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/lsh_projection_weights_as_inputs.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, lsh_projection_weights_as_inputs) {
     execute(lsh_projection_weights_as_inputs::CreateModel,
             lsh_projection_weights_as_inputs::is_ignored,
-            lsh_projection_weights_as_inputs::examples);
+            lsh_projection_weights_as_inputs::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/lsh_projection_weights_as_inputs_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/lsh_projection_weights_as_inputs_relaxed.mod.py.cpp
index 2db59a9..06b36fb 100644
--- a/nn/runtime/test/generated/tests/lsh_projection_weights_as_inputs_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/lsh_projection_weights_as_inputs_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, lsh_projection_weights_as_inputs_relaxed) {
     execute(lsh_projection_weights_as_inputs_relaxed::CreateModel,
             lsh_projection_weights_as_inputs_relaxed::is_ignored,
-            lsh_projection_weights_as_inputs_relaxed::examples);
+            lsh_projection_weights_as_inputs_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/lstm.mod.py.cpp b/nn/runtime/test/generated/tests/lstm.mod.py.cpp
index 6c9d535..4c16551 100644
--- a/nn/runtime/test/generated/tests/lstm.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/lstm.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, lstm) {
     execute(lstm::CreateModel,
             lstm::is_ignored,
-            lstm::examples);
+            lstm::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/lstm2.mod.py.cpp b/nn/runtime/test/generated/tests/lstm2.mod.py.cpp
index 3b68f68..f84afa5 100644
--- a/nn/runtime/test/generated/tests/lstm2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/lstm2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, lstm2) {
     execute(lstm2::CreateModel,
             lstm2::is_ignored,
-            lstm2::examples);
+            lstm2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/lstm2_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/lstm2_relaxed.mod.py.cpp
index 224a8a7..9bd85a8 100644
--- a/nn/runtime/test/generated/tests/lstm2_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/lstm2_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, lstm2_relaxed) {
     execute(lstm2_relaxed::CreateModel,
             lstm2_relaxed::is_ignored,
-            lstm2_relaxed::examples);
+            lstm2_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/lstm2_state.mod.py.cpp b/nn/runtime/test/generated/tests/lstm2_state.mod.py.cpp
index 6c56a95..36346d3 100644
--- a/nn/runtime/test/generated/tests/lstm2_state.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/lstm2_state.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, lstm2_state) {
     execute(lstm2_state::CreateModel,
             lstm2_state::is_ignored,
-            lstm2_state::examples);
+            lstm2_state::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/lstm2_state2.mod.py.cpp b/nn/runtime/test/generated/tests/lstm2_state2.mod.py.cpp
index d532d80..6e8aced 100644
--- a/nn/runtime/test/generated/tests/lstm2_state2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/lstm2_state2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, lstm2_state2) {
     execute(lstm2_state2::CreateModel,
             lstm2_state2::is_ignored,
-            lstm2_state2::examples);
+            lstm2_state2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/lstm2_state2_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/lstm2_state2_relaxed.mod.py.cpp
index cfa0b53..e2748e7 100644
--- a/nn/runtime/test/generated/tests/lstm2_state2_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/lstm2_state2_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, lstm2_state2_relaxed) {
     execute(lstm2_state2_relaxed::CreateModel,
             lstm2_state2_relaxed::is_ignored,
-            lstm2_state2_relaxed::examples);
+            lstm2_state2_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/lstm2_state_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/lstm2_state_relaxed.mod.py.cpp
index ae4d8f6..8edcf7e 100644
--- a/nn/runtime/test/generated/tests/lstm2_state_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/lstm2_state_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, lstm2_state_relaxed) {
     execute(lstm2_state_relaxed::CreateModel,
             lstm2_state_relaxed::is_ignored,
-            lstm2_state_relaxed::examples);
+            lstm2_state_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/lstm3.mod.py.cpp b/nn/runtime/test/generated/tests/lstm3.mod.py.cpp
index 69592e8..d4c3fec 100644
--- a/nn/runtime/test/generated/tests/lstm3.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/lstm3.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, lstm3) {
     execute(lstm3::CreateModel,
             lstm3::is_ignored,
-            lstm3::examples);
+            lstm3::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/lstm3_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/lstm3_relaxed.mod.py.cpp
index 3f10a00..488330c 100644
--- a/nn/runtime/test/generated/tests/lstm3_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/lstm3_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, lstm3_relaxed) {
     execute(lstm3_relaxed::CreateModel,
             lstm3_relaxed::is_ignored,
-            lstm3_relaxed::examples);
+            lstm3_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/lstm3_state.mod.py.cpp b/nn/runtime/test/generated/tests/lstm3_state.mod.py.cpp
index 0df8bb3..eb17fed 100644
--- a/nn/runtime/test/generated/tests/lstm3_state.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/lstm3_state.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, lstm3_state) {
     execute(lstm3_state::CreateModel,
             lstm3_state::is_ignored,
-            lstm3_state::examples);
+            lstm3_state::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/lstm3_state2.mod.py.cpp b/nn/runtime/test/generated/tests/lstm3_state2.mod.py.cpp
index e35e2b1..bae8f56 100644
--- a/nn/runtime/test/generated/tests/lstm3_state2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/lstm3_state2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, lstm3_state2) {
     execute(lstm3_state2::CreateModel,
             lstm3_state2::is_ignored,
-            lstm3_state2::examples);
+            lstm3_state2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/lstm3_state2_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/lstm3_state2_relaxed.mod.py.cpp
index d48b4a2..9470642 100644
--- a/nn/runtime/test/generated/tests/lstm3_state2_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/lstm3_state2_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, lstm3_state2_relaxed) {
     execute(lstm3_state2_relaxed::CreateModel,
             lstm3_state2_relaxed::is_ignored,
-            lstm3_state2_relaxed::examples);
+            lstm3_state2_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/lstm3_state3.mod.py.cpp b/nn/runtime/test/generated/tests/lstm3_state3.mod.py.cpp
index 234d390..3a0ec79 100644
--- a/nn/runtime/test/generated/tests/lstm3_state3.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/lstm3_state3.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, lstm3_state3) {
     execute(lstm3_state3::CreateModel,
             lstm3_state3::is_ignored,
-            lstm3_state3::examples);
+            lstm3_state3::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/lstm3_state3_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/lstm3_state3_relaxed.mod.py.cpp
index c19d0a8..dcfddb8 100644
--- a/nn/runtime/test/generated/tests/lstm3_state3_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/lstm3_state3_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, lstm3_state3_relaxed) {
     execute(lstm3_state3_relaxed::CreateModel,
             lstm3_state3_relaxed::is_ignored,
-            lstm3_state3_relaxed::examples);
+            lstm3_state3_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/lstm3_state_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/lstm3_state_relaxed.mod.py.cpp
index d9792e2..7a86637 100644
--- a/nn/runtime/test/generated/tests/lstm3_state_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/lstm3_state_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, lstm3_state_relaxed) {
     execute(lstm3_state_relaxed::CreateModel,
             lstm3_state_relaxed::is_ignored,
-            lstm3_state_relaxed::examples);
+            lstm3_state_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/lstm_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/lstm_relaxed.mod.py.cpp
index 02655dc..abcfd1e 100644
--- a/nn/runtime/test/generated/tests/lstm_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/lstm_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, lstm_relaxed) {
     execute(lstm_relaxed::CreateModel,
             lstm_relaxed::is_ignored,
-            lstm_relaxed::examples);
+            lstm_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/lstm_state.mod.py.cpp b/nn/runtime/test/generated/tests/lstm_state.mod.py.cpp
index 5ec7ce7..a694c4e 100644
--- a/nn/runtime/test/generated/tests/lstm_state.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/lstm_state.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, lstm_state) {
     execute(lstm_state::CreateModel,
             lstm_state::is_ignored,
-            lstm_state::examples);
+            lstm_state::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/lstm_state2.mod.py.cpp b/nn/runtime/test/generated/tests/lstm_state2.mod.py.cpp
index 6b1f946..09c0473 100644
--- a/nn/runtime/test/generated/tests/lstm_state2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/lstm_state2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, lstm_state2) {
     execute(lstm_state2::CreateModel,
             lstm_state2::is_ignored,
-            lstm_state2::examples);
+            lstm_state2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/lstm_state2_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/lstm_state2_relaxed.mod.py.cpp
index c92ae86..1654449 100644
--- a/nn/runtime/test/generated/tests/lstm_state2_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/lstm_state2_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, lstm_state2_relaxed) {
     execute(lstm_state2_relaxed::CreateModel,
             lstm_state2_relaxed::is_ignored,
-            lstm_state2_relaxed::examples);
+            lstm_state2_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/lstm_state_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/lstm_state_relaxed.mod.py.cpp
index ab325d4..51332e3 100644
--- a/nn/runtime/test/generated/tests/lstm_state_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/lstm_state_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, lstm_state_relaxed) {
     execute(lstm_state_relaxed::CreateModel,
             lstm_state_relaxed::is_ignored,
-            lstm_state_relaxed::examples);
+            lstm_state_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/max_pool_float_1.mod.py.cpp b/nn/runtime/test/generated/tests/max_pool_float_1.mod.py.cpp
index b3fc305..64e20ef 100644
--- a/nn/runtime/test/generated/tests/max_pool_float_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/max_pool_float_1.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, max_pool_float_1) {
     execute(max_pool_float_1::CreateModel,
             max_pool_float_1::is_ignored,
-            max_pool_float_1::examples);
+            max_pool_float_1::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/max_pool_float_1_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/max_pool_float_1_relaxed.mod.py.cpp
index f7f7ef5..4aebcbe 100644
--- a/nn/runtime/test/generated/tests/max_pool_float_1_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/max_pool_float_1_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, max_pool_float_1_relaxed) {
     execute(max_pool_float_1_relaxed::CreateModel,
             max_pool_float_1_relaxed::is_ignored,
-            max_pool_float_1_relaxed::examples);
+            max_pool_float_1_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/max_pool_float_2.mod.py.cpp b/nn/runtime/test/generated/tests/max_pool_float_2.mod.py.cpp
index 4eb8c32..cf2ac40 100644
--- a/nn/runtime/test/generated/tests/max_pool_float_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/max_pool_float_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, max_pool_float_2) {
     execute(max_pool_float_2::CreateModel,
             max_pool_float_2::is_ignored,
-            max_pool_float_2::examples);
+            max_pool_float_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/max_pool_float_2_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/max_pool_float_2_relaxed.mod.py.cpp
index 3391eab..c519dea 100644
--- a/nn/runtime/test/generated/tests/max_pool_float_2_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/max_pool_float_2_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, max_pool_float_2_relaxed) {
     execute(max_pool_float_2_relaxed::CreateModel,
             max_pool_float_2_relaxed::is_ignored,
-            max_pool_float_2_relaxed::examples);
+            max_pool_float_2_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/max_pool_float_3.mod.py.cpp b/nn/runtime/test/generated/tests/max_pool_float_3.mod.py.cpp
index 27f75b3..34edfd1 100644
--- a/nn/runtime/test/generated/tests/max_pool_float_3.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/max_pool_float_3.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, max_pool_float_3) {
     execute(max_pool_float_3::CreateModel,
             max_pool_float_3::is_ignored,
-            max_pool_float_3::examples);
+            max_pool_float_3::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/max_pool_float_3_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/max_pool_float_3_relaxed.mod.py.cpp
index 9209c1e..1fe59cb 100644
--- a/nn/runtime/test/generated/tests/max_pool_float_3_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/max_pool_float_3_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, max_pool_float_3_relaxed) {
     execute(max_pool_float_3_relaxed::CreateModel,
             max_pool_float_3_relaxed::is_ignored,
-            max_pool_float_3_relaxed::examples);
+            max_pool_float_3_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/max_pool_float_4.mod.py.cpp b/nn/runtime/test/generated/tests/max_pool_float_4.mod.py.cpp
index 59d6ba9..a289c36 100644
--- a/nn/runtime/test/generated/tests/max_pool_float_4.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/max_pool_float_4.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, max_pool_float_4) {
     execute(max_pool_float_4::CreateModel,
             max_pool_float_4::is_ignored,
-            max_pool_float_4::examples);
+            max_pool_float_4::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/max_pool_float_4_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/max_pool_float_4_relaxed.mod.py.cpp
index c23fe38..1bb8a88 100644
--- a/nn/runtime/test/generated/tests/max_pool_float_4_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/max_pool_float_4_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, max_pool_float_4_relaxed) {
     execute(max_pool_float_4_relaxed::CreateModel,
             max_pool_float_4_relaxed::is_ignored,
-            max_pool_float_4_relaxed::examples);
+            max_pool_float_4_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/max_pool_quant8_1.mod.py.cpp b/nn/runtime/test/generated/tests/max_pool_quant8_1.mod.py.cpp
index a9ddc24..c19781a 100644
--- a/nn/runtime/test/generated/tests/max_pool_quant8_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/max_pool_quant8_1.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, max_pool_quant8_1) {
     execute(max_pool_quant8_1::CreateModel,
             max_pool_quant8_1::is_ignored,
-            max_pool_quant8_1::examples);
+            max_pool_quant8_1::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/max_pool_quant8_2.mod.py.cpp b/nn/runtime/test/generated/tests/max_pool_quant8_2.mod.py.cpp
index d1f68d1..764308e 100644
--- a/nn/runtime/test/generated/tests/max_pool_quant8_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/max_pool_quant8_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, max_pool_quant8_2) {
     execute(max_pool_quant8_2::CreateModel,
             max_pool_quant8_2::is_ignored,
-            max_pool_quant8_2::examples);
+            max_pool_quant8_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/max_pool_quant8_3.mod.py.cpp b/nn/runtime/test/generated/tests/max_pool_quant8_3.mod.py.cpp
index b4ffe02..541bf3c 100644
--- a/nn/runtime/test/generated/tests/max_pool_quant8_3.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/max_pool_quant8_3.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, max_pool_quant8_3) {
     execute(max_pool_quant8_3::CreateModel,
             max_pool_quant8_3::is_ignored,
-            max_pool_quant8_3::examples);
+            max_pool_quant8_3::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/max_pool_quant8_4.mod.py.cpp b/nn/runtime/test/generated/tests/max_pool_quant8_4.mod.py.cpp
index b9b0e80..5e03cfc 100644
--- a/nn/runtime/test/generated/tests/max_pool_quant8_4.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/max_pool_quant8_4.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, max_pool_quant8_4) {
     execute(max_pool_quant8_4::CreateModel,
             max_pool_quant8_4::is_ignored,
-            max_pool_quant8_4::examples);
+            max_pool_quant8_4::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/max_pool_v1_2.mod.py.cpp b/nn/runtime/test/generated/tests/max_pool_v1_2.mod.py.cpp
index ccf0e2b..e9ec73f 100644
--- a/nn/runtime/test/generated/tests/max_pool_v1_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/max_pool_v1_2.mod.py.cpp
@@ -12,144 +12,144 @@
 TEST_F(GeneratedTests, max_pool_v1_2_nhwc) {
     execute(max_pool_v1_2::CreateModel_nhwc,
             max_pool_v1_2::is_ignored_nhwc,
-            max_pool_v1_2::examples_nhwc);
+            max_pool_v1_2::get_examples_nhwc());
 }
 
 TEST_F(GeneratedTests, max_pool_v1_2_nhwc_relaxed) {
     execute(max_pool_v1_2::CreateModel_nhwc_relaxed,
             max_pool_v1_2::is_ignored_nhwc_relaxed,
-            max_pool_v1_2::examples_nhwc_relaxed);
+            max_pool_v1_2::get_examples_nhwc_relaxed());
 }
 
 TEST_F(GeneratedTests, max_pool_v1_2_nhwc_quant8) {
     execute(max_pool_v1_2::CreateModel_nhwc_quant8,
             max_pool_v1_2::is_ignored_nhwc_quant8,
-            max_pool_v1_2::examples_nhwc_quant8);
+            max_pool_v1_2::get_examples_nhwc_quant8());
 }
 
 TEST_F(GeneratedTests, max_pool_v1_2_nchw) {
     execute(max_pool_v1_2::CreateModel_nchw,
             max_pool_v1_2::is_ignored_nchw,
-            max_pool_v1_2::examples_nchw);
+            max_pool_v1_2::get_examples_nchw());
 }
 
 TEST_F(GeneratedTests, max_pool_v1_2_nchw_relaxed) {
     execute(max_pool_v1_2::CreateModel_nchw_relaxed,
             max_pool_v1_2::is_ignored_nchw_relaxed,
-            max_pool_v1_2::examples_nchw_relaxed);
+            max_pool_v1_2::get_examples_nchw_relaxed());
 }
 
 TEST_F(GeneratedTests, max_pool_v1_2_nchw_quant8) {
     execute(max_pool_v1_2::CreateModel_nchw_quant8,
             max_pool_v1_2::is_ignored_nchw_quant8,
-            max_pool_v1_2::examples_nchw_quant8);
+            max_pool_v1_2::get_examples_nchw_quant8());
 }
 
 TEST_F(GeneratedTests, max_pool_v1_2_nhwc_2) {
     execute(max_pool_v1_2::CreateModel_nhwc_2,
             max_pool_v1_2::is_ignored_nhwc_2,
-            max_pool_v1_2::examples_nhwc_2);
+            max_pool_v1_2::get_examples_nhwc_2());
 }
 
 TEST_F(GeneratedTests, max_pool_v1_2_nhwc_relaxed_2) {
     execute(max_pool_v1_2::CreateModel_nhwc_relaxed_2,
             max_pool_v1_2::is_ignored_nhwc_relaxed_2,
-            max_pool_v1_2::examples_nhwc_relaxed_2);
+            max_pool_v1_2::get_examples_nhwc_relaxed_2());
 }
 
 TEST_F(GeneratedTests, max_pool_v1_2_nhwc_quant8_2) {
     execute(max_pool_v1_2::CreateModel_nhwc_quant8_2,
             max_pool_v1_2::is_ignored_nhwc_quant8_2,
-            max_pool_v1_2::examples_nhwc_quant8_2);
+            max_pool_v1_2::get_examples_nhwc_quant8_2());
 }
 
 TEST_F(GeneratedTests, max_pool_v1_2_nchw_2) {
     execute(max_pool_v1_2::CreateModel_nchw_2,
             max_pool_v1_2::is_ignored_nchw_2,
-            max_pool_v1_2::examples_nchw_2);
+            max_pool_v1_2::get_examples_nchw_2());
 }
 
 TEST_F(GeneratedTests, max_pool_v1_2_nchw_relaxed_2) {
     execute(max_pool_v1_2::CreateModel_nchw_relaxed_2,
             max_pool_v1_2::is_ignored_nchw_relaxed_2,
-            max_pool_v1_2::examples_nchw_relaxed_2);
+            max_pool_v1_2::get_examples_nchw_relaxed_2());
 }
 
 TEST_F(GeneratedTests, max_pool_v1_2_nchw_quant8_2) {
     execute(max_pool_v1_2::CreateModel_nchw_quant8_2,
             max_pool_v1_2::is_ignored_nchw_quant8_2,
-            max_pool_v1_2::examples_nchw_quant8_2);
+            max_pool_v1_2::get_examples_nchw_quant8_2());
 }
 
 TEST_F(GeneratedTests, max_pool_v1_2_nhwc_3) {
     execute(max_pool_v1_2::CreateModel_nhwc_3,
             max_pool_v1_2::is_ignored_nhwc_3,
-            max_pool_v1_2::examples_nhwc_3);
+            max_pool_v1_2::get_examples_nhwc_3());
 }
 
 TEST_F(GeneratedTests, max_pool_v1_2_nhwc_relaxed_3) {
     execute(max_pool_v1_2::CreateModel_nhwc_relaxed_3,
             max_pool_v1_2::is_ignored_nhwc_relaxed_3,
-            max_pool_v1_2::examples_nhwc_relaxed_3);
+            max_pool_v1_2::get_examples_nhwc_relaxed_3());
 }
 
 TEST_F(GeneratedTests, max_pool_v1_2_nhwc_quant8_3) {
     execute(max_pool_v1_2::CreateModel_nhwc_quant8_3,
             max_pool_v1_2::is_ignored_nhwc_quant8_3,
-            max_pool_v1_2::examples_nhwc_quant8_3);
+            max_pool_v1_2::get_examples_nhwc_quant8_3());
 }
 
 TEST_F(GeneratedTests, max_pool_v1_2_nchw_3) {
     execute(max_pool_v1_2::CreateModel_nchw_3,
             max_pool_v1_2::is_ignored_nchw_3,
-            max_pool_v1_2::examples_nchw_3);
+            max_pool_v1_2::get_examples_nchw_3());
 }
 
 TEST_F(GeneratedTests, max_pool_v1_2_nchw_relaxed_3) {
     execute(max_pool_v1_2::CreateModel_nchw_relaxed_3,
             max_pool_v1_2::is_ignored_nchw_relaxed_3,
-            max_pool_v1_2::examples_nchw_relaxed_3);
+            max_pool_v1_2::get_examples_nchw_relaxed_3());
 }
 
 TEST_F(GeneratedTests, max_pool_v1_2_nchw_quant8_3) {
     execute(max_pool_v1_2::CreateModel_nchw_quant8_3,
             max_pool_v1_2::is_ignored_nchw_quant8_3,
-            max_pool_v1_2::examples_nchw_quant8_3);
+            max_pool_v1_2::get_examples_nchw_quant8_3());
 }
 
 TEST_F(GeneratedTests, max_pool_v1_2_nhwc_4) {
     execute(max_pool_v1_2::CreateModel_nhwc_4,
             max_pool_v1_2::is_ignored_nhwc_4,
-            max_pool_v1_2::examples_nhwc_4);
+            max_pool_v1_2::get_examples_nhwc_4());
 }
 
 TEST_F(GeneratedTests, max_pool_v1_2_nhwc_relaxed_4) {
     execute(max_pool_v1_2::CreateModel_nhwc_relaxed_4,
             max_pool_v1_2::is_ignored_nhwc_relaxed_4,
-            max_pool_v1_2::examples_nhwc_relaxed_4);
+            max_pool_v1_2::get_examples_nhwc_relaxed_4());
 }
 
 TEST_F(GeneratedTests, max_pool_v1_2_nhwc_quant8_4) {
     execute(max_pool_v1_2::CreateModel_nhwc_quant8_4,
             max_pool_v1_2::is_ignored_nhwc_quant8_4,
-            max_pool_v1_2::examples_nhwc_quant8_4);
+            max_pool_v1_2::get_examples_nhwc_quant8_4());
 }
 
 TEST_F(GeneratedTests, max_pool_v1_2_nchw_4) {
     execute(max_pool_v1_2::CreateModel_nchw_4,
             max_pool_v1_2::is_ignored_nchw_4,
-            max_pool_v1_2::examples_nchw_4);
+            max_pool_v1_2::get_examples_nchw_4());
 }
 
 TEST_F(GeneratedTests, max_pool_v1_2_nchw_relaxed_4) {
     execute(max_pool_v1_2::CreateModel_nchw_relaxed_4,
             max_pool_v1_2::is_ignored_nchw_relaxed_4,
-            max_pool_v1_2::examples_nchw_relaxed_4);
+            max_pool_v1_2::get_examples_nchw_relaxed_4());
 }
 
 TEST_F(GeneratedTests, max_pool_v1_2_nchw_quant8_4) {
     execute(max_pool_v1_2::CreateModel_nchw_quant8_4,
             max_pool_v1_2::is_ignored_nchw_quant8_4,
-            max_pool_v1_2::examples_nchw_quant8_4);
+            max_pool_v1_2::get_examples_nchw_quant8_4());
 }
 
diff --git a/nn/runtime/test/generated/tests/maximum.mod.py.cpp b/nn/runtime/test/generated/tests/maximum.mod.py.cpp
index ccdb17d..9a57a3f 100644
--- a/nn/runtime/test/generated/tests/maximum.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/maximum.mod.py.cpp
@@ -12,60 +12,60 @@
 TEST_F(GeneratedTests, maximum_simple) {
     execute(maximum::CreateModel,
             maximum::is_ignored,
-            maximum::examples_simple);
+            maximum::get_examples_simple());
 }
 
 TEST_F(GeneratedTests, maximum_simple_relaxed) {
     execute(maximum::CreateModel_relaxed,
             maximum::is_ignored_relaxed,
-            maximum::examples_simple_relaxed);
+            maximum::get_examples_simple_relaxed());
 }
 
 TEST_F(GeneratedTests, maximum_simple_quant8) {
     execute(maximum::CreateModel_quant8,
             maximum::is_ignored_quant8,
-            maximum::examples_simple_quant8);
+            maximum::get_examples_simple_quant8());
 }
 
 TEST_F(GeneratedTests, maximum_simple_int32) {
     execute(maximum::CreateModel_int32,
             maximum::is_ignored_int32,
-            maximum::examples_simple_int32);
+            maximum::get_examples_simple_int32());
 }
 
 TEST_F(GeneratedTests, maximum_simple_float16) {
     execute(maximum::CreateModel_float16,
             maximum::is_ignored_float16,
-            maximum::examples_simple_float16);
+            maximum::get_examples_simple_float16());
 }
 
 TEST_F(GeneratedTests, maximum_broadcast) {
     execute(maximum::CreateModel_2,
             maximum::is_ignored_2,
-            maximum::examples_broadcast);
+            maximum::get_examples_broadcast());
 }
 
 TEST_F(GeneratedTests, maximum_broadcast_relaxed) {
     execute(maximum::CreateModel_relaxed_2,
             maximum::is_ignored_relaxed_2,
-            maximum::examples_broadcast_relaxed);
+            maximum::get_examples_broadcast_relaxed());
 }
 
 TEST_F(GeneratedTests, maximum_broadcast_quant8) {
     execute(maximum::CreateModel_quant8_2,
             maximum::is_ignored_quant8_2,
-            maximum::examples_broadcast_quant8);
+            maximum::get_examples_broadcast_quant8());
 }
 
 TEST_F(GeneratedTests, maximum_broadcast_int32) {
     execute(maximum::CreateModel_int32_2,
             maximum::is_ignored_int32_2,
-            maximum::examples_broadcast_int32);
+            maximum::get_examples_broadcast_int32());
 }
 
 TEST_F(GeneratedTests, maximum_broadcast_float16) {
     execute(maximum::CreateModel_float16_2,
             maximum::is_ignored_float16_2,
-            maximum::examples_broadcast_float16);
+            maximum::get_examples_broadcast_float16());
 }
 
diff --git a/nn/runtime/test/generated/tests/mean.mod.py.cpp b/nn/runtime/test/generated/tests/mean.mod.py.cpp
index 5508977..1ad58db 100644
--- a/nn/runtime/test/generated/tests/mean.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/mean.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, mean) {
     execute(mean::CreateModel,
             mean::is_ignored,
-            mean::examples);
+            mean::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/mean_float_1.mod.py.cpp b/nn/runtime/test/generated/tests/mean_float_1.mod.py.cpp
index 15458e1..b34eec6 100644
--- a/nn/runtime/test/generated/tests/mean_float_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/mean_float_1.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, mean_float_1) {
     execute(mean_float_1::CreateModel,
             mean_float_1::is_ignored,
-            mean_float_1::examples);
+            mean_float_1::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/mean_float_1_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/mean_float_1_relaxed.mod.py.cpp
index 5b3f87d..dbc3db8 100644
--- a/nn/runtime/test/generated/tests/mean_float_1_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/mean_float_1_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, mean_float_1_relaxed) {
     execute(mean_float_1_relaxed::CreateModel,
             mean_float_1_relaxed::is_ignored,
-            mean_float_1_relaxed::examples);
+            mean_float_1_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/mean_float_2.mod.py.cpp b/nn/runtime/test/generated/tests/mean_float_2.mod.py.cpp
index 5bb294f..43af7aa 100644
--- a/nn/runtime/test/generated/tests/mean_float_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/mean_float_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, mean_float_2) {
     execute(mean_float_2::CreateModel,
             mean_float_2::is_ignored,
-            mean_float_2::examples);
+            mean_float_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/mean_float_2_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/mean_float_2_relaxed.mod.py.cpp
index 6d9936b..f0eece1 100644
--- a/nn/runtime/test/generated/tests/mean_float_2_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/mean_float_2_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, mean_float_2_relaxed) {
     execute(mean_float_2_relaxed::CreateModel,
             mean_float_2_relaxed::is_ignored,
-            mean_float_2_relaxed::examples);
+            mean_float_2_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/mean_quant8_1.mod.py.cpp b/nn/runtime/test/generated/tests/mean_quant8_1.mod.py.cpp
index daab8fb..f1d4328 100644
--- a/nn/runtime/test/generated/tests/mean_quant8_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/mean_quant8_1.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, mean_quant8_1) {
     execute(mean_quant8_1::CreateModel,
             mean_quant8_1::is_ignored,
-            mean_quant8_1::examples);
+            mean_quant8_1::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/mean_quant8_2.mod.py.cpp b/nn/runtime/test/generated/tests/mean_quant8_2.mod.py.cpp
index ebe8473..77046ad 100644
--- a/nn/runtime/test/generated/tests/mean_quant8_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/mean_quant8_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, mean_quant8_2) {
     execute(mean_quant8_2::CreateModel,
             mean_quant8_2::is_ignored,
-            mean_quant8_2::examples);
+            mean_quant8_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/mean_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/mean_relaxed.mod.py.cpp
index 7ed8a13..74271d3 100644
--- a/nn/runtime/test/generated/tests/mean_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/mean_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, mean_relaxed) {
     execute(mean_relaxed::CreateModel,
             mean_relaxed::is_ignored,
-            mean_relaxed::examples);
+            mean_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/minimum.mod.py.cpp b/nn/runtime/test/generated/tests/minimum.mod.py.cpp
index 96d7250..65a430e 100644
--- a/nn/runtime/test/generated/tests/minimum.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/minimum.mod.py.cpp
@@ -12,60 +12,60 @@
 TEST_F(GeneratedTests, minimum_simple) {
     execute(minimum::CreateModel,
             minimum::is_ignored,
-            minimum::examples_simple);
+            minimum::get_examples_simple());
 }
 
 TEST_F(GeneratedTests, minimum_simple_relaxed) {
     execute(minimum::CreateModel_relaxed,
             minimum::is_ignored_relaxed,
-            minimum::examples_simple_relaxed);
+            minimum::get_examples_simple_relaxed());
 }
 
 TEST_F(GeneratedTests, minimum_simple_quant8) {
     execute(minimum::CreateModel_quant8,
             minimum::is_ignored_quant8,
-            minimum::examples_simple_quant8);
+            minimum::get_examples_simple_quant8());
 }
 
 TEST_F(GeneratedTests, minimum_simple_int32) {
     execute(minimum::CreateModel_int32,
             minimum::is_ignored_int32,
-            minimum::examples_simple_int32);
+            minimum::get_examples_simple_int32());
 }
 
 TEST_F(GeneratedTests, minimum_simple_float16) {
     execute(minimum::CreateModel_float16,
             minimum::is_ignored_float16,
-            minimum::examples_simple_float16);
+            minimum::get_examples_simple_float16());
 }
 
 TEST_F(GeneratedTests, minimum_broadcast) {
     execute(minimum::CreateModel_2,
             minimum::is_ignored_2,
-            minimum::examples_broadcast);
+            minimum::get_examples_broadcast());
 }
 
 TEST_F(GeneratedTests, minimum_broadcast_relaxed) {
     execute(minimum::CreateModel_relaxed_2,
             minimum::is_ignored_relaxed_2,
-            minimum::examples_broadcast_relaxed);
+            minimum::get_examples_broadcast_relaxed());
 }
 
 TEST_F(GeneratedTests, minimum_broadcast_quant8) {
     execute(minimum::CreateModel_quant8_2,
             minimum::is_ignored_quant8_2,
-            minimum::examples_broadcast_quant8);
+            minimum::get_examples_broadcast_quant8());
 }
 
 TEST_F(GeneratedTests, minimum_broadcast_int32) {
     execute(minimum::CreateModel_int32_2,
             minimum::is_ignored_int32_2,
-            minimum::examples_broadcast_int32);
+            minimum::get_examples_broadcast_int32());
 }
 
 TEST_F(GeneratedTests, minimum_broadcast_float16) {
     execute(minimum::CreateModel_float16_2,
             minimum::is_ignored_float16_2,
-            minimum::examples_broadcast_float16);
+            minimum::get_examples_broadcast_float16());
 }
 
diff --git a/nn/runtime/test/generated/tests/mobilenet_224_gender_basic_fixed.mod.py.cpp b/nn/runtime/test/generated/tests/mobilenet_224_gender_basic_fixed.mod.py.cpp
index 7e7ad12..72d383f 100644
--- a/nn/runtime/test/generated/tests/mobilenet_224_gender_basic_fixed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/mobilenet_224_gender_basic_fixed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, mobilenet_224_gender_basic_fixed) {
     execute(mobilenet_224_gender_basic_fixed::CreateModel,
             mobilenet_224_gender_basic_fixed::is_ignored,
-            mobilenet_224_gender_basic_fixed::examples);
+            mobilenet_224_gender_basic_fixed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/mobilenet_224_gender_basic_fixed_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/mobilenet_224_gender_basic_fixed_relaxed.mod.py.cpp
index 7db14a7..45bfdc0 100644
--- a/nn/runtime/test/generated/tests/mobilenet_224_gender_basic_fixed_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/mobilenet_224_gender_basic_fixed_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, mobilenet_224_gender_basic_fixed_relaxed) {
     execute(mobilenet_224_gender_basic_fixed_relaxed::CreateModel,
             mobilenet_224_gender_basic_fixed_relaxed::is_ignored,
-            mobilenet_224_gender_basic_fixed_relaxed::examples);
+            mobilenet_224_gender_basic_fixed_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/mobilenet_quantized.mod.py.cpp b/nn/runtime/test/generated/tests/mobilenet_quantized.mod.py.cpp
index 31a7b4a..d8e1630 100644
--- a/nn/runtime/test/generated/tests/mobilenet_quantized.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/mobilenet_quantized.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, mobilenet_quantized) {
     execute(mobilenet_quantized::CreateModel,
             mobilenet_quantized::is_ignored,
-            mobilenet_quantized::examples);
+            mobilenet_quantized::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/mul.mod.py.cpp b/nn/runtime/test/generated/tests/mul.mod.py.cpp
index 3deccc6..45a4437 100644
--- a/nn/runtime/test/generated/tests/mul.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/mul.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, mul) {
     execute(mul::CreateModel,
             mul::is_ignored,
-            mul::examples);
+            mul::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/mul_broadcast_float16.mod.py.cpp b/nn/runtime/test/generated/tests/mul_broadcast_float16.mod.py.cpp
index 7d0db16..ee99e39 100644
--- a/nn/runtime/test/generated/tests/mul_broadcast_float16.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/mul_broadcast_float16.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, mul_broadcast_float16) {
     execute(mul_broadcast_float16::CreateModel,
             mul_broadcast_float16::is_ignored,
-            mul_broadcast_float16::examples);
+            mul_broadcast_float16::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/mul_broadcast_quant8.mod.py.cpp b/nn/runtime/test/generated/tests/mul_broadcast_quant8.mod.py.cpp
index 630b900..2a81255 100644
--- a/nn/runtime/test/generated/tests/mul_broadcast_quant8.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/mul_broadcast_quant8.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, mul_broadcast_quant8) {
     execute(mul_broadcast_quant8::CreateModel,
             mul_broadcast_quant8::is_ignored,
-            mul_broadcast_quant8::examples);
+            mul_broadcast_quant8::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/mul_float16.mod.py.cpp b/nn/runtime/test/generated/tests/mul_float16.mod.py.cpp
index c320847..d534d21 100644
--- a/nn/runtime/test/generated/tests/mul_float16.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/mul_float16.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, mul_float16) {
     execute(mul_float16::CreateModel,
             mul_float16::is_ignored,
-            mul_float16::examples);
+            mul_float16::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/mul_quant8.mod.py.cpp b/nn/runtime/test/generated/tests/mul_quant8.mod.py.cpp
index 4691456..afa0e12 100644
--- a/nn/runtime/test/generated/tests/mul_quant8.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/mul_quant8.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, mul_quant8) {
     execute(mul_quant8::CreateModel,
             mul_quant8::is_ignored,
-            mul_quant8::examples);
+            mul_quant8::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/mul_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/mul_relaxed.mod.py.cpp
index fd2bbd3..9cee39c 100644
--- a/nn/runtime/test/generated/tests/mul_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/mul_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, mul_relaxed) {
     execute(mul_relaxed::CreateModel,
             mul_relaxed::is_ignored,
-            mul_relaxed::examples);
+            mul_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/mul_relu.mod.py.cpp b/nn/runtime/test/generated/tests/mul_relu.mod.py.cpp
index 27128fe..d9fff87 100644
--- a/nn/runtime/test/generated/tests/mul_relu.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/mul_relu.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, mul_relu) {
     execute(mul_relu::CreateModel,
             mul_relu::is_ignored,
-            mul_relu::examples);
+            mul_relu::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/mul_relu_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/mul_relu_relaxed.mod.py.cpp
index 0adc1d0..a66121e 100644
--- a/nn/runtime/test/generated/tests/mul_relu_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/mul_relu_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, mul_relu_relaxed) {
     execute(mul_relu_relaxed::CreateModel,
             mul_relu_relaxed::is_ignored,
-            mul_relu_relaxed::examples);
+            mul_relu_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/pad.mod.py.cpp b/nn/runtime/test/generated/tests/pad.mod.py.cpp
index 9834819..a65e078 100644
--- a/nn/runtime/test/generated/tests/pad.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/pad.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, pad) {
     execute(pad::CreateModel,
             pad::is_ignored,
-            pad::examples);
+            pad::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/pad_float16.mod.py.cpp b/nn/runtime/test/generated/tests/pad_float16.mod.py.cpp
new file mode 100644
index 0000000..29c3fef
--- /dev/null
+++ b/nn/runtime/test/generated/tests/pad_float16.mod.py.cpp
@@ -0,0 +1,17 @@
+// clang-format off
+// Generated file (from: pad_float16.mod.py). Do not edit
+#include "../../TestGenerated.h"
+
+namespace pad_float16 {
+// Generated pad_float16 test
+#include "generated/examples/pad_float16.example.cpp"
+// Generated model constructor
+#include "generated/models/pad_float16.model.cpp"
+} // namespace pad_float16
+
+TEST_F(GeneratedTests, pad_float16) {
+    execute(pad_float16::CreateModel,
+            pad_float16::is_ignored,
+            pad_float16::get_examples());
+}
+
diff --git a/nn/runtime/test/generated/tests/pad_float_1.mod.py.cpp b/nn/runtime/test/generated/tests/pad_float_1.mod.py.cpp
index e951174..90c2deb 100644
--- a/nn/runtime/test/generated/tests/pad_float_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/pad_float_1.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, pad_float_1) {
     execute(pad_float_1::CreateModel,
             pad_float_1::is_ignored,
-            pad_float_1::examples);
+            pad_float_1::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/pad_float_1_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/pad_float_1_relaxed.mod.py.cpp
index 5a123f9..ead0623 100644
--- a/nn/runtime/test/generated/tests/pad_float_1_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/pad_float_1_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, pad_float_1_relaxed) {
     execute(pad_float_1_relaxed::CreateModel,
             pad_float_1_relaxed::is_ignored,
-            pad_float_1_relaxed::examples);
+            pad_float_1_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/pad_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/pad_relaxed.mod.py.cpp
index 9964120..63f0d6f 100644
--- a/nn/runtime/test/generated/tests/pad_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/pad_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, pad_relaxed) {
     execute(pad_relaxed::CreateModel,
             pad_relaxed::is_ignored,
-            pad_relaxed::examples);
+            pad_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/pad_v2_1_float.mod.py.cpp b/nn/runtime/test/generated/tests/pad_v2_1_float.mod.py.cpp
index af92582..91530be 100644
--- a/nn/runtime/test/generated/tests/pad_v2_1_float.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/pad_v2_1_float.mod.py.cpp
@@ -12,6 +12,12 @@
 TEST_F(GeneratedTests, pad_v2_1_float) {
     execute(pad_v2_1_float::CreateModel,
             pad_v2_1_float::is_ignored,
-            pad_v2_1_float::examples);
+            pad_v2_1_float::get_examples());
+}
+
+TEST_F(GeneratedTests, pad_v2_1_float_float16) {
+    execute(pad_v2_1_float::CreateModel_float16,
+            pad_v2_1_float::is_ignored_float16,
+            pad_v2_1_float::get_examples_float16());
 }
 
diff --git a/nn/runtime/test/generated/tests/pad_v2_1_float_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/pad_v2_1_float_relaxed.mod.py.cpp
index 8e7fcd8..9917f15 100644
--- a/nn/runtime/test/generated/tests/pad_v2_1_float_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/pad_v2_1_float_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, pad_v2_1_float_relaxed) {
     execute(pad_v2_1_float_relaxed::CreateModel,
             pad_v2_1_float_relaxed::is_ignored,
-            pad_v2_1_float_relaxed::examples);
+            pad_v2_1_float_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/pad_v2_1_quant8.mod.py.cpp b/nn/runtime/test/generated/tests/pad_v2_1_quant8.mod.py.cpp
index 0ee65d5..db9346f 100644
--- a/nn/runtime/test/generated/tests/pad_v2_1_quant8.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/pad_v2_1_quant8.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, pad_v2_1_quant8) {
     execute(pad_v2_1_quant8::CreateModel,
             pad_v2_1_quant8::is_ignored,
-            pad_v2_1_quant8::examples);
+            pad_v2_1_quant8::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/pow.mod.py.cpp b/nn/runtime/test/generated/tests/pow.mod.py.cpp
index 19bbfa6..2ebb77b 100644
--- a/nn/runtime/test/generated/tests/pow.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/pow.mod.py.cpp
@@ -12,36 +12,36 @@
 TEST_F(GeneratedTests, pow) {
     execute(pow::CreateModel,
             pow::is_ignored,
-            pow::examples);
+            pow::get_examples());
 }
 
 TEST_F(GeneratedTests, pow_relaxed) {
     execute(pow::CreateModel_relaxed,
             pow::is_ignored_relaxed,
-            pow::examples_relaxed);
+            pow::get_examples_relaxed());
 }
 
 TEST_F(GeneratedTests, pow_2) {
     execute(pow::CreateModel_2,
             pow::is_ignored_2,
-            pow::examples_2);
+            pow::get_examples_2());
 }
 
 TEST_F(GeneratedTests, pow_relaxed_2) {
     execute(pow::CreateModel_relaxed_2,
             pow::is_ignored_relaxed_2,
-            pow::examples_relaxed_2);
+            pow::get_examples_relaxed_2());
 }
 
 TEST_F(GeneratedTests, pow_3) {
     execute(pow::CreateModel_3,
             pow::is_ignored_3,
-            pow::examples_3);
+            pow::get_examples_3());
 }
 
 TEST_F(GeneratedTests, pow_relaxed_3) {
     execute(pow::CreateModel_relaxed_3,
             pow::is_ignored_relaxed_3,
-            pow::examples_relaxed_3);
+            pow::get_examples_relaxed_3());
 }
 
diff --git a/nn/runtime/test/generated/tests/prelu.mod.py.cpp b/nn/runtime/test/generated/tests/prelu.mod.py.cpp
index a7f7c0d..f60f857 100644
--- a/nn/runtime/test/generated/tests/prelu.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/prelu.mod.py.cpp
@@ -12,36 +12,36 @@
 TEST_F(GeneratedTests, prelu) {
     execute(prelu::CreateModel,
             prelu::is_ignored,
-            prelu::examples);
+            prelu::get_examples());
 }
 
 TEST_F(GeneratedTests, prelu_relaxed) {
     execute(prelu::CreateModel_relaxed,
             prelu::is_ignored_relaxed,
-            prelu::examples_relaxed);
+            prelu::get_examples_relaxed());
 }
 
 TEST_F(GeneratedTests, prelu_quant8) {
     execute(prelu::CreateModel_quant8,
             prelu::is_ignored_quant8,
-            prelu::examples_quant8);
+            prelu::get_examples_quant8());
 }
 
 TEST_F(GeneratedTests, prelu_weight_as_input) {
     execute(prelu::CreateModel_weight_as_input,
             prelu::is_ignored_weight_as_input,
-            prelu::examples_weight_as_input);
+            prelu::get_examples_weight_as_input());
 }
 
 TEST_F(GeneratedTests, prelu_weight_as_input_relaxed) {
     execute(prelu::CreateModel_weight_as_input_relaxed,
             prelu::is_ignored_weight_as_input_relaxed,
-            prelu::examples_weight_as_input_relaxed);
+            prelu::get_examples_weight_as_input_relaxed());
 }
 
 TEST_F(GeneratedTests, prelu_weight_as_input_quant8) {
     execute(prelu::CreateModel_weight_as_input_quant8,
             prelu::is_ignored_weight_as_input_quant8,
-            prelu::examples_weight_as_input_quant8);
+            prelu::get_examples_weight_as_input_quant8());
 }
 
diff --git a/nn/runtime/test/generated/tests/quantize.mod.py.cpp b/nn/runtime/test/generated/tests/quantize.mod.py.cpp
index c3ec212..96d006e 100644
--- a/nn/runtime/test/generated/tests/quantize.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/quantize.mod.py.cpp
@@ -12,24 +12,24 @@
 TEST_F(GeneratedTests, quantize_quant8) {
     execute(quantize::CreateModel_quant8,
             quantize::is_ignored_quant8,
-            quantize::examples_quant8);
+            quantize::get_examples_quant8());
 }
 
 TEST_F(GeneratedTests, quantize_quant8_2) {
     execute(quantize::CreateModel_quant8_2,
             quantize::is_ignored_quant8_2,
-            quantize::examples_quant8_2);
+            quantize::get_examples_quant8_2());
 }
 
 TEST_F(GeneratedTests, quantize_quant8_3) {
     execute(quantize::CreateModel_quant8_3,
             quantize::is_ignored_quant8_3,
-            quantize::examples_quant8_3);
+            quantize::get_examples_quant8_3());
 }
 
 TEST_F(GeneratedTests, quantize_quant8_4) {
     execute(quantize::CreateModel_quant8_4,
             quantize::is_ignored_quant8_4,
-            quantize::examples_quant8_4);
+            quantize::get_examples_quant8_4());
 }
 
diff --git a/nn/runtime/test/generated/tests/quantized_lstm.mod.py.cpp b/nn/runtime/test/generated/tests/quantized_lstm.mod.py.cpp
index aef2efc..5b94206 100644
--- a/nn/runtime/test/generated/tests/quantized_lstm.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/quantized_lstm.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, quantized_lstm) {
     execute(quantized_lstm::CreateModel,
             quantized_lstm::is_ignored,
-            quantized_lstm::examples);
+            quantized_lstm::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/random_multinomial.mod.py.cpp b/nn/runtime/test/generated/tests/random_multinomial.mod.py.cpp
index dedf4de..27462bb 100644
--- a/nn/runtime/test/generated/tests/random_multinomial.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/random_multinomial.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, random_multinomial) {
     execute(random_multinomial::CreateModel,
             random_multinomial::is_ignored,
-            random_multinomial::examples);
+            random_multinomial::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/relu1_float16_1.mod.py.cpp b/nn/runtime/test/generated/tests/relu1_float16_1.mod.py.cpp
index a8a723c..a30f0b2 100644
--- a/nn/runtime/test/generated/tests/relu1_float16_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/relu1_float16_1.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, relu1_float16_1) {
     execute(relu1_float16_1::CreateModel,
             relu1_float16_1::is_ignored,
-            relu1_float16_1::examples);
+            relu1_float16_1::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/relu1_float16_2.mod.py.cpp b/nn/runtime/test/generated/tests/relu1_float16_2.mod.py.cpp
index 3f08b86..b8129bb 100644
--- a/nn/runtime/test/generated/tests/relu1_float16_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/relu1_float16_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, relu1_float16_2) {
     execute(relu1_float16_2::CreateModel,
             relu1_float16_2::is_ignored,
-            relu1_float16_2::examples);
+            relu1_float16_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/relu1_float_1.mod.py.cpp b/nn/runtime/test/generated/tests/relu1_float_1.mod.py.cpp
index 2a12683..d31edec 100644
--- a/nn/runtime/test/generated/tests/relu1_float_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/relu1_float_1.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, relu1_float_1) {
     execute(relu1_float_1::CreateModel,
             relu1_float_1::is_ignored,
-            relu1_float_1::examples);
+            relu1_float_1::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/relu1_float_1_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/relu1_float_1_relaxed.mod.py.cpp
index f1d7aad..2422eed 100644
--- a/nn/runtime/test/generated/tests/relu1_float_1_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/relu1_float_1_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, relu1_float_1_relaxed) {
     execute(relu1_float_1_relaxed::CreateModel,
             relu1_float_1_relaxed::is_ignored,
-            relu1_float_1_relaxed::examples);
+            relu1_float_1_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/relu1_float_2.mod.py.cpp b/nn/runtime/test/generated/tests/relu1_float_2.mod.py.cpp
index af1b653..b0bbf07 100644
--- a/nn/runtime/test/generated/tests/relu1_float_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/relu1_float_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, relu1_float_2) {
     execute(relu1_float_2::CreateModel,
             relu1_float_2::is_ignored,
-            relu1_float_2::examples);
+            relu1_float_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/relu1_float_2_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/relu1_float_2_relaxed.mod.py.cpp
index f2809af..6119764 100644
--- a/nn/runtime/test/generated/tests/relu1_float_2_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/relu1_float_2_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, relu1_float_2_relaxed) {
     execute(relu1_float_2_relaxed::CreateModel,
             relu1_float_2_relaxed::is_ignored,
-            relu1_float_2_relaxed::examples);
+            relu1_float_2_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/relu1_quant8_1.mod.py.cpp b/nn/runtime/test/generated/tests/relu1_quant8_1.mod.py.cpp
index cb5909b..f8d4713 100644
--- a/nn/runtime/test/generated/tests/relu1_quant8_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/relu1_quant8_1.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, relu1_quant8_1) {
     execute(relu1_quant8_1::CreateModel,
             relu1_quant8_1::is_ignored,
-            relu1_quant8_1::examples);
+            relu1_quant8_1::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/relu1_quant8_2.mod.py.cpp b/nn/runtime/test/generated/tests/relu1_quant8_2.mod.py.cpp
index b9bffae..0c90e81 100644
--- a/nn/runtime/test/generated/tests/relu1_quant8_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/relu1_quant8_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, relu1_quant8_2) {
     execute(relu1_quant8_2::CreateModel,
             relu1_quant8_2::is_ignored,
-            relu1_quant8_2::examples);
+            relu1_quant8_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/relu6_float16_1.mod.py.cpp b/nn/runtime/test/generated/tests/relu6_float16_1.mod.py.cpp
index 5971190..305b847 100644
--- a/nn/runtime/test/generated/tests/relu6_float16_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/relu6_float16_1.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, relu6_float16_1) {
     execute(relu6_float16_1::CreateModel,
             relu6_float16_1::is_ignored,
-            relu6_float16_1::examples);
+            relu6_float16_1::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/relu6_float16_2.mod.py.cpp b/nn/runtime/test/generated/tests/relu6_float16_2.mod.py.cpp
index 7221dfe..3fa9ba6 100644
--- a/nn/runtime/test/generated/tests/relu6_float16_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/relu6_float16_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, relu6_float16_2) {
     execute(relu6_float16_2::CreateModel,
             relu6_float16_2::is_ignored,
-            relu6_float16_2::examples);
+            relu6_float16_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/relu6_float_1.mod.py.cpp b/nn/runtime/test/generated/tests/relu6_float_1.mod.py.cpp
index e3900cb..3e9dfe7 100644
--- a/nn/runtime/test/generated/tests/relu6_float_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/relu6_float_1.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, relu6_float_1) {
     execute(relu6_float_1::CreateModel,
             relu6_float_1::is_ignored,
-            relu6_float_1::examples);
+            relu6_float_1::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/relu6_float_1_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/relu6_float_1_relaxed.mod.py.cpp
index bfcc1e0..40aae8d 100644
--- a/nn/runtime/test/generated/tests/relu6_float_1_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/relu6_float_1_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, relu6_float_1_relaxed) {
     execute(relu6_float_1_relaxed::CreateModel,
             relu6_float_1_relaxed::is_ignored,
-            relu6_float_1_relaxed::examples);
+            relu6_float_1_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/relu6_float_2.mod.py.cpp b/nn/runtime/test/generated/tests/relu6_float_2.mod.py.cpp
index a06f859..5726fd3 100644
--- a/nn/runtime/test/generated/tests/relu6_float_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/relu6_float_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, relu6_float_2) {
     execute(relu6_float_2::CreateModel,
             relu6_float_2::is_ignored,
-            relu6_float_2::examples);
+            relu6_float_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/relu6_float_2_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/relu6_float_2_relaxed.mod.py.cpp
index eb6617a..5f2256b 100644
--- a/nn/runtime/test/generated/tests/relu6_float_2_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/relu6_float_2_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, relu6_float_2_relaxed) {
     execute(relu6_float_2_relaxed::CreateModel,
             relu6_float_2_relaxed::is_ignored,
-            relu6_float_2_relaxed::examples);
+            relu6_float_2_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/relu6_quant8_1.mod.py.cpp b/nn/runtime/test/generated/tests/relu6_quant8_1.mod.py.cpp
index fe241b5..f0b7c05 100644
--- a/nn/runtime/test/generated/tests/relu6_quant8_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/relu6_quant8_1.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, relu6_quant8_1) {
     execute(relu6_quant8_1::CreateModel,
             relu6_quant8_1::is_ignored,
-            relu6_quant8_1::examples);
+            relu6_quant8_1::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/relu6_quant8_2.mod.py.cpp b/nn/runtime/test/generated/tests/relu6_quant8_2.mod.py.cpp
index 3ae1887..a741af6 100644
--- a/nn/runtime/test/generated/tests/relu6_quant8_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/relu6_quant8_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, relu6_quant8_2) {
     execute(relu6_quant8_2::CreateModel,
             relu6_quant8_2::is_ignored,
-            relu6_quant8_2::examples);
+            relu6_quant8_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/relu_float16_1.mod.py.cpp b/nn/runtime/test/generated/tests/relu_float16_1.mod.py.cpp
index 3c452e3..8fec7a5 100644
--- a/nn/runtime/test/generated/tests/relu_float16_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/relu_float16_1.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, relu_float16_1) {
     execute(relu_float16_1::CreateModel,
             relu_float16_1::is_ignored,
-            relu_float16_1::examples);
+            relu_float16_1::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/relu_float16_2.mod.py.cpp b/nn/runtime/test/generated/tests/relu_float16_2.mod.py.cpp
index 2aa5dd4..d69bde3 100644
--- a/nn/runtime/test/generated/tests/relu_float16_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/relu_float16_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, relu_float16_2) {
     execute(relu_float16_2::CreateModel,
             relu_float16_2::is_ignored,
-            relu_float16_2::examples);
+            relu_float16_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/relu_float_1.mod.py.cpp b/nn/runtime/test/generated/tests/relu_float_1.mod.py.cpp
index e0d9f1a..3571c88 100644
--- a/nn/runtime/test/generated/tests/relu_float_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/relu_float_1.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, relu_float_1) {
     execute(relu_float_1::CreateModel,
             relu_float_1::is_ignored,
-            relu_float_1::examples);
+            relu_float_1::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/relu_float_1_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/relu_float_1_relaxed.mod.py.cpp
index 0036ccb..bc22d8c 100644
--- a/nn/runtime/test/generated/tests/relu_float_1_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/relu_float_1_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, relu_float_1_relaxed) {
     execute(relu_float_1_relaxed::CreateModel,
             relu_float_1_relaxed::is_ignored,
-            relu_float_1_relaxed::examples);
+            relu_float_1_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/relu_float_2.mod.py.cpp b/nn/runtime/test/generated/tests/relu_float_2.mod.py.cpp
index 57a300a..e43a145 100644
--- a/nn/runtime/test/generated/tests/relu_float_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/relu_float_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, relu_float_2) {
     execute(relu_float_2::CreateModel,
             relu_float_2::is_ignored,
-            relu_float_2::examples);
+            relu_float_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/relu_float_2_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/relu_float_2_relaxed.mod.py.cpp
index e239539..b2adf03 100644
--- a/nn/runtime/test/generated/tests/relu_float_2_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/relu_float_2_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, relu_float_2_relaxed) {
     execute(relu_float_2_relaxed::CreateModel,
             relu_float_2_relaxed::is_ignored,
-            relu_float_2_relaxed::examples);
+            relu_float_2_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/relu_quant8_1.mod.py.cpp b/nn/runtime/test/generated/tests/relu_quant8_1.mod.py.cpp
index 1ea5866..00d6a75 100644
--- a/nn/runtime/test/generated/tests/relu_quant8_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/relu_quant8_1.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, relu_quant8_1) {
     execute(relu_quant8_1::CreateModel,
             relu_quant8_1::is_ignored,
-            relu_quant8_1::examples);
+            relu_quant8_1::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/relu_quant8_2.mod.py.cpp b/nn/runtime/test/generated/tests/relu_quant8_2.mod.py.cpp
index 06da896..2c74fcf 100644
--- a/nn/runtime/test/generated/tests/relu_quant8_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/relu_quant8_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, relu_quant8_2) {
     execute(relu_quant8_2::CreateModel,
             relu_quant8_2::is_ignored,
-            relu_quant8_2::examples);
+            relu_quant8_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/reshape.mod.py.cpp b/nn/runtime/test/generated/tests/reshape.mod.py.cpp
index 92fb425..defa610 100644
--- a/nn/runtime/test/generated/tests/reshape.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/reshape.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, reshape) {
     execute(reshape::CreateModel,
             reshape::is_ignored,
-            reshape::examples);
+            reshape::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/reshape_float16.mod.py.cpp b/nn/runtime/test/generated/tests/reshape_float16.mod.py.cpp
new file mode 100644
index 0000000..17872a2
--- /dev/null
+++ b/nn/runtime/test/generated/tests/reshape_float16.mod.py.cpp
@@ -0,0 +1,17 @@
+// clang-format off
+// Generated file (from: reshape_float16.mod.py). Do not edit
+#include "../../TestGenerated.h"
+
+namespace reshape_float16 {
+// Generated reshape_float16 test
+#include "generated/examples/reshape_float16.example.cpp"
+// Generated model constructor
+#include "generated/models/reshape_float16.model.cpp"
+} // namespace reshape_float16
+
+TEST_F(GeneratedTests, reshape_float16) {
+    execute(reshape_float16::CreateModel,
+            reshape_float16::is_ignored,
+            reshape_float16::get_examples());
+}
+
diff --git a/nn/runtime/test/generated/tests/reshape_quant8.mod.py.cpp b/nn/runtime/test/generated/tests/reshape_quant8.mod.py.cpp
index 35dee66..4bd6576 100644
--- a/nn/runtime/test/generated/tests/reshape_quant8.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/reshape_quant8.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, reshape_quant8) {
     execute(reshape_quant8::CreateModel,
             reshape_quant8::is_ignored,
-            reshape_quant8::examples);
+            reshape_quant8::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/reshape_quant8_weights_as_inputs.mod.py.cpp b/nn/runtime/test/generated/tests/reshape_quant8_weights_as_inputs.mod.py.cpp
index 905d20b..75ef4ad 100644
--- a/nn/runtime/test/generated/tests/reshape_quant8_weights_as_inputs.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/reshape_quant8_weights_as_inputs.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, reshape_quant8_weights_as_inputs) {
     execute(reshape_quant8_weights_as_inputs::CreateModel,
             reshape_quant8_weights_as_inputs::is_ignored,
-            reshape_quant8_weights_as_inputs::examples);
+            reshape_quant8_weights_as_inputs::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/reshape_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/reshape_relaxed.mod.py.cpp
index 014ff76..f210591 100644
--- a/nn/runtime/test/generated/tests/reshape_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/reshape_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, reshape_relaxed) {
     execute(reshape_relaxed::CreateModel,
             reshape_relaxed::is_ignored,
-            reshape_relaxed::examples);
+            reshape_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/reshape_weights_as_inputs.mod.py.cpp b/nn/runtime/test/generated/tests/reshape_weights_as_inputs.mod.py.cpp
index a985d6d..a177770 100644
--- a/nn/runtime/test/generated/tests/reshape_weights_as_inputs.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/reshape_weights_as_inputs.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, reshape_weights_as_inputs) {
     execute(reshape_weights_as_inputs::CreateModel,
             reshape_weights_as_inputs::is_ignored,
-            reshape_weights_as_inputs::examples);
+            reshape_weights_as_inputs::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/reshape_weights_as_inputs_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/reshape_weights_as_inputs_relaxed.mod.py.cpp
index 80dc5ff..5783793 100644
--- a/nn/runtime/test/generated/tests/reshape_weights_as_inputs_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/reshape_weights_as_inputs_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, reshape_weights_as_inputs_relaxed) {
     execute(reshape_weights_as_inputs_relaxed::CreateModel,
             reshape_weights_as_inputs_relaxed::is_ignored,
-            reshape_weights_as_inputs_relaxed::examples);
+            reshape_weights_as_inputs_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/resize_bilinear.mod.py.cpp b/nn/runtime/test/generated/tests/resize_bilinear.mod.py.cpp
index 360e42d..788f0a4 100644
--- a/nn/runtime/test/generated/tests/resize_bilinear.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/resize_bilinear.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, resize_bilinear) {
     execute(resize_bilinear::CreateModel,
             resize_bilinear::is_ignored,
-            resize_bilinear::examples);
+            resize_bilinear::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/resize_bilinear_2.mod.py.cpp b/nn/runtime/test/generated/tests/resize_bilinear_2.mod.py.cpp
index 9d3e635..45fe613 100644
--- a/nn/runtime/test/generated/tests/resize_bilinear_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/resize_bilinear_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, resize_bilinear_2) {
     execute(resize_bilinear_2::CreateModel,
             resize_bilinear_2::is_ignored,
-            resize_bilinear_2::examples);
+            resize_bilinear_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/resize_bilinear_2_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/resize_bilinear_2_relaxed.mod.py.cpp
index 3e3edee..7491a4e 100644
--- a/nn/runtime/test/generated/tests/resize_bilinear_2_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/resize_bilinear_2_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, resize_bilinear_2_relaxed) {
     execute(resize_bilinear_2_relaxed::CreateModel,
             resize_bilinear_2_relaxed::is_ignored,
-            resize_bilinear_2_relaxed::examples);
+            resize_bilinear_2_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/resize_bilinear_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/resize_bilinear_relaxed.mod.py.cpp
index 92ca320..300d9dc 100644
--- a/nn/runtime/test/generated/tests/resize_bilinear_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/resize_bilinear_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, resize_bilinear_relaxed) {
     execute(resize_bilinear_relaxed::CreateModel,
             resize_bilinear_relaxed::is_ignored,
-            resize_bilinear_relaxed::examples);
+            resize_bilinear_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/resize_bilinear_v1_2.mod.py.cpp b/nn/runtime/test/generated/tests/resize_bilinear_v1_2.mod.py.cpp
index 5e6ab8e..347d84a 100644
--- a/nn/runtime/test/generated/tests/resize_bilinear_v1_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/resize_bilinear_v1_2.mod.py.cpp
@@ -12,48 +12,72 @@
 TEST_F(GeneratedTests, resize_bilinear_v1_2_nhwc) {
     execute(resize_bilinear_v1_2::CreateModel_nhwc,
             resize_bilinear_v1_2::is_ignored_nhwc,
-            resize_bilinear_v1_2::examples_nhwc);
+            resize_bilinear_v1_2::get_examples_nhwc());
 }
 
 TEST_F(GeneratedTests, resize_bilinear_v1_2_nhwc_relaxed) {
     execute(resize_bilinear_v1_2::CreateModel_nhwc_relaxed,
             resize_bilinear_v1_2::is_ignored_nhwc_relaxed,
-            resize_bilinear_v1_2::examples_nhwc_relaxed);
+            resize_bilinear_v1_2::get_examples_nhwc_relaxed());
+}
+
+TEST_F(GeneratedTests, resize_bilinear_v1_2_nhwc_float16) {
+    execute(resize_bilinear_v1_2::CreateModel_nhwc_float16,
+            resize_bilinear_v1_2::is_ignored_nhwc_float16,
+            resize_bilinear_v1_2::get_examples_nhwc_float16());
 }
 
 TEST_F(GeneratedTests, resize_bilinear_v1_2_nchw) {
     execute(resize_bilinear_v1_2::CreateModel_nchw,
             resize_bilinear_v1_2::is_ignored_nchw,
-            resize_bilinear_v1_2::examples_nchw);
+            resize_bilinear_v1_2::get_examples_nchw());
 }
 
 TEST_F(GeneratedTests, resize_bilinear_v1_2_nchw_relaxed) {
     execute(resize_bilinear_v1_2::CreateModel_nchw_relaxed,
             resize_bilinear_v1_2::is_ignored_nchw_relaxed,
-            resize_bilinear_v1_2::examples_nchw_relaxed);
+            resize_bilinear_v1_2::get_examples_nchw_relaxed());
+}
+
+TEST_F(GeneratedTests, resize_bilinear_v1_2_nchw_float16) {
+    execute(resize_bilinear_v1_2::CreateModel_nchw_float16,
+            resize_bilinear_v1_2::is_ignored_nchw_float16,
+            resize_bilinear_v1_2::get_examples_nchw_float16());
 }
 
 TEST_F(GeneratedTests, resize_bilinear_v1_2_nhwc_2) {
     execute(resize_bilinear_v1_2::CreateModel_nhwc_2,
             resize_bilinear_v1_2::is_ignored_nhwc_2,
-            resize_bilinear_v1_2::examples_nhwc_2);
+            resize_bilinear_v1_2::get_examples_nhwc_2());
 }
 
 TEST_F(GeneratedTests, resize_bilinear_v1_2_nhwc_relaxed_2) {
     execute(resize_bilinear_v1_2::CreateModel_nhwc_relaxed_2,
             resize_bilinear_v1_2::is_ignored_nhwc_relaxed_2,
-            resize_bilinear_v1_2::examples_nhwc_relaxed_2);
+            resize_bilinear_v1_2::get_examples_nhwc_relaxed_2());
+}
+
+TEST_F(GeneratedTests, resize_bilinear_v1_2_nhwc_float16_2) {
+    execute(resize_bilinear_v1_2::CreateModel_nhwc_float16_2,
+            resize_bilinear_v1_2::is_ignored_nhwc_float16_2,
+            resize_bilinear_v1_2::get_examples_nhwc_float16_2());
 }
 
 TEST_F(GeneratedTests, resize_bilinear_v1_2_nchw_2) {
     execute(resize_bilinear_v1_2::CreateModel_nchw_2,
             resize_bilinear_v1_2::is_ignored_nchw_2,
-            resize_bilinear_v1_2::examples_nchw_2);
+            resize_bilinear_v1_2::get_examples_nchw_2());
 }
 
 TEST_F(GeneratedTests, resize_bilinear_v1_2_nchw_relaxed_2) {
     execute(resize_bilinear_v1_2::CreateModel_nchw_relaxed_2,
             resize_bilinear_v1_2::is_ignored_nchw_relaxed_2,
-            resize_bilinear_v1_2::examples_nchw_relaxed_2);
+            resize_bilinear_v1_2::get_examples_nchw_relaxed_2());
+}
+
+TEST_F(GeneratedTests, resize_bilinear_v1_2_nchw_float16_2) {
+    execute(resize_bilinear_v1_2::CreateModel_nchw_float16_2,
+            resize_bilinear_v1_2::is_ignored_nchw_float16_2,
+            resize_bilinear_v1_2::get_examples_nchw_float16_2());
 }
 
diff --git a/nn/runtime/test/generated/tests/rnn.mod.py.cpp b/nn/runtime/test/generated/tests/rnn.mod.py.cpp
index 5fb3b76..3df79ea 100644
--- a/nn/runtime/test/generated/tests/rnn.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/rnn.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, rnn) {
     execute(rnn::CreateModel,
             rnn::is_ignored,
-            rnn::examples);
+            rnn::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/rnn_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/rnn_relaxed.mod.py.cpp
index 71e566d..ecc6c01 100644
--- a/nn/runtime/test/generated/tests/rnn_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/rnn_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, rnn_relaxed) {
     execute(rnn_relaxed::CreateModel,
             rnn_relaxed::is_ignored,
-            rnn_relaxed::examples);
+            rnn_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/rnn_state.mod.py.cpp b/nn/runtime/test/generated/tests/rnn_state.mod.py.cpp
index b071289..29596f6 100644
--- a/nn/runtime/test/generated/tests/rnn_state.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/rnn_state.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, rnn_state) {
     execute(rnn_state::CreateModel,
             rnn_state::is_ignored,
-            rnn_state::examples);
+            rnn_state::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/rnn_state_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/rnn_state_relaxed.mod.py.cpp
index 36f7021..1ed9028 100644
--- a/nn/runtime/test/generated/tests/rnn_state_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/rnn_state_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, rnn_state_relaxed) {
     execute(rnn_state_relaxed::CreateModel,
             rnn_state_relaxed::is_ignored,
-            rnn_state_relaxed::examples);
+            rnn_state_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/roi_align.mod.py.cpp b/nn/runtime/test/generated/tests/roi_align.mod.py.cpp
index 7314521..82cc178 100644
--- a/nn/runtime/test/generated/tests/roi_align.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/roi_align.mod.py.cpp
@@ -12,108 +12,108 @@
 TEST_F(GeneratedTests, roi_align_nhwc) {
     execute(roi_align::CreateModel_nhwc,
             roi_align::is_ignored_nhwc,
-            roi_align::examples_nhwc);
+            roi_align::get_examples_nhwc());
 }
 
 TEST_F(GeneratedTests, roi_align_nhwc_relaxed) {
     execute(roi_align::CreateModel_nhwc_relaxed,
             roi_align::is_ignored_nhwc_relaxed,
-            roi_align::examples_nhwc_relaxed);
+            roi_align::get_examples_nhwc_relaxed());
 }
 
 TEST_F(GeneratedTests, roi_align_nhwc_quant8) {
     execute(roi_align::CreateModel_nhwc_quant8,
             roi_align::is_ignored_nhwc_quant8,
-            roi_align::examples_nhwc_quant8);
+            roi_align::get_examples_nhwc_quant8());
 }
 
 TEST_F(GeneratedTests, roi_align_nchw) {
     execute(roi_align::CreateModel_nchw,
             roi_align::is_ignored_nchw,
-            roi_align::examples_nchw);
+            roi_align::get_examples_nchw());
 }
 
 TEST_F(GeneratedTests, roi_align_nchw_relaxed) {
     execute(roi_align::CreateModel_nchw_relaxed,
             roi_align::is_ignored_nchw_relaxed,
-            roi_align::examples_nchw_relaxed);
+            roi_align::get_examples_nchw_relaxed());
 }
 
 TEST_F(GeneratedTests, roi_align_nchw_quant8) {
     execute(roi_align::CreateModel_nchw_quant8,
             roi_align::is_ignored_nchw_quant8,
-            roi_align::examples_nchw_quant8);
+            roi_align::get_examples_nchw_quant8());
 }
 
 TEST_F(GeneratedTests, roi_align_nhwc_2) {
     execute(roi_align::CreateModel_nhwc_2,
             roi_align::is_ignored_nhwc_2,
-            roi_align::examples_nhwc_2);
+            roi_align::get_examples_nhwc_2());
 }
 
 TEST_F(GeneratedTests, roi_align_nhwc_relaxed_2) {
     execute(roi_align::CreateModel_nhwc_relaxed_2,
             roi_align::is_ignored_nhwc_relaxed_2,
-            roi_align::examples_nhwc_relaxed_2);
+            roi_align::get_examples_nhwc_relaxed_2());
 }
 
 TEST_F(GeneratedTests, roi_align_nhwc_quant8_2) {
     execute(roi_align::CreateModel_nhwc_quant8_2,
             roi_align::is_ignored_nhwc_quant8_2,
-            roi_align::examples_nhwc_quant8_2);
+            roi_align::get_examples_nhwc_quant8_2());
 }
 
 TEST_F(GeneratedTests, roi_align_nchw_2) {
     execute(roi_align::CreateModel_nchw_2,
             roi_align::is_ignored_nchw_2,
-            roi_align::examples_nchw_2);
+            roi_align::get_examples_nchw_2());
 }
 
 TEST_F(GeneratedTests, roi_align_nchw_relaxed_2) {
     execute(roi_align::CreateModel_nchw_relaxed_2,
             roi_align::is_ignored_nchw_relaxed_2,
-            roi_align::examples_nchw_relaxed_2);
+            roi_align::get_examples_nchw_relaxed_2());
 }
 
 TEST_F(GeneratedTests, roi_align_nchw_quant8_2) {
     execute(roi_align::CreateModel_nchw_quant8_2,
             roi_align::is_ignored_nchw_quant8_2,
-            roi_align::examples_nchw_quant8_2);
+            roi_align::get_examples_nchw_quant8_2());
 }
 
 TEST_F(GeneratedTests, roi_align_nhwc_3) {
     execute(roi_align::CreateModel_nhwc_3,
             roi_align::is_ignored_nhwc_3,
-            roi_align::examples_nhwc_3);
+            roi_align::get_examples_nhwc_3());
 }
 
 TEST_F(GeneratedTests, roi_align_nhwc_relaxed_3) {
     execute(roi_align::CreateModel_nhwc_relaxed_3,
             roi_align::is_ignored_nhwc_relaxed_3,
-            roi_align::examples_nhwc_relaxed_3);
+            roi_align::get_examples_nhwc_relaxed_3());
 }
 
 TEST_F(GeneratedTests, roi_align_nhwc_quant8_3) {
     execute(roi_align::CreateModel_nhwc_quant8_3,
             roi_align::is_ignored_nhwc_quant8_3,
-            roi_align::examples_nhwc_quant8_3);
+            roi_align::get_examples_nhwc_quant8_3());
 }
 
 TEST_F(GeneratedTests, roi_align_nchw_3) {
     execute(roi_align::CreateModel_nchw_3,
             roi_align::is_ignored_nchw_3,
-            roi_align::examples_nchw_3);
+            roi_align::get_examples_nchw_3());
 }
 
 TEST_F(GeneratedTests, roi_align_nchw_relaxed_3) {
     execute(roi_align::CreateModel_nchw_relaxed_3,
             roi_align::is_ignored_nchw_relaxed_3,
-            roi_align::examples_nchw_relaxed_3);
+            roi_align::get_examples_nchw_relaxed_3());
 }
 
 TEST_F(GeneratedTests, roi_align_nchw_quant8_3) {
     execute(roi_align::CreateModel_nchw_quant8_3,
             roi_align::is_ignored_nchw_quant8_3,
-            roi_align::examples_nchw_quant8_3);
+            roi_align::get_examples_nchw_quant8_3());
 }
 
diff --git a/nn/runtime/test/generated/tests/roi_pooling.mod.py.cpp b/nn/runtime/test/generated/tests/roi_pooling.mod.py.cpp
new file mode 100644
index 0000000..923e9ea
--- /dev/null
+++ b/nn/runtime/test/generated/tests/roi_pooling.mod.py.cpp
@@ -0,0 +1,83 @@
+// clang-format off
+// Generated file (from: roi_pooling.mod.py). Do not edit
+#include "../../TestGenerated.h"
+
+namespace roi_pooling {
+// Generated roi_pooling test
+#include "generated/examples/roi_pooling.example.cpp"
+// Generated model constructor
+#include "generated/models/roi_pooling.model.cpp"
+} // namespace roi_pooling
+
+TEST_F(GeneratedTests, roi_pooling_nhwc) {
+    execute(roi_pooling::CreateModel_nhwc,
+            roi_pooling::is_ignored_nhwc,
+            roi_pooling::get_examples_nhwc());
+}
+
+TEST_F(GeneratedTests, roi_pooling_nhwc_relaxed) {
+    execute(roi_pooling::CreateModel_nhwc_relaxed,
+            roi_pooling::is_ignored_nhwc_relaxed,
+            roi_pooling::get_examples_nhwc_relaxed());
+}
+
+TEST_F(GeneratedTests, roi_pooling_nhwc_quant8) {
+    execute(roi_pooling::CreateModel_nhwc_quant8,
+            roi_pooling::is_ignored_nhwc_quant8,
+            roi_pooling::get_examples_nhwc_quant8());
+}
+
+TEST_F(GeneratedTests, roi_pooling_nchw) {
+    execute(roi_pooling::CreateModel_nchw,
+            roi_pooling::is_ignored_nchw,
+            roi_pooling::get_examples_nchw());
+}
+
+TEST_F(GeneratedTests, roi_pooling_nchw_relaxed) {
+    execute(roi_pooling::CreateModel_nchw_relaxed,
+            roi_pooling::is_ignored_nchw_relaxed,
+            roi_pooling::get_examples_nchw_relaxed());
+}
+
+TEST_F(GeneratedTests, roi_pooling_nchw_quant8) {
+    execute(roi_pooling::CreateModel_nchw_quant8,
+            roi_pooling::is_ignored_nchw_quant8,
+            roi_pooling::get_examples_nchw_quant8());
+}
+
+TEST_F(GeneratedTests, roi_pooling_nhwc_2) {
+    execute(roi_pooling::CreateModel_nhwc_2,
+            roi_pooling::is_ignored_nhwc_2,
+            roi_pooling::get_examples_nhwc_2());
+}
+
+TEST_F(GeneratedTests, roi_pooling_nhwc_relaxed_2) {
+    execute(roi_pooling::CreateModel_nhwc_relaxed_2,
+            roi_pooling::is_ignored_nhwc_relaxed_2,
+            roi_pooling::get_examples_nhwc_relaxed_2());
+}
+
+TEST_F(GeneratedTests, roi_pooling_nhwc_quant8_2) {
+    execute(roi_pooling::CreateModel_nhwc_quant8_2,
+            roi_pooling::is_ignored_nhwc_quant8_2,
+            roi_pooling::get_examples_nhwc_quant8_2());
+}
+
+TEST_F(GeneratedTests, roi_pooling_nchw_2) {
+    execute(roi_pooling::CreateModel_nchw_2,
+            roi_pooling::is_ignored_nchw_2,
+            roi_pooling::get_examples_nchw_2());
+}
+
+TEST_F(GeneratedTests, roi_pooling_nchw_relaxed_2) {
+    execute(roi_pooling::CreateModel_nchw_relaxed_2,
+            roi_pooling::is_ignored_nchw_relaxed_2,
+            roi_pooling::get_examples_nchw_relaxed_2());
+}
+
+TEST_F(GeneratedTests, roi_pooling_nchw_quant8_2) {
+    execute(roi_pooling::CreateModel_nchw_quant8_2,
+            roi_pooling::is_ignored_nchw_quant8_2,
+            roi_pooling::get_examples_nchw_quant8_2());
+}
+
diff --git a/nn/runtime/test/generated/tests/rotated_bbox_transform.mod.py.cpp b/nn/runtime/test/generated/tests/rotated_bbox_transform.mod.py.cpp
index 5f54514..0817e5a 100644
--- a/nn/runtime/test/generated/tests/rotated_bbox_transform.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/rotated_bbox_transform.mod.py.cpp
@@ -12,48 +12,48 @@
 TEST_F(GeneratedTests, rotated_bbox_transform) {
     execute(rotated_bbox_transform::CreateModel,
             rotated_bbox_transform::is_ignored,
-            rotated_bbox_transform::examples);
+            rotated_bbox_transform::get_examples());
 }
 
 TEST_F(GeneratedTests, rotated_bbox_transform_relaxed) {
     execute(rotated_bbox_transform::CreateModel_relaxed,
             rotated_bbox_transform::is_ignored_relaxed,
-            rotated_bbox_transform::examples_relaxed);
+            rotated_bbox_transform::get_examples_relaxed());
 }
 
 TEST_F(GeneratedTests, rotated_bbox_transform_2) {
     execute(rotated_bbox_transform::CreateModel_2,
             rotated_bbox_transform::is_ignored_2,
-            rotated_bbox_transform::examples_2);
+            rotated_bbox_transform::get_examples_2());
 }
 
 TEST_F(GeneratedTests, rotated_bbox_transform_relaxed_2) {
     execute(rotated_bbox_transform::CreateModel_relaxed_2,
             rotated_bbox_transform::is_ignored_relaxed_2,
-            rotated_bbox_transform::examples_relaxed_2);
+            rotated_bbox_transform::get_examples_relaxed_2());
 }
 
 TEST_F(GeneratedTests, rotated_bbox_transform_3) {
     execute(rotated_bbox_transform::CreateModel_3,
             rotated_bbox_transform::is_ignored_3,
-            rotated_bbox_transform::examples_3);
+            rotated_bbox_transform::get_examples_3());
 }
 
 TEST_F(GeneratedTests, rotated_bbox_transform_relaxed_3) {
     execute(rotated_bbox_transform::CreateModel_relaxed_3,
             rotated_bbox_transform::is_ignored_relaxed_3,
-            rotated_bbox_transform::examples_relaxed_3);
+            rotated_bbox_transform::get_examples_relaxed_3());
 }
 
 TEST_F(GeneratedTests, rotated_bbox_transform_single_batch) {
     execute(rotated_bbox_transform::CreateModel_single_batch,
             rotated_bbox_transform::is_ignored_single_batch,
-            rotated_bbox_transform::examples_single_batch);
+            rotated_bbox_transform::get_examples_single_batch());
 }
 
 TEST_F(GeneratedTests, rotated_bbox_transform_single_batch_relaxed) {
     execute(rotated_bbox_transform::CreateModel_single_batch_relaxed,
             rotated_bbox_transform::is_ignored_single_batch_relaxed,
-            rotated_bbox_transform::examples_single_batch_relaxed);
+            rotated_bbox_transform::get_examples_single_batch_relaxed());
 }
 
diff --git a/nn/runtime/test/generated/tests/slice.mod.py.cpp b/nn/runtime/test/generated/tests/slice.mod.py.cpp
new file mode 100644
index 0000000..600affc
--- /dev/null
+++ b/nn/runtime/test/generated/tests/slice.mod.py.cpp
@@ -0,0 +1,155 @@
+// clang-format off
+// Generated file (from: slice.mod.py). Do not edit
+#include "../../TestGenerated.h"
+
+namespace slice {
+// Generated slice test
+#include "generated/examples/slice.example.cpp"
+// Generated model constructor
+#include "generated/models/slice.model.cpp"
+} // namespace slice
+
+TEST_F(GeneratedTests, slice) {
+    execute(slice::CreateModel,
+            slice::is_ignored,
+            slice::get_examples());
+}
+
+TEST_F(GeneratedTests, slice_relaxed) {
+    execute(slice::CreateModel_relaxed,
+            slice::is_ignored_relaxed,
+            slice::get_examples_relaxed());
+}
+
+TEST_F(GeneratedTests, slice_float16) {
+    execute(slice::CreateModel_float16,
+            slice::is_ignored_float16,
+            slice::get_examples_float16());
+}
+
+TEST_F(GeneratedTests, slice_2) {
+    execute(slice::CreateModel_2,
+            slice::is_ignored_2,
+            slice::get_examples_2());
+}
+
+TEST_F(GeneratedTests, slice_relaxed_2) {
+    execute(slice::CreateModel_relaxed_2,
+            slice::is_ignored_relaxed_2,
+            slice::get_examples_relaxed_2());
+}
+
+TEST_F(GeneratedTests, slice_float16_2) {
+    execute(slice::CreateModel_float16_2,
+            slice::is_ignored_float16_2,
+            slice::get_examples_float16_2());
+}
+
+TEST_F(GeneratedTests, slice_3) {
+    execute(slice::CreateModel_3,
+            slice::is_ignored_3,
+            slice::get_examples_3());
+}
+
+TEST_F(GeneratedTests, slice_relaxed_3) {
+    execute(slice::CreateModel_relaxed_3,
+            slice::is_ignored_relaxed_3,
+            slice::get_examples_relaxed_3());
+}
+
+TEST_F(GeneratedTests, slice_float16_3) {
+    execute(slice::CreateModel_float16_3,
+            slice::is_ignored_float16_3,
+            slice::get_examples_float16_3());
+}
+
+TEST_F(GeneratedTests, slice_4) {
+    execute(slice::CreateModel_4,
+            slice::is_ignored_4,
+            slice::get_examples_4());
+}
+
+TEST_F(GeneratedTests, slice_relaxed_4) {
+    execute(slice::CreateModel_relaxed_4,
+            slice::is_ignored_relaxed_4,
+            slice::get_examples_relaxed_4());
+}
+
+TEST_F(GeneratedTests, slice_float16_4) {
+    execute(slice::CreateModel_float16_4,
+            slice::is_ignored_float16_4,
+            slice::get_examples_float16_4());
+}
+
+TEST_F(GeneratedTests, slice_5) {
+    execute(slice::CreateModel_5,
+            slice::is_ignored_5,
+            slice::get_examples_5());
+}
+
+TEST_F(GeneratedTests, slice_relaxed_5) {
+    execute(slice::CreateModel_relaxed_5,
+            slice::is_ignored_relaxed_5,
+            slice::get_examples_relaxed_5());
+}
+
+TEST_F(GeneratedTests, slice_float16_5) {
+    execute(slice::CreateModel_float16_5,
+            slice::is_ignored_float16_5,
+            slice::get_examples_float16_5());
+}
+
+TEST_F(GeneratedTests, slice_6) {
+    execute(slice::CreateModel_6,
+            slice::is_ignored_6,
+            slice::get_examples_6());
+}
+
+TEST_F(GeneratedTests, slice_relaxed_6) {
+    execute(slice::CreateModel_relaxed_6,
+            slice::is_ignored_relaxed_6,
+            slice::get_examples_relaxed_6());
+}
+
+TEST_F(GeneratedTests, slice_float16_6) {
+    execute(slice::CreateModel_float16_6,
+            slice::is_ignored_float16_6,
+            slice::get_examples_float16_6());
+}
+
+TEST_F(GeneratedTests, slice_7) {
+    execute(slice::CreateModel_7,
+            slice::is_ignored_7,
+            slice::get_examples_7());
+}
+
+TEST_F(GeneratedTests, slice_relaxed_7) {
+    execute(slice::CreateModel_relaxed_7,
+            slice::is_ignored_relaxed_7,
+            slice::get_examples_relaxed_7());
+}
+
+TEST_F(GeneratedTests, slice_float16_7) {
+    execute(slice::CreateModel_float16_7,
+            slice::is_ignored_float16_7,
+            slice::get_examples_float16_7());
+}
+
+TEST_F(GeneratedTests, slice_8) {
+    execute(slice::CreateModel_8,
+            slice::is_ignored_8,
+            slice::get_examples_8());
+}
+
+TEST_F(GeneratedTests, slice_relaxed_8) {
+    execute(slice::CreateModel_relaxed_8,
+            slice::is_ignored_relaxed_8,
+            slice::get_examples_relaxed_8());
+}
+
+TEST_F(GeneratedTests, slice_float16_8) {
+    execute(slice::CreateModel_float16_8,
+            slice::is_ignored_float16_8,
+            slice::get_examples_float16_8());
+}
+
diff --git a/nn/runtime/test/generated/tests/softmax_float_1.mod.py.cpp b/nn/runtime/test/generated/tests/softmax_float_1.mod.py.cpp
index 609633d..56fe0cc 100644
--- a/nn/runtime/test/generated/tests/softmax_float_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/softmax_float_1.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, softmax_float_1) {
     execute(softmax_float_1::CreateModel,
             softmax_float_1::is_ignored,
-            softmax_float_1::examples);
+            softmax_float_1::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/softmax_float_1_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/softmax_float_1_relaxed.mod.py.cpp
index fb3a8e1..3c35275 100644
--- a/nn/runtime/test/generated/tests/softmax_float_1_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/softmax_float_1_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, softmax_float_1_relaxed) {
     execute(softmax_float_1_relaxed::CreateModel,
             softmax_float_1_relaxed::is_ignored,
-            softmax_float_1_relaxed::examples);
+            softmax_float_1_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/softmax_float_2.mod.py.cpp b/nn/runtime/test/generated/tests/softmax_float_2.mod.py.cpp
index e9bcf1c..e5262f8 100644
--- a/nn/runtime/test/generated/tests/softmax_float_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/softmax_float_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, softmax_float_2) {
     execute(softmax_float_2::CreateModel,
             softmax_float_2::is_ignored,
-            softmax_float_2::examples);
+            softmax_float_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/softmax_float_2_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/softmax_float_2_relaxed.mod.py.cpp
index 9cbacb0..34d549b 100644
--- a/nn/runtime/test/generated/tests/softmax_float_2_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/softmax_float_2_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, softmax_float_2_relaxed) {
     execute(softmax_float_2_relaxed::CreateModel,
             softmax_float_2_relaxed::is_ignored,
-            softmax_float_2_relaxed::examples);
+            softmax_float_2_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/softmax_quant8_1.mod.py.cpp b/nn/runtime/test/generated/tests/softmax_quant8_1.mod.py.cpp
index 9630571..adbb503 100644
--- a/nn/runtime/test/generated/tests/softmax_quant8_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/softmax_quant8_1.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, softmax_quant8_1) {
     execute(softmax_quant8_1::CreateModel,
             softmax_quant8_1::is_ignored,
-            softmax_quant8_1::examples);
+            softmax_quant8_1::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/softmax_quant8_2.mod.py.cpp b/nn/runtime/test/generated/tests/softmax_quant8_2.mod.py.cpp
index a44a160..bf3ab15 100644
--- a/nn/runtime/test/generated/tests/softmax_quant8_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/softmax_quant8_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, softmax_quant8_2) {
     execute(softmax_quant8_2::CreateModel,
             softmax_quant8_2::is_ignored,
-            softmax_quant8_2::examples);
+            softmax_quant8_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/softmax_v1_2.mod.py.cpp b/nn/runtime/test/generated/tests/softmax_v1_2.mod.py.cpp
index 54cded3..a0d2fe3 100644
--- a/nn/runtime/test/generated/tests/softmax_v1_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/softmax_v1_2.mod.py.cpp
@@ -12,1104 +12,1104 @@
 TEST_F(GeneratedTests, softmax_v1_2) {
     execute(softmax_v1_2::CreateModel,
             softmax_v1_2::is_ignored,
-            softmax_v1_2::examples);
+            softmax_v1_2::get_examples());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_dim1_axis0) {
     execute(softmax_v1_2::CreateModel_dim1_axis0,
             softmax_v1_2::is_ignored_dim1_axis0,
-            softmax_v1_2::examples_dim1_axis0);
+            softmax_v1_2::get_examples_dim1_axis0());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_dim3_axis2) {
     execute(softmax_v1_2::CreateModel_dim3_axis2,
             softmax_v1_2::is_ignored_dim3_axis2,
-            softmax_v1_2::examples_dim3_axis2);
+            softmax_v1_2::get_examples_dim3_axis2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_relaxed) {
     execute(softmax_v1_2::CreateModel_relaxed,
             softmax_v1_2::is_ignored_relaxed,
-            softmax_v1_2::examples_relaxed);
+            softmax_v1_2::get_examples_relaxed());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_relaxed_dim1_axis0) {
     execute(softmax_v1_2::CreateModel_relaxed_dim1_axis0,
             softmax_v1_2::is_ignored_relaxed_dim1_axis0,
-            softmax_v1_2::examples_relaxed_dim1_axis0);
+            softmax_v1_2::get_examples_relaxed_dim1_axis0());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_relaxed_dim3_axis2) {
     execute(softmax_v1_2::CreateModel_relaxed_dim3_axis2,
             softmax_v1_2::is_ignored_relaxed_dim3_axis2,
-            softmax_v1_2::examples_relaxed_dim3_axis2);
+            softmax_v1_2::get_examples_relaxed_dim3_axis2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_float16) {
     execute(softmax_v1_2::CreateModel_float16,
             softmax_v1_2::is_ignored_float16,
-            softmax_v1_2::examples_float16);
+            softmax_v1_2::get_examples_float16());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_float16_dim1_axis0) {
     execute(softmax_v1_2::CreateModel_float16_dim1_axis0,
             softmax_v1_2::is_ignored_float16_dim1_axis0,
-            softmax_v1_2::examples_float16_dim1_axis0);
+            softmax_v1_2::get_examples_float16_dim1_axis0());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_float16_dim3_axis2) {
     execute(softmax_v1_2::CreateModel_float16_dim3_axis2,
             softmax_v1_2::is_ignored_float16_dim3_axis2,
-            softmax_v1_2::examples_float16_dim3_axis2);
+            softmax_v1_2::get_examples_float16_dim3_axis2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_quant8) {
     execute(softmax_v1_2::CreateModel_quant8,
             softmax_v1_2::is_ignored_quant8,
-            softmax_v1_2::examples_quant8);
+            softmax_v1_2::get_examples_quant8());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_quant8_dim1_axis0) {
     execute(softmax_v1_2::CreateModel_quant8_dim1_axis0,
             softmax_v1_2::is_ignored_quant8_dim1_axis0,
-            softmax_v1_2::examples_quant8_dim1_axis0);
+            softmax_v1_2::get_examples_quant8_dim1_axis0());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_quant8_dim3_axis2) {
     execute(softmax_v1_2::CreateModel_quant8_dim3_axis2,
             softmax_v1_2::is_ignored_quant8_dim3_axis2,
-            softmax_v1_2::examples_quant8_dim3_axis2);
+            softmax_v1_2::get_examples_quant8_dim3_axis2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_2) {
     execute(softmax_v1_2::CreateModel_2,
             softmax_v1_2::is_ignored_2,
-            softmax_v1_2::examples_2);
+            softmax_v1_2::get_examples_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_dim1_axis0_2) {
     execute(softmax_v1_2::CreateModel_dim1_axis0_2,
             softmax_v1_2::is_ignored_dim1_axis0_2,
-            softmax_v1_2::examples_dim1_axis0_2);
+            softmax_v1_2::get_examples_dim1_axis0_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_dim3_axis2_2) {
     execute(softmax_v1_2::CreateModel_dim3_axis2_2,
             softmax_v1_2::is_ignored_dim3_axis2_2,
-            softmax_v1_2::examples_dim3_axis2_2);
+            softmax_v1_2::get_examples_dim3_axis2_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_relaxed_2) {
     execute(softmax_v1_2::CreateModel_relaxed_2,
             softmax_v1_2::is_ignored_relaxed_2,
-            softmax_v1_2::examples_relaxed_2);
+            softmax_v1_2::get_examples_relaxed_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_relaxed_dim1_axis0_2) {
     execute(softmax_v1_2::CreateModel_relaxed_dim1_axis0_2,
             softmax_v1_2::is_ignored_relaxed_dim1_axis0_2,
-            softmax_v1_2::examples_relaxed_dim1_axis0_2);
+            softmax_v1_2::get_examples_relaxed_dim1_axis0_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_relaxed_dim3_axis2_2) {
     execute(softmax_v1_2::CreateModel_relaxed_dim3_axis2_2,
             softmax_v1_2::is_ignored_relaxed_dim3_axis2_2,
-            softmax_v1_2::examples_relaxed_dim3_axis2_2);
+            softmax_v1_2::get_examples_relaxed_dim3_axis2_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_float16_2) {
     execute(softmax_v1_2::CreateModel_float16_2,
             softmax_v1_2::is_ignored_float16_2,
-            softmax_v1_2::examples_float16_2);
+            softmax_v1_2::get_examples_float16_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_float16_dim1_axis0_2) {
     execute(softmax_v1_2::CreateModel_float16_dim1_axis0_2,
             softmax_v1_2::is_ignored_float16_dim1_axis0_2,
-            softmax_v1_2::examples_float16_dim1_axis0_2);
+            softmax_v1_2::get_examples_float16_dim1_axis0_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_float16_dim3_axis2_2) {
     execute(softmax_v1_2::CreateModel_float16_dim3_axis2_2,
             softmax_v1_2::is_ignored_float16_dim3_axis2_2,
-            softmax_v1_2::examples_float16_dim3_axis2_2);
+            softmax_v1_2::get_examples_float16_dim3_axis2_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_quant8_2) {
     execute(softmax_v1_2::CreateModel_quant8_2,
             softmax_v1_2::is_ignored_quant8_2,
-            softmax_v1_2::examples_quant8_2);
+            softmax_v1_2::get_examples_quant8_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_quant8_dim1_axis0_2) {
     execute(softmax_v1_2::CreateModel_quant8_dim1_axis0_2,
             softmax_v1_2::is_ignored_quant8_dim1_axis0_2,
-            softmax_v1_2::examples_quant8_dim1_axis0_2);
+            softmax_v1_2::get_examples_quant8_dim1_axis0_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_quant8_dim3_axis2_2) {
     execute(softmax_v1_2::CreateModel_quant8_dim3_axis2_2,
             softmax_v1_2::is_ignored_quant8_dim3_axis2_2,
-            softmax_v1_2::examples_quant8_dim3_axis2_2);
+            softmax_v1_2::get_examples_quant8_dim3_axis2_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim4_axis0) {
     execute(softmax_v1_2::CreateModel_axis_dim4_axis0,
             softmax_v1_2::is_ignored_axis_dim4_axis0,
-            softmax_v1_2::examples_axis_dim4_axis0);
+            softmax_v1_2::get_examples_axis_dim4_axis0());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim4_axis0_neg) {
     execute(softmax_v1_2::CreateModel_axis_dim4_axis0_neg,
             softmax_v1_2::is_ignored_axis_dim4_axis0_neg,
-            softmax_v1_2::examples_axis_dim4_axis0_neg);
+            softmax_v1_2::get_examples_axis_dim4_axis0_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim4_axis1) {
     execute(softmax_v1_2::CreateModel_axis_dim4_axis1,
             softmax_v1_2::is_ignored_axis_dim4_axis1,
-            softmax_v1_2::examples_axis_dim4_axis1);
+            softmax_v1_2::get_examples_axis_dim4_axis1());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim4_axis1_neg) {
     execute(softmax_v1_2::CreateModel_axis_dim4_axis1_neg,
             softmax_v1_2::is_ignored_axis_dim4_axis1_neg,
-            softmax_v1_2::examples_axis_dim4_axis1_neg);
+            softmax_v1_2::get_examples_axis_dim4_axis1_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim4_axis2) {
     execute(softmax_v1_2::CreateModel_axis_dim4_axis2,
             softmax_v1_2::is_ignored_axis_dim4_axis2,
-            softmax_v1_2::examples_axis_dim4_axis2);
+            softmax_v1_2::get_examples_axis_dim4_axis2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim4_axis2_neg) {
     execute(softmax_v1_2::CreateModel_axis_dim4_axis2_neg,
             softmax_v1_2::is_ignored_axis_dim4_axis2_neg,
-            softmax_v1_2::examples_axis_dim4_axis2_neg);
+            softmax_v1_2::get_examples_axis_dim4_axis2_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim4_axis3) {
     execute(softmax_v1_2::CreateModel_axis_dim4_axis3,
             softmax_v1_2::is_ignored_axis_dim4_axis3,
-            softmax_v1_2::examples_axis_dim4_axis3);
+            softmax_v1_2::get_examples_axis_dim4_axis3());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim4_axis3_neg) {
     execute(softmax_v1_2::CreateModel_axis_dim4_axis3_neg,
             softmax_v1_2::is_ignored_axis_dim4_axis3_neg,
-            softmax_v1_2::examples_axis_dim4_axis3_neg);
+            softmax_v1_2::get_examples_axis_dim4_axis3_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim3_axis0) {
     execute(softmax_v1_2::CreateModel_axis_dim3_axis0,
             softmax_v1_2::is_ignored_axis_dim3_axis0,
-            softmax_v1_2::examples_axis_dim3_axis0);
+            softmax_v1_2::get_examples_axis_dim3_axis0());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim3_axis0_neg) {
     execute(softmax_v1_2::CreateModel_axis_dim3_axis0_neg,
             softmax_v1_2::is_ignored_axis_dim3_axis0_neg,
-            softmax_v1_2::examples_axis_dim3_axis0_neg);
+            softmax_v1_2::get_examples_axis_dim3_axis0_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim3_axis1) {
     execute(softmax_v1_2::CreateModel_axis_dim3_axis1,
             softmax_v1_2::is_ignored_axis_dim3_axis1,
-            softmax_v1_2::examples_axis_dim3_axis1);
+            softmax_v1_2::get_examples_axis_dim3_axis1());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim3_axis1_neg) {
     execute(softmax_v1_2::CreateModel_axis_dim3_axis1_neg,
             softmax_v1_2::is_ignored_axis_dim3_axis1_neg,
-            softmax_v1_2::examples_axis_dim3_axis1_neg);
+            softmax_v1_2::get_examples_axis_dim3_axis1_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim3_axis2) {
     execute(softmax_v1_2::CreateModel_axis_dim3_axis2,
             softmax_v1_2::is_ignored_axis_dim3_axis2,
-            softmax_v1_2::examples_axis_dim3_axis2);
+            softmax_v1_2::get_examples_axis_dim3_axis2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim3_axis2_neg) {
     execute(softmax_v1_2::CreateModel_axis_dim3_axis2_neg,
             softmax_v1_2::is_ignored_axis_dim3_axis2_neg,
-            softmax_v1_2::examples_axis_dim3_axis2_neg);
+            softmax_v1_2::get_examples_axis_dim3_axis2_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim2_axis0) {
     execute(softmax_v1_2::CreateModel_axis_dim2_axis0,
             softmax_v1_2::is_ignored_axis_dim2_axis0,
-            softmax_v1_2::examples_axis_dim2_axis0);
+            softmax_v1_2::get_examples_axis_dim2_axis0());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim2_axis0_neg) {
     execute(softmax_v1_2::CreateModel_axis_dim2_axis0_neg,
             softmax_v1_2::is_ignored_axis_dim2_axis0_neg,
-            softmax_v1_2::examples_axis_dim2_axis0_neg);
+            softmax_v1_2::get_examples_axis_dim2_axis0_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim2_axis1) {
     execute(softmax_v1_2::CreateModel_axis_dim2_axis1,
             softmax_v1_2::is_ignored_axis_dim2_axis1,
-            softmax_v1_2::examples_axis_dim2_axis1);
+            softmax_v1_2::get_examples_axis_dim2_axis1());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim2_axis1_neg) {
     execute(softmax_v1_2::CreateModel_axis_dim2_axis1_neg,
             softmax_v1_2::is_ignored_axis_dim2_axis1_neg,
-            softmax_v1_2::examples_axis_dim2_axis1_neg);
+            softmax_v1_2::get_examples_axis_dim2_axis1_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim1_axis0) {
     execute(softmax_v1_2::CreateModel_axis_dim1_axis0,
             softmax_v1_2::is_ignored_axis_dim1_axis0,
-            softmax_v1_2::examples_axis_dim1_axis0);
+            softmax_v1_2::get_examples_axis_dim1_axis0());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim1_axis0_neg) {
     execute(softmax_v1_2::CreateModel_axis_dim1_axis0_neg,
             softmax_v1_2::is_ignored_axis_dim1_axis0_neg,
-            softmax_v1_2::examples_axis_dim1_axis0_neg);
+            softmax_v1_2::get_examples_axis_dim1_axis0_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim4_axis0) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim4_axis0,
             softmax_v1_2::is_ignored_axis_relaxed_dim4_axis0,
-            softmax_v1_2::examples_axis_relaxed_dim4_axis0);
+            softmax_v1_2::get_examples_axis_relaxed_dim4_axis0());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim4_axis0_neg) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim4_axis0_neg,
             softmax_v1_2::is_ignored_axis_relaxed_dim4_axis0_neg,
-            softmax_v1_2::examples_axis_relaxed_dim4_axis0_neg);
+            softmax_v1_2::get_examples_axis_relaxed_dim4_axis0_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim4_axis1) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim4_axis1,
             softmax_v1_2::is_ignored_axis_relaxed_dim4_axis1,
-            softmax_v1_2::examples_axis_relaxed_dim4_axis1);
+            softmax_v1_2::get_examples_axis_relaxed_dim4_axis1());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim4_axis1_neg) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim4_axis1_neg,
             softmax_v1_2::is_ignored_axis_relaxed_dim4_axis1_neg,
-            softmax_v1_2::examples_axis_relaxed_dim4_axis1_neg);
+            softmax_v1_2::get_examples_axis_relaxed_dim4_axis1_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim4_axis2) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim4_axis2,
             softmax_v1_2::is_ignored_axis_relaxed_dim4_axis2,
-            softmax_v1_2::examples_axis_relaxed_dim4_axis2);
+            softmax_v1_2::get_examples_axis_relaxed_dim4_axis2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim4_axis2_neg) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim4_axis2_neg,
             softmax_v1_2::is_ignored_axis_relaxed_dim4_axis2_neg,
-            softmax_v1_2::examples_axis_relaxed_dim4_axis2_neg);
+            softmax_v1_2::get_examples_axis_relaxed_dim4_axis2_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim4_axis3) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim4_axis3,
             softmax_v1_2::is_ignored_axis_relaxed_dim4_axis3,
-            softmax_v1_2::examples_axis_relaxed_dim4_axis3);
+            softmax_v1_2::get_examples_axis_relaxed_dim4_axis3());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim4_axis3_neg) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim4_axis3_neg,
             softmax_v1_2::is_ignored_axis_relaxed_dim4_axis3_neg,
-            softmax_v1_2::examples_axis_relaxed_dim4_axis3_neg);
+            softmax_v1_2::get_examples_axis_relaxed_dim4_axis3_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim3_axis0) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim3_axis0,
             softmax_v1_2::is_ignored_axis_relaxed_dim3_axis0,
-            softmax_v1_2::examples_axis_relaxed_dim3_axis0);
+            softmax_v1_2::get_examples_axis_relaxed_dim3_axis0());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim3_axis0_neg) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim3_axis0_neg,
             softmax_v1_2::is_ignored_axis_relaxed_dim3_axis0_neg,
-            softmax_v1_2::examples_axis_relaxed_dim3_axis0_neg);
+            softmax_v1_2::get_examples_axis_relaxed_dim3_axis0_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim3_axis1) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim3_axis1,
             softmax_v1_2::is_ignored_axis_relaxed_dim3_axis1,
-            softmax_v1_2::examples_axis_relaxed_dim3_axis1);
+            softmax_v1_2::get_examples_axis_relaxed_dim3_axis1());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim3_axis1_neg) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim3_axis1_neg,
             softmax_v1_2::is_ignored_axis_relaxed_dim3_axis1_neg,
-            softmax_v1_2::examples_axis_relaxed_dim3_axis1_neg);
+            softmax_v1_2::get_examples_axis_relaxed_dim3_axis1_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim3_axis2) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim3_axis2,
             softmax_v1_2::is_ignored_axis_relaxed_dim3_axis2,
-            softmax_v1_2::examples_axis_relaxed_dim3_axis2);
+            softmax_v1_2::get_examples_axis_relaxed_dim3_axis2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim3_axis2_neg) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim3_axis2_neg,
             softmax_v1_2::is_ignored_axis_relaxed_dim3_axis2_neg,
-            softmax_v1_2::examples_axis_relaxed_dim3_axis2_neg);
+            softmax_v1_2::get_examples_axis_relaxed_dim3_axis2_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim2_axis0) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim2_axis0,
             softmax_v1_2::is_ignored_axis_relaxed_dim2_axis0,
-            softmax_v1_2::examples_axis_relaxed_dim2_axis0);
+            softmax_v1_2::get_examples_axis_relaxed_dim2_axis0());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim2_axis0_neg) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim2_axis0_neg,
             softmax_v1_2::is_ignored_axis_relaxed_dim2_axis0_neg,
-            softmax_v1_2::examples_axis_relaxed_dim2_axis0_neg);
+            softmax_v1_2::get_examples_axis_relaxed_dim2_axis0_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim2_axis1) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim2_axis1,
             softmax_v1_2::is_ignored_axis_relaxed_dim2_axis1,
-            softmax_v1_2::examples_axis_relaxed_dim2_axis1);
+            softmax_v1_2::get_examples_axis_relaxed_dim2_axis1());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim2_axis1_neg) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim2_axis1_neg,
             softmax_v1_2::is_ignored_axis_relaxed_dim2_axis1_neg,
-            softmax_v1_2::examples_axis_relaxed_dim2_axis1_neg);
+            softmax_v1_2::get_examples_axis_relaxed_dim2_axis1_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim1_axis0) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim1_axis0,
             softmax_v1_2::is_ignored_axis_relaxed_dim1_axis0,
-            softmax_v1_2::examples_axis_relaxed_dim1_axis0);
+            softmax_v1_2::get_examples_axis_relaxed_dim1_axis0());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim1_axis0_neg) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim1_axis0_neg,
             softmax_v1_2::is_ignored_axis_relaxed_dim1_axis0_neg,
-            softmax_v1_2::examples_axis_relaxed_dim1_axis0_neg);
+            softmax_v1_2::get_examples_axis_relaxed_dim1_axis0_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim4_axis0) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim4_axis0,
             softmax_v1_2::is_ignored_axis_float16_dim4_axis0,
-            softmax_v1_2::examples_axis_float16_dim4_axis0);
+            softmax_v1_2::get_examples_axis_float16_dim4_axis0());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim4_axis0_neg) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim4_axis0_neg,
             softmax_v1_2::is_ignored_axis_float16_dim4_axis0_neg,
-            softmax_v1_2::examples_axis_float16_dim4_axis0_neg);
+            softmax_v1_2::get_examples_axis_float16_dim4_axis0_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim4_axis1) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim4_axis1,
             softmax_v1_2::is_ignored_axis_float16_dim4_axis1,
-            softmax_v1_2::examples_axis_float16_dim4_axis1);
+            softmax_v1_2::get_examples_axis_float16_dim4_axis1());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim4_axis1_neg) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim4_axis1_neg,
             softmax_v1_2::is_ignored_axis_float16_dim4_axis1_neg,
-            softmax_v1_2::examples_axis_float16_dim4_axis1_neg);
+            softmax_v1_2::get_examples_axis_float16_dim4_axis1_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim4_axis2) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim4_axis2,
             softmax_v1_2::is_ignored_axis_float16_dim4_axis2,
-            softmax_v1_2::examples_axis_float16_dim4_axis2);
+            softmax_v1_2::get_examples_axis_float16_dim4_axis2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim4_axis2_neg) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim4_axis2_neg,
             softmax_v1_2::is_ignored_axis_float16_dim4_axis2_neg,
-            softmax_v1_2::examples_axis_float16_dim4_axis2_neg);
+            softmax_v1_2::get_examples_axis_float16_dim4_axis2_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim4_axis3) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim4_axis3,
             softmax_v1_2::is_ignored_axis_float16_dim4_axis3,
-            softmax_v1_2::examples_axis_float16_dim4_axis3);
+            softmax_v1_2::get_examples_axis_float16_dim4_axis3());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim4_axis3_neg) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim4_axis3_neg,
             softmax_v1_2::is_ignored_axis_float16_dim4_axis3_neg,
-            softmax_v1_2::examples_axis_float16_dim4_axis3_neg);
+            softmax_v1_2::get_examples_axis_float16_dim4_axis3_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim3_axis0) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim3_axis0,
             softmax_v1_2::is_ignored_axis_float16_dim3_axis0,
-            softmax_v1_2::examples_axis_float16_dim3_axis0);
+            softmax_v1_2::get_examples_axis_float16_dim3_axis0());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim3_axis0_neg) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim3_axis0_neg,
             softmax_v1_2::is_ignored_axis_float16_dim3_axis0_neg,
-            softmax_v1_2::examples_axis_float16_dim3_axis0_neg);
+            softmax_v1_2::get_examples_axis_float16_dim3_axis0_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim3_axis1) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim3_axis1,
             softmax_v1_2::is_ignored_axis_float16_dim3_axis1,
-            softmax_v1_2::examples_axis_float16_dim3_axis1);
+            softmax_v1_2::get_examples_axis_float16_dim3_axis1());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim3_axis1_neg) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim3_axis1_neg,
             softmax_v1_2::is_ignored_axis_float16_dim3_axis1_neg,
-            softmax_v1_2::examples_axis_float16_dim3_axis1_neg);
+            softmax_v1_2::get_examples_axis_float16_dim3_axis1_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim3_axis2) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim3_axis2,
             softmax_v1_2::is_ignored_axis_float16_dim3_axis2,
-            softmax_v1_2::examples_axis_float16_dim3_axis2);
+            softmax_v1_2::get_examples_axis_float16_dim3_axis2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim3_axis2_neg) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim3_axis2_neg,
             softmax_v1_2::is_ignored_axis_float16_dim3_axis2_neg,
-            softmax_v1_2::examples_axis_float16_dim3_axis2_neg);
+            softmax_v1_2::get_examples_axis_float16_dim3_axis2_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim2_axis0) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim2_axis0,
             softmax_v1_2::is_ignored_axis_float16_dim2_axis0,
-            softmax_v1_2::examples_axis_float16_dim2_axis0);
+            softmax_v1_2::get_examples_axis_float16_dim2_axis0());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim2_axis0_neg) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim2_axis0_neg,
             softmax_v1_2::is_ignored_axis_float16_dim2_axis0_neg,
-            softmax_v1_2::examples_axis_float16_dim2_axis0_neg);
+            softmax_v1_2::get_examples_axis_float16_dim2_axis0_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim2_axis1) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim2_axis1,
             softmax_v1_2::is_ignored_axis_float16_dim2_axis1,
-            softmax_v1_2::examples_axis_float16_dim2_axis1);
+            softmax_v1_2::get_examples_axis_float16_dim2_axis1());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim2_axis1_neg) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim2_axis1_neg,
             softmax_v1_2::is_ignored_axis_float16_dim2_axis1_neg,
-            softmax_v1_2::examples_axis_float16_dim2_axis1_neg);
+            softmax_v1_2::get_examples_axis_float16_dim2_axis1_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim1_axis0) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim1_axis0,
             softmax_v1_2::is_ignored_axis_float16_dim1_axis0,
-            softmax_v1_2::examples_axis_float16_dim1_axis0);
+            softmax_v1_2::get_examples_axis_float16_dim1_axis0());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim1_axis0_neg) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim1_axis0_neg,
             softmax_v1_2::is_ignored_axis_float16_dim1_axis0_neg,
-            softmax_v1_2::examples_axis_float16_dim1_axis0_neg);
+            softmax_v1_2::get_examples_axis_float16_dim1_axis0_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim4_axis0) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim4_axis0,
             softmax_v1_2::is_ignored_axis_quant8_dim4_axis0,
-            softmax_v1_2::examples_axis_quant8_dim4_axis0);
+            softmax_v1_2::get_examples_axis_quant8_dim4_axis0());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim4_axis0_neg) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim4_axis0_neg,
             softmax_v1_2::is_ignored_axis_quant8_dim4_axis0_neg,
-            softmax_v1_2::examples_axis_quant8_dim4_axis0_neg);
+            softmax_v1_2::get_examples_axis_quant8_dim4_axis0_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim4_axis1) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim4_axis1,
             softmax_v1_2::is_ignored_axis_quant8_dim4_axis1,
-            softmax_v1_2::examples_axis_quant8_dim4_axis1);
+            softmax_v1_2::get_examples_axis_quant8_dim4_axis1());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim4_axis1_neg) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim4_axis1_neg,
             softmax_v1_2::is_ignored_axis_quant8_dim4_axis1_neg,
-            softmax_v1_2::examples_axis_quant8_dim4_axis1_neg);
+            softmax_v1_2::get_examples_axis_quant8_dim4_axis1_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim4_axis2) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim4_axis2,
             softmax_v1_2::is_ignored_axis_quant8_dim4_axis2,
-            softmax_v1_2::examples_axis_quant8_dim4_axis2);
+            softmax_v1_2::get_examples_axis_quant8_dim4_axis2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim4_axis2_neg) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim4_axis2_neg,
             softmax_v1_2::is_ignored_axis_quant8_dim4_axis2_neg,
-            softmax_v1_2::examples_axis_quant8_dim4_axis2_neg);
+            softmax_v1_2::get_examples_axis_quant8_dim4_axis2_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim4_axis3) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim4_axis3,
             softmax_v1_2::is_ignored_axis_quant8_dim4_axis3,
-            softmax_v1_2::examples_axis_quant8_dim4_axis3);
+            softmax_v1_2::get_examples_axis_quant8_dim4_axis3());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim4_axis3_neg) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim4_axis3_neg,
             softmax_v1_2::is_ignored_axis_quant8_dim4_axis3_neg,
-            softmax_v1_2::examples_axis_quant8_dim4_axis3_neg);
+            softmax_v1_2::get_examples_axis_quant8_dim4_axis3_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim3_axis0) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim3_axis0,
             softmax_v1_2::is_ignored_axis_quant8_dim3_axis0,
-            softmax_v1_2::examples_axis_quant8_dim3_axis0);
+            softmax_v1_2::get_examples_axis_quant8_dim3_axis0());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim3_axis0_neg) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim3_axis0_neg,
             softmax_v1_2::is_ignored_axis_quant8_dim3_axis0_neg,
-            softmax_v1_2::examples_axis_quant8_dim3_axis0_neg);
+            softmax_v1_2::get_examples_axis_quant8_dim3_axis0_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim3_axis1) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim3_axis1,
             softmax_v1_2::is_ignored_axis_quant8_dim3_axis1,
-            softmax_v1_2::examples_axis_quant8_dim3_axis1);
+            softmax_v1_2::get_examples_axis_quant8_dim3_axis1());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim3_axis1_neg) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim3_axis1_neg,
             softmax_v1_2::is_ignored_axis_quant8_dim3_axis1_neg,
-            softmax_v1_2::examples_axis_quant8_dim3_axis1_neg);
+            softmax_v1_2::get_examples_axis_quant8_dim3_axis1_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim3_axis2) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim3_axis2,
             softmax_v1_2::is_ignored_axis_quant8_dim3_axis2,
-            softmax_v1_2::examples_axis_quant8_dim3_axis2);
+            softmax_v1_2::get_examples_axis_quant8_dim3_axis2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim3_axis2_neg) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim3_axis2_neg,
             softmax_v1_2::is_ignored_axis_quant8_dim3_axis2_neg,
-            softmax_v1_2::examples_axis_quant8_dim3_axis2_neg);
+            softmax_v1_2::get_examples_axis_quant8_dim3_axis2_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim2_axis0) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim2_axis0,
             softmax_v1_2::is_ignored_axis_quant8_dim2_axis0,
-            softmax_v1_2::examples_axis_quant8_dim2_axis0);
+            softmax_v1_2::get_examples_axis_quant8_dim2_axis0());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim2_axis0_neg) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim2_axis0_neg,
             softmax_v1_2::is_ignored_axis_quant8_dim2_axis0_neg,
-            softmax_v1_2::examples_axis_quant8_dim2_axis0_neg);
+            softmax_v1_2::get_examples_axis_quant8_dim2_axis0_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim2_axis1) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim2_axis1,
             softmax_v1_2::is_ignored_axis_quant8_dim2_axis1,
-            softmax_v1_2::examples_axis_quant8_dim2_axis1);
+            softmax_v1_2::get_examples_axis_quant8_dim2_axis1());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim2_axis1_neg) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim2_axis1_neg,
             softmax_v1_2::is_ignored_axis_quant8_dim2_axis1_neg,
-            softmax_v1_2::examples_axis_quant8_dim2_axis1_neg);
+            softmax_v1_2::get_examples_axis_quant8_dim2_axis1_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim1_axis0) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim1_axis0,
             softmax_v1_2::is_ignored_axis_quant8_dim1_axis0,
-            softmax_v1_2::examples_axis_quant8_dim1_axis0);
+            softmax_v1_2::get_examples_axis_quant8_dim1_axis0());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim1_axis0_neg) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim1_axis0_neg,
             softmax_v1_2::is_ignored_axis_quant8_dim1_axis0_neg,
-            softmax_v1_2::examples_axis_quant8_dim1_axis0_neg);
+            softmax_v1_2::get_examples_axis_quant8_dim1_axis0_neg());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim4_axis0_2) {
     execute(softmax_v1_2::CreateModel_axis_dim4_axis0_2,
             softmax_v1_2::is_ignored_axis_dim4_axis0_2,
-            softmax_v1_2::examples_axis_dim4_axis0_2);
+            softmax_v1_2::get_examples_axis_dim4_axis0_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim4_axis0_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_dim4_axis0_neg_2,
             softmax_v1_2::is_ignored_axis_dim4_axis0_neg_2,
-            softmax_v1_2::examples_axis_dim4_axis0_neg_2);
+            softmax_v1_2::get_examples_axis_dim4_axis0_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim4_axis1_2) {
     execute(softmax_v1_2::CreateModel_axis_dim4_axis1_2,
             softmax_v1_2::is_ignored_axis_dim4_axis1_2,
-            softmax_v1_2::examples_axis_dim4_axis1_2);
+            softmax_v1_2::get_examples_axis_dim4_axis1_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim4_axis1_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_dim4_axis1_neg_2,
             softmax_v1_2::is_ignored_axis_dim4_axis1_neg_2,
-            softmax_v1_2::examples_axis_dim4_axis1_neg_2);
+            softmax_v1_2::get_examples_axis_dim4_axis1_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim4_axis2_2) {
     execute(softmax_v1_2::CreateModel_axis_dim4_axis2_2,
             softmax_v1_2::is_ignored_axis_dim4_axis2_2,
-            softmax_v1_2::examples_axis_dim4_axis2_2);
+            softmax_v1_2::get_examples_axis_dim4_axis2_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim4_axis2_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_dim4_axis2_neg_2,
             softmax_v1_2::is_ignored_axis_dim4_axis2_neg_2,
-            softmax_v1_2::examples_axis_dim4_axis2_neg_2);
+            softmax_v1_2::get_examples_axis_dim4_axis2_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim4_axis3_2) {
     execute(softmax_v1_2::CreateModel_axis_dim4_axis3_2,
             softmax_v1_2::is_ignored_axis_dim4_axis3_2,
-            softmax_v1_2::examples_axis_dim4_axis3_2);
+            softmax_v1_2::get_examples_axis_dim4_axis3_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim4_axis3_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_dim4_axis3_neg_2,
             softmax_v1_2::is_ignored_axis_dim4_axis3_neg_2,
-            softmax_v1_2::examples_axis_dim4_axis3_neg_2);
+            softmax_v1_2::get_examples_axis_dim4_axis3_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim3_axis0_2) {
     execute(softmax_v1_2::CreateModel_axis_dim3_axis0_2,
             softmax_v1_2::is_ignored_axis_dim3_axis0_2,
-            softmax_v1_2::examples_axis_dim3_axis0_2);
+            softmax_v1_2::get_examples_axis_dim3_axis0_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim3_axis0_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_dim3_axis0_neg_2,
             softmax_v1_2::is_ignored_axis_dim3_axis0_neg_2,
-            softmax_v1_2::examples_axis_dim3_axis0_neg_2);
+            softmax_v1_2::get_examples_axis_dim3_axis0_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim3_axis1_2) {
     execute(softmax_v1_2::CreateModel_axis_dim3_axis1_2,
             softmax_v1_2::is_ignored_axis_dim3_axis1_2,
-            softmax_v1_2::examples_axis_dim3_axis1_2);
+            softmax_v1_2::get_examples_axis_dim3_axis1_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim3_axis1_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_dim3_axis1_neg_2,
             softmax_v1_2::is_ignored_axis_dim3_axis1_neg_2,
-            softmax_v1_2::examples_axis_dim3_axis1_neg_2);
+            softmax_v1_2::get_examples_axis_dim3_axis1_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim3_axis2_2) {
     execute(softmax_v1_2::CreateModel_axis_dim3_axis2_2,
             softmax_v1_2::is_ignored_axis_dim3_axis2_2,
-            softmax_v1_2::examples_axis_dim3_axis2_2);
+            softmax_v1_2::get_examples_axis_dim3_axis2_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim3_axis2_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_dim3_axis2_neg_2,
             softmax_v1_2::is_ignored_axis_dim3_axis2_neg_2,
-            softmax_v1_2::examples_axis_dim3_axis2_neg_2);
+            softmax_v1_2::get_examples_axis_dim3_axis2_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim2_axis0_2) {
     execute(softmax_v1_2::CreateModel_axis_dim2_axis0_2,
             softmax_v1_2::is_ignored_axis_dim2_axis0_2,
-            softmax_v1_2::examples_axis_dim2_axis0_2);
+            softmax_v1_2::get_examples_axis_dim2_axis0_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim2_axis0_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_dim2_axis0_neg_2,
             softmax_v1_2::is_ignored_axis_dim2_axis0_neg_2,
-            softmax_v1_2::examples_axis_dim2_axis0_neg_2);
+            softmax_v1_2::get_examples_axis_dim2_axis0_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim2_axis1_2) {
     execute(softmax_v1_2::CreateModel_axis_dim2_axis1_2,
             softmax_v1_2::is_ignored_axis_dim2_axis1_2,
-            softmax_v1_2::examples_axis_dim2_axis1_2);
+            softmax_v1_2::get_examples_axis_dim2_axis1_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim2_axis1_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_dim2_axis1_neg_2,
             softmax_v1_2::is_ignored_axis_dim2_axis1_neg_2,
-            softmax_v1_2::examples_axis_dim2_axis1_neg_2);
+            softmax_v1_2::get_examples_axis_dim2_axis1_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim1_axis0_2) {
     execute(softmax_v1_2::CreateModel_axis_dim1_axis0_2,
             softmax_v1_2::is_ignored_axis_dim1_axis0_2,
-            softmax_v1_2::examples_axis_dim1_axis0_2);
+            softmax_v1_2::get_examples_axis_dim1_axis0_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_dim1_axis0_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_dim1_axis0_neg_2,
             softmax_v1_2::is_ignored_axis_dim1_axis0_neg_2,
-            softmax_v1_2::examples_axis_dim1_axis0_neg_2);
+            softmax_v1_2::get_examples_axis_dim1_axis0_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim4_axis0_2) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim4_axis0_2,
             softmax_v1_2::is_ignored_axis_relaxed_dim4_axis0_2,
-            softmax_v1_2::examples_axis_relaxed_dim4_axis0_2);
+            softmax_v1_2::get_examples_axis_relaxed_dim4_axis0_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim4_axis0_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim4_axis0_neg_2,
             softmax_v1_2::is_ignored_axis_relaxed_dim4_axis0_neg_2,
-            softmax_v1_2::examples_axis_relaxed_dim4_axis0_neg_2);
+            softmax_v1_2::get_examples_axis_relaxed_dim4_axis0_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim4_axis1_2) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim4_axis1_2,
             softmax_v1_2::is_ignored_axis_relaxed_dim4_axis1_2,
-            softmax_v1_2::examples_axis_relaxed_dim4_axis1_2);
+            softmax_v1_2::get_examples_axis_relaxed_dim4_axis1_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim4_axis1_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim4_axis1_neg_2,
             softmax_v1_2::is_ignored_axis_relaxed_dim4_axis1_neg_2,
-            softmax_v1_2::examples_axis_relaxed_dim4_axis1_neg_2);
+            softmax_v1_2::get_examples_axis_relaxed_dim4_axis1_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim4_axis2_2) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim4_axis2_2,
             softmax_v1_2::is_ignored_axis_relaxed_dim4_axis2_2,
-            softmax_v1_2::examples_axis_relaxed_dim4_axis2_2);
+            softmax_v1_2::get_examples_axis_relaxed_dim4_axis2_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim4_axis2_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim4_axis2_neg_2,
             softmax_v1_2::is_ignored_axis_relaxed_dim4_axis2_neg_2,
-            softmax_v1_2::examples_axis_relaxed_dim4_axis2_neg_2);
+            softmax_v1_2::get_examples_axis_relaxed_dim4_axis2_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim4_axis3_2) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim4_axis3_2,
             softmax_v1_2::is_ignored_axis_relaxed_dim4_axis3_2,
-            softmax_v1_2::examples_axis_relaxed_dim4_axis3_2);
+            softmax_v1_2::get_examples_axis_relaxed_dim4_axis3_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim4_axis3_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim4_axis3_neg_2,
             softmax_v1_2::is_ignored_axis_relaxed_dim4_axis3_neg_2,
-            softmax_v1_2::examples_axis_relaxed_dim4_axis3_neg_2);
+            softmax_v1_2::get_examples_axis_relaxed_dim4_axis3_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim3_axis0_2) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim3_axis0_2,
             softmax_v1_2::is_ignored_axis_relaxed_dim3_axis0_2,
-            softmax_v1_2::examples_axis_relaxed_dim3_axis0_2);
+            softmax_v1_2::get_examples_axis_relaxed_dim3_axis0_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim3_axis0_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim3_axis0_neg_2,
             softmax_v1_2::is_ignored_axis_relaxed_dim3_axis0_neg_2,
-            softmax_v1_2::examples_axis_relaxed_dim3_axis0_neg_2);
+            softmax_v1_2::get_examples_axis_relaxed_dim3_axis0_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim3_axis1_2) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim3_axis1_2,
             softmax_v1_2::is_ignored_axis_relaxed_dim3_axis1_2,
-            softmax_v1_2::examples_axis_relaxed_dim3_axis1_2);
+            softmax_v1_2::get_examples_axis_relaxed_dim3_axis1_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim3_axis1_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim3_axis1_neg_2,
             softmax_v1_2::is_ignored_axis_relaxed_dim3_axis1_neg_2,
-            softmax_v1_2::examples_axis_relaxed_dim3_axis1_neg_2);
+            softmax_v1_2::get_examples_axis_relaxed_dim3_axis1_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim3_axis2_2) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim3_axis2_2,
             softmax_v1_2::is_ignored_axis_relaxed_dim3_axis2_2,
-            softmax_v1_2::examples_axis_relaxed_dim3_axis2_2);
+            softmax_v1_2::get_examples_axis_relaxed_dim3_axis2_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim3_axis2_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim3_axis2_neg_2,
             softmax_v1_2::is_ignored_axis_relaxed_dim3_axis2_neg_2,
-            softmax_v1_2::examples_axis_relaxed_dim3_axis2_neg_2);
+            softmax_v1_2::get_examples_axis_relaxed_dim3_axis2_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim2_axis0_2) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim2_axis0_2,
             softmax_v1_2::is_ignored_axis_relaxed_dim2_axis0_2,
-            softmax_v1_2::examples_axis_relaxed_dim2_axis0_2);
+            softmax_v1_2::get_examples_axis_relaxed_dim2_axis0_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim2_axis0_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim2_axis0_neg_2,
             softmax_v1_2::is_ignored_axis_relaxed_dim2_axis0_neg_2,
-            softmax_v1_2::examples_axis_relaxed_dim2_axis0_neg_2);
+            softmax_v1_2::get_examples_axis_relaxed_dim2_axis0_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim2_axis1_2) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim2_axis1_2,
             softmax_v1_2::is_ignored_axis_relaxed_dim2_axis1_2,
-            softmax_v1_2::examples_axis_relaxed_dim2_axis1_2);
+            softmax_v1_2::get_examples_axis_relaxed_dim2_axis1_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim2_axis1_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim2_axis1_neg_2,
             softmax_v1_2::is_ignored_axis_relaxed_dim2_axis1_neg_2,
-            softmax_v1_2::examples_axis_relaxed_dim2_axis1_neg_2);
+            softmax_v1_2::get_examples_axis_relaxed_dim2_axis1_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim1_axis0_2) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim1_axis0_2,
             softmax_v1_2::is_ignored_axis_relaxed_dim1_axis0_2,
-            softmax_v1_2::examples_axis_relaxed_dim1_axis0_2);
+            softmax_v1_2::get_examples_axis_relaxed_dim1_axis0_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_relaxed_dim1_axis0_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_relaxed_dim1_axis0_neg_2,
             softmax_v1_2::is_ignored_axis_relaxed_dim1_axis0_neg_2,
-            softmax_v1_2::examples_axis_relaxed_dim1_axis0_neg_2);
+            softmax_v1_2::get_examples_axis_relaxed_dim1_axis0_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim4_axis0_2) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim4_axis0_2,
             softmax_v1_2::is_ignored_axis_float16_dim4_axis0_2,
-            softmax_v1_2::examples_axis_float16_dim4_axis0_2);
+            softmax_v1_2::get_examples_axis_float16_dim4_axis0_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim4_axis0_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim4_axis0_neg_2,
             softmax_v1_2::is_ignored_axis_float16_dim4_axis0_neg_2,
-            softmax_v1_2::examples_axis_float16_dim4_axis0_neg_2);
+            softmax_v1_2::get_examples_axis_float16_dim4_axis0_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim4_axis1_2) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim4_axis1_2,
             softmax_v1_2::is_ignored_axis_float16_dim4_axis1_2,
-            softmax_v1_2::examples_axis_float16_dim4_axis1_2);
+            softmax_v1_2::get_examples_axis_float16_dim4_axis1_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim4_axis1_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim4_axis1_neg_2,
             softmax_v1_2::is_ignored_axis_float16_dim4_axis1_neg_2,
-            softmax_v1_2::examples_axis_float16_dim4_axis1_neg_2);
+            softmax_v1_2::get_examples_axis_float16_dim4_axis1_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim4_axis2_2) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim4_axis2_2,
             softmax_v1_2::is_ignored_axis_float16_dim4_axis2_2,
-            softmax_v1_2::examples_axis_float16_dim4_axis2_2);
+            softmax_v1_2::get_examples_axis_float16_dim4_axis2_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim4_axis2_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim4_axis2_neg_2,
             softmax_v1_2::is_ignored_axis_float16_dim4_axis2_neg_2,
-            softmax_v1_2::examples_axis_float16_dim4_axis2_neg_2);
+            softmax_v1_2::get_examples_axis_float16_dim4_axis2_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim4_axis3_2) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim4_axis3_2,
             softmax_v1_2::is_ignored_axis_float16_dim4_axis3_2,
-            softmax_v1_2::examples_axis_float16_dim4_axis3_2);
+            softmax_v1_2::get_examples_axis_float16_dim4_axis3_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim4_axis3_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim4_axis3_neg_2,
             softmax_v1_2::is_ignored_axis_float16_dim4_axis3_neg_2,
-            softmax_v1_2::examples_axis_float16_dim4_axis3_neg_2);
+            softmax_v1_2::get_examples_axis_float16_dim4_axis3_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim3_axis0_2) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim3_axis0_2,
             softmax_v1_2::is_ignored_axis_float16_dim3_axis0_2,
-            softmax_v1_2::examples_axis_float16_dim3_axis0_2);
+            softmax_v1_2::get_examples_axis_float16_dim3_axis0_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim3_axis0_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim3_axis0_neg_2,
             softmax_v1_2::is_ignored_axis_float16_dim3_axis0_neg_2,
-            softmax_v1_2::examples_axis_float16_dim3_axis0_neg_2);
+            softmax_v1_2::get_examples_axis_float16_dim3_axis0_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim3_axis1_2) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim3_axis1_2,
             softmax_v1_2::is_ignored_axis_float16_dim3_axis1_2,
-            softmax_v1_2::examples_axis_float16_dim3_axis1_2);
+            softmax_v1_2::get_examples_axis_float16_dim3_axis1_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim3_axis1_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim3_axis1_neg_2,
             softmax_v1_2::is_ignored_axis_float16_dim3_axis1_neg_2,
-            softmax_v1_2::examples_axis_float16_dim3_axis1_neg_2);
+            softmax_v1_2::get_examples_axis_float16_dim3_axis1_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim3_axis2_2) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim3_axis2_2,
             softmax_v1_2::is_ignored_axis_float16_dim3_axis2_2,
-            softmax_v1_2::examples_axis_float16_dim3_axis2_2);
+            softmax_v1_2::get_examples_axis_float16_dim3_axis2_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim3_axis2_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim3_axis2_neg_2,
             softmax_v1_2::is_ignored_axis_float16_dim3_axis2_neg_2,
-            softmax_v1_2::examples_axis_float16_dim3_axis2_neg_2);
+            softmax_v1_2::get_examples_axis_float16_dim3_axis2_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim2_axis0_2) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim2_axis0_2,
             softmax_v1_2::is_ignored_axis_float16_dim2_axis0_2,
-            softmax_v1_2::examples_axis_float16_dim2_axis0_2);
+            softmax_v1_2::get_examples_axis_float16_dim2_axis0_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim2_axis0_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim2_axis0_neg_2,
             softmax_v1_2::is_ignored_axis_float16_dim2_axis0_neg_2,
-            softmax_v1_2::examples_axis_float16_dim2_axis0_neg_2);
+            softmax_v1_2::get_examples_axis_float16_dim2_axis0_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim2_axis1_2) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim2_axis1_2,
             softmax_v1_2::is_ignored_axis_float16_dim2_axis1_2,
-            softmax_v1_2::examples_axis_float16_dim2_axis1_2);
+            softmax_v1_2::get_examples_axis_float16_dim2_axis1_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim2_axis1_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim2_axis1_neg_2,
             softmax_v1_2::is_ignored_axis_float16_dim2_axis1_neg_2,
-            softmax_v1_2::examples_axis_float16_dim2_axis1_neg_2);
+            softmax_v1_2::get_examples_axis_float16_dim2_axis1_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim1_axis0_2) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim1_axis0_2,
             softmax_v1_2::is_ignored_axis_float16_dim1_axis0_2,
-            softmax_v1_2::examples_axis_float16_dim1_axis0_2);
+            softmax_v1_2::get_examples_axis_float16_dim1_axis0_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_float16_dim1_axis0_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_float16_dim1_axis0_neg_2,
             softmax_v1_2::is_ignored_axis_float16_dim1_axis0_neg_2,
-            softmax_v1_2::examples_axis_float16_dim1_axis0_neg_2);
+            softmax_v1_2::get_examples_axis_float16_dim1_axis0_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim4_axis0_2) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim4_axis0_2,
             softmax_v1_2::is_ignored_axis_quant8_dim4_axis0_2,
-            softmax_v1_2::examples_axis_quant8_dim4_axis0_2);
+            softmax_v1_2::get_examples_axis_quant8_dim4_axis0_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim4_axis0_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim4_axis0_neg_2,
             softmax_v1_2::is_ignored_axis_quant8_dim4_axis0_neg_2,
-            softmax_v1_2::examples_axis_quant8_dim4_axis0_neg_2);
+            softmax_v1_2::get_examples_axis_quant8_dim4_axis0_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim4_axis1_2) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim4_axis1_2,
             softmax_v1_2::is_ignored_axis_quant8_dim4_axis1_2,
-            softmax_v1_2::examples_axis_quant8_dim4_axis1_2);
+            softmax_v1_2::get_examples_axis_quant8_dim4_axis1_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim4_axis1_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim4_axis1_neg_2,
             softmax_v1_2::is_ignored_axis_quant8_dim4_axis1_neg_2,
-            softmax_v1_2::examples_axis_quant8_dim4_axis1_neg_2);
+            softmax_v1_2::get_examples_axis_quant8_dim4_axis1_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim4_axis2_2) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim4_axis2_2,
             softmax_v1_2::is_ignored_axis_quant8_dim4_axis2_2,
-            softmax_v1_2::examples_axis_quant8_dim4_axis2_2);
+            softmax_v1_2::get_examples_axis_quant8_dim4_axis2_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim4_axis2_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim4_axis2_neg_2,
             softmax_v1_2::is_ignored_axis_quant8_dim4_axis2_neg_2,
-            softmax_v1_2::examples_axis_quant8_dim4_axis2_neg_2);
+            softmax_v1_2::get_examples_axis_quant8_dim4_axis2_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim4_axis3_2) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim4_axis3_2,
             softmax_v1_2::is_ignored_axis_quant8_dim4_axis3_2,
-            softmax_v1_2::examples_axis_quant8_dim4_axis3_2);
+            softmax_v1_2::get_examples_axis_quant8_dim4_axis3_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim4_axis3_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim4_axis3_neg_2,
             softmax_v1_2::is_ignored_axis_quant8_dim4_axis3_neg_2,
-            softmax_v1_2::examples_axis_quant8_dim4_axis3_neg_2);
+            softmax_v1_2::get_examples_axis_quant8_dim4_axis3_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim3_axis0_2) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim3_axis0_2,
             softmax_v1_2::is_ignored_axis_quant8_dim3_axis0_2,
-            softmax_v1_2::examples_axis_quant8_dim3_axis0_2);
+            softmax_v1_2::get_examples_axis_quant8_dim3_axis0_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim3_axis0_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim3_axis0_neg_2,
             softmax_v1_2::is_ignored_axis_quant8_dim3_axis0_neg_2,
-            softmax_v1_2::examples_axis_quant8_dim3_axis0_neg_2);
+            softmax_v1_2::get_examples_axis_quant8_dim3_axis0_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim3_axis1_2) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim3_axis1_2,
             softmax_v1_2::is_ignored_axis_quant8_dim3_axis1_2,
-            softmax_v1_2::examples_axis_quant8_dim3_axis1_2);
+            softmax_v1_2::get_examples_axis_quant8_dim3_axis1_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim3_axis1_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim3_axis1_neg_2,
             softmax_v1_2::is_ignored_axis_quant8_dim3_axis1_neg_2,
-            softmax_v1_2::examples_axis_quant8_dim3_axis1_neg_2);
+            softmax_v1_2::get_examples_axis_quant8_dim3_axis1_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim3_axis2_2) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim3_axis2_2,
             softmax_v1_2::is_ignored_axis_quant8_dim3_axis2_2,
-            softmax_v1_2::examples_axis_quant8_dim3_axis2_2);
+            softmax_v1_2::get_examples_axis_quant8_dim3_axis2_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim3_axis2_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim3_axis2_neg_2,
             softmax_v1_2::is_ignored_axis_quant8_dim3_axis2_neg_2,
-            softmax_v1_2::examples_axis_quant8_dim3_axis2_neg_2);
+            softmax_v1_2::get_examples_axis_quant8_dim3_axis2_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim2_axis0_2) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim2_axis0_2,
             softmax_v1_2::is_ignored_axis_quant8_dim2_axis0_2,
-            softmax_v1_2::examples_axis_quant8_dim2_axis0_2);
+            softmax_v1_2::get_examples_axis_quant8_dim2_axis0_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim2_axis0_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim2_axis0_neg_2,
             softmax_v1_2::is_ignored_axis_quant8_dim2_axis0_neg_2,
-            softmax_v1_2::examples_axis_quant8_dim2_axis0_neg_2);
+            softmax_v1_2::get_examples_axis_quant8_dim2_axis0_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim2_axis1_2) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim2_axis1_2,
             softmax_v1_2::is_ignored_axis_quant8_dim2_axis1_2,
-            softmax_v1_2::examples_axis_quant8_dim2_axis1_2);
+            softmax_v1_2::get_examples_axis_quant8_dim2_axis1_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim2_axis1_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim2_axis1_neg_2,
             softmax_v1_2::is_ignored_axis_quant8_dim2_axis1_neg_2,
-            softmax_v1_2::examples_axis_quant8_dim2_axis1_neg_2);
+            softmax_v1_2::get_examples_axis_quant8_dim2_axis1_neg_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim1_axis0_2) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim1_axis0_2,
             softmax_v1_2::is_ignored_axis_quant8_dim1_axis0_2,
-            softmax_v1_2::examples_axis_quant8_dim1_axis0_2);
+            softmax_v1_2::get_examples_axis_quant8_dim1_axis0_2());
 }
 
 TEST_F(GeneratedTests, softmax_v1_2_axis_quant8_dim1_axis0_neg_2) {
     execute(softmax_v1_2::CreateModel_axis_quant8_dim1_axis0_neg_2,
             softmax_v1_2::is_ignored_axis_quant8_dim1_axis0_neg_2,
-            softmax_v1_2::examples_axis_quant8_dim1_axis0_neg_2);
+            softmax_v1_2::get_examples_axis_quant8_dim1_axis0_neg_2());
 }
 
diff --git a/nn/runtime/test/generated/tests/space_to_batch.mod.py.cpp b/nn/runtime/test/generated/tests/space_to_batch.mod.py.cpp
index ac614de..1243f28 100644
--- a/nn/runtime/test/generated/tests/space_to_batch.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/space_to_batch.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, space_to_batch) {
     execute(space_to_batch::CreateModel,
             space_to_batch::is_ignored,
-            space_to_batch::examples);
+            space_to_batch::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/space_to_batch_float_1.mod.py.cpp b/nn/runtime/test/generated/tests/space_to_batch_float_1.mod.py.cpp
index fca2adb..2b2c9ef 100644
--- a/nn/runtime/test/generated/tests/space_to_batch_float_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/space_to_batch_float_1.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, space_to_batch_float_1) {
     execute(space_to_batch_float_1::CreateModel,
             space_to_batch_float_1::is_ignored,
-            space_to_batch_float_1::examples);
+            space_to_batch_float_1::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/space_to_batch_float_1_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/space_to_batch_float_1_relaxed.mod.py.cpp
index 402eec3..ddd6504 100644
--- a/nn/runtime/test/generated/tests/space_to_batch_float_1_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/space_to_batch_float_1_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, space_to_batch_float_1_relaxed) {
     execute(space_to_batch_float_1_relaxed::CreateModel,
             space_to_batch_float_1_relaxed::is_ignored,
-            space_to_batch_float_1_relaxed::examples);
+            space_to_batch_float_1_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/space_to_batch_float_2.mod.py.cpp b/nn/runtime/test/generated/tests/space_to_batch_float_2.mod.py.cpp
index a8800fa..ec9419c 100644
--- a/nn/runtime/test/generated/tests/space_to_batch_float_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/space_to_batch_float_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, space_to_batch_float_2) {
     execute(space_to_batch_float_2::CreateModel,
             space_to_batch_float_2::is_ignored,
-            space_to_batch_float_2::examples);
+            space_to_batch_float_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/space_to_batch_float_2_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/space_to_batch_float_2_relaxed.mod.py.cpp
index 4ce1f59..75f23e9 100644
--- a/nn/runtime/test/generated/tests/space_to_batch_float_2_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/space_to_batch_float_2_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, space_to_batch_float_2_relaxed) {
     execute(space_to_batch_float_2_relaxed::CreateModel,
             space_to_batch_float_2_relaxed::is_ignored,
-            space_to_batch_float_2_relaxed::examples);
+            space_to_batch_float_2_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/space_to_batch_float_3.mod.py.cpp b/nn/runtime/test/generated/tests/space_to_batch_float_3.mod.py.cpp
index 253567b..3cfdaf4 100644
--- a/nn/runtime/test/generated/tests/space_to_batch_float_3.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/space_to_batch_float_3.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, space_to_batch_float_3) {
     execute(space_to_batch_float_3::CreateModel,
             space_to_batch_float_3::is_ignored,
-            space_to_batch_float_3::examples);
+            space_to_batch_float_3::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/space_to_batch_float_3_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/space_to_batch_float_3_relaxed.mod.py.cpp
index 9a62313..571fed6 100644
--- a/nn/runtime/test/generated/tests/space_to_batch_float_3_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/space_to_batch_float_3_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, space_to_batch_float_3_relaxed) {
     execute(space_to_batch_float_3_relaxed::CreateModel,
             space_to_batch_float_3_relaxed::is_ignored,
-            space_to_batch_float_3_relaxed::examples);
+            space_to_batch_float_3_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/space_to_batch_quant8_1.mod.py.cpp b/nn/runtime/test/generated/tests/space_to_batch_quant8_1.mod.py.cpp
index bb3ec87..95081d7 100644
--- a/nn/runtime/test/generated/tests/space_to_batch_quant8_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/space_to_batch_quant8_1.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, space_to_batch_quant8_1) {
     execute(space_to_batch_quant8_1::CreateModel,
             space_to_batch_quant8_1::is_ignored,
-            space_to_batch_quant8_1::examples);
+            space_to_batch_quant8_1::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/space_to_batch_quant8_2.mod.py.cpp b/nn/runtime/test/generated/tests/space_to_batch_quant8_2.mod.py.cpp
index cfbc1b6..91501de 100644
--- a/nn/runtime/test/generated/tests/space_to_batch_quant8_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/space_to_batch_quant8_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, space_to_batch_quant8_2) {
     execute(space_to_batch_quant8_2::CreateModel,
             space_to_batch_quant8_2::is_ignored,
-            space_to_batch_quant8_2::examples);
+            space_to_batch_quant8_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/space_to_batch_quant8_3.mod.py.cpp b/nn/runtime/test/generated/tests/space_to_batch_quant8_3.mod.py.cpp
index 576bb8f..332282a 100644
--- a/nn/runtime/test/generated/tests/space_to_batch_quant8_3.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/space_to_batch_quant8_3.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, space_to_batch_quant8_3) {
     execute(space_to_batch_quant8_3::CreateModel,
             space_to_batch_quant8_3::is_ignored,
-            space_to_batch_quant8_3::examples);
+            space_to_batch_quant8_3::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/space_to_batch_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/space_to_batch_relaxed.mod.py.cpp
index f87697c..4f8bea2 100644
--- a/nn/runtime/test/generated/tests/space_to_batch_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/space_to_batch_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, space_to_batch_relaxed) {
     execute(space_to_batch_relaxed::CreateModel,
             space_to_batch_relaxed::is_ignored,
-            space_to_batch_relaxed::examples);
+            space_to_batch_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/space_to_batch_v1_2.mod.py.cpp b/nn/runtime/test/generated/tests/space_to_batch_v1_2.mod.py.cpp
index d59a3b9..0c58633 100644
--- a/nn/runtime/test/generated/tests/space_to_batch_v1_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/space_to_batch_v1_2.mod.py.cpp
@@ -12,144 +12,192 @@
 TEST_F(GeneratedTests, space_to_batch_v1_2_nhwc) {
     execute(space_to_batch_v1_2::CreateModel_nhwc,
             space_to_batch_v1_2::is_ignored_nhwc,
-            space_to_batch_v1_2::examples_nhwc);
+            space_to_batch_v1_2::get_examples_nhwc());
 }
 
 TEST_F(GeneratedTests, space_to_batch_v1_2_nhwc_relaxed) {
     execute(space_to_batch_v1_2::CreateModel_nhwc_relaxed,
             space_to_batch_v1_2::is_ignored_nhwc_relaxed,
-            space_to_batch_v1_2::examples_nhwc_relaxed);
+            space_to_batch_v1_2::get_examples_nhwc_relaxed());
+}
+
+TEST_F(GeneratedTests, space_to_batch_v1_2_nhwc_float16) {
+    execute(space_to_batch_v1_2::CreateModel_nhwc_float16,
+            space_to_batch_v1_2::is_ignored_nhwc_float16,
+            space_to_batch_v1_2::get_examples_nhwc_float16());
 }
 
 TEST_F(GeneratedTests, space_to_batch_v1_2_nhwc_quant8) {
     execute(space_to_batch_v1_2::CreateModel_nhwc_quant8,
             space_to_batch_v1_2::is_ignored_nhwc_quant8,
-            space_to_batch_v1_2::examples_nhwc_quant8);
+            space_to_batch_v1_2::get_examples_nhwc_quant8());
 }
 
 TEST_F(GeneratedTests, space_to_batch_v1_2_nchw) {
     execute(space_to_batch_v1_2::CreateModel_nchw,
             space_to_batch_v1_2::is_ignored_nchw,
-            space_to_batch_v1_2::examples_nchw);
+            space_to_batch_v1_2::get_examples_nchw());
 }
 
 TEST_F(GeneratedTests, space_to_batch_v1_2_nchw_relaxed) {
     execute(space_to_batch_v1_2::CreateModel_nchw_relaxed,
             space_to_batch_v1_2::is_ignored_nchw_relaxed,
-            space_to_batch_v1_2::examples_nchw_relaxed);
+            space_to_batch_v1_2::get_examples_nchw_relaxed());
+}
+
+TEST_F(GeneratedTests, space_to_batch_v1_2_nchw_float16) {
+    execute(space_to_batch_v1_2::CreateModel_nchw_float16,
+            space_to_batch_v1_2::is_ignored_nchw_float16,
+            space_to_batch_v1_2::get_examples_nchw_float16());
 }
 
 TEST_F(GeneratedTests, space_to_batch_v1_2_nchw_quant8) {
     execute(space_to_batch_v1_2::CreateModel_nchw_quant8,
             space_to_batch_v1_2::is_ignored_nchw_quant8,
-            space_to_batch_v1_2::examples_nchw_quant8);
+            space_to_batch_v1_2::get_examples_nchw_quant8());
 }
 
 TEST_F(GeneratedTests, space_to_batch_v1_2_nhwc_2) {
     execute(space_to_batch_v1_2::CreateModel_nhwc_2,
             space_to_batch_v1_2::is_ignored_nhwc_2,
-            space_to_batch_v1_2::examples_nhwc_2);
+            space_to_batch_v1_2::get_examples_nhwc_2());
 }
 
 TEST_F(GeneratedTests, space_to_batch_v1_2_nhwc_relaxed_2) {
     execute(space_to_batch_v1_2::CreateModel_nhwc_relaxed_2,
             space_to_batch_v1_2::is_ignored_nhwc_relaxed_2,
-            space_to_batch_v1_2::examples_nhwc_relaxed_2);
+            space_to_batch_v1_2::get_examples_nhwc_relaxed_2());
+}
+
+TEST_F(GeneratedTests, space_to_batch_v1_2_nhwc_float16_2) {
+    execute(space_to_batch_v1_2::CreateModel_nhwc_float16_2,
+            space_to_batch_v1_2::is_ignored_nhwc_float16_2,
+            space_to_batch_v1_2::get_examples_nhwc_float16_2());
 }
 
 TEST_F(GeneratedTests, space_to_batch_v1_2_nhwc_quant8_2) {
     execute(space_to_batch_v1_2::CreateModel_nhwc_quant8_2,
             space_to_batch_v1_2::is_ignored_nhwc_quant8_2,
-            space_to_batch_v1_2::examples_nhwc_quant8_2);
+            space_to_batch_v1_2::get_examples_nhwc_quant8_2());
 }
 
 TEST_F(GeneratedTests, space_to_batch_v1_2_nchw_2) {
     execute(space_to_batch_v1_2::CreateModel_nchw_2,
             space_to_batch_v1_2::is_ignored_nchw_2,
-            space_to_batch_v1_2::examples_nchw_2);
+            space_to_batch_v1_2::get_examples_nchw_2());
 }
 
 TEST_F(GeneratedTests, space_to_batch_v1_2_nchw_relaxed_2) {
     execute(space_to_batch_v1_2::CreateModel_nchw_relaxed_2,
             space_to_batch_v1_2::is_ignored_nchw_relaxed_2,
-            space_to_batch_v1_2::examples_nchw_relaxed_2);
+            space_to_batch_v1_2::get_examples_nchw_relaxed_2());
+}
+
+TEST_F(GeneratedTests, space_to_batch_v1_2_nchw_float16_2) {
+    execute(space_to_batch_v1_2::CreateModel_nchw_float16_2,
+            space_to_batch_v1_2::is_ignored_nchw_float16_2,
+            space_to_batch_v1_2::get_examples_nchw_float16_2());
 }
 
 TEST_F(GeneratedTests, space_to_batch_v1_2_nchw_quant8_2) {
     execute(space_to_batch_v1_2::CreateModel_nchw_quant8_2,
             space_to_batch_v1_2::is_ignored_nchw_quant8_2,
-            space_to_batch_v1_2::examples_nchw_quant8_2);
+            space_to_batch_v1_2::get_examples_nchw_quant8_2());
 }
 
 TEST_F(GeneratedTests, space_to_batch_v1_2_nhwc_3) {
     execute(space_to_batch_v1_2::CreateModel_nhwc_3,
             space_to_batch_v1_2::is_ignored_nhwc_3,
-            space_to_batch_v1_2::examples_nhwc_3);
+            space_to_batch_v1_2::get_examples_nhwc_3());
 }
 
 TEST_F(GeneratedTests, space_to_batch_v1_2_nhwc_relaxed_3) {
     execute(space_to_batch_v1_2::CreateModel_nhwc_relaxed_3,
             space_to_batch_v1_2::is_ignored_nhwc_relaxed_3,
-            space_to_batch_v1_2::examples_nhwc_relaxed_3);
+            space_to_batch_v1_2::get_examples_nhwc_relaxed_3());
+}
+
+TEST_F(GeneratedTests, space_to_batch_v1_2_nhwc_float16_3) {
+    execute(space_to_batch_v1_2::CreateModel_nhwc_float16_3,
+            space_to_batch_v1_2::is_ignored_nhwc_float16_3,
+            space_to_batch_v1_2::get_examples_nhwc_float16_3());
 }
 
 TEST_F(GeneratedTests, space_to_batch_v1_2_nhwc_quant8_3) {
     execute(space_to_batch_v1_2::CreateModel_nhwc_quant8_3,
             space_to_batch_v1_2::is_ignored_nhwc_quant8_3,
-            space_to_batch_v1_2::examples_nhwc_quant8_3);
+            space_to_batch_v1_2::get_examples_nhwc_quant8_3());
 }
 
 TEST_F(GeneratedTests, space_to_batch_v1_2_nchw_3) {
     execute(space_to_batch_v1_2::CreateModel_nchw_3,
             space_to_batch_v1_2::is_ignored_nchw_3,
-            space_to_batch_v1_2::examples_nchw_3);
+            space_to_batch_v1_2::get_examples_nchw_3());
 }
 
 TEST_F(GeneratedTests, space_to_batch_v1_2_nchw_relaxed_3) {
     execute(space_to_batch_v1_2::CreateModel_nchw_relaxed_3,
             space_to_batch_v1_2::is_ignored_nchw_relaxed_3,
-            space_to_batch_v1_2::examples_nchw_relaxed_3);
+            space_to_batch_v1_2::get_examples_nchw_relaxed_3());
+}
+
+TEST_F(GeneratedTests, space_to_batch_v1_2_nchw_float16_3) {
+    execute(space_to_batch_v1_2::CreateModel_nchw_float16_3,
+            space_to_batch_v1_2::is_ignored_nchw_float16_3,
+            space_to_batch_v1_2::get_examples_nchw_float16_3());
 }
 
 TEST_F(GeneratedTests, space_to_batch_v1_2_nchw_quant8_3) {
     execute(space_to_batch_v1_2::CreateModel_nchw_quant8_3,
             space_to_batch_v1_2::is_ignored_nchw_quant8_3,
-            space_to_batch_v1_2::examples_nchw_quant8_3);
+            space_to_batch_v1_2::get_examples_nchw_quant8_3());
 }
 
 TEST_F(GeneratedTests, space_to_batch_v1_2_nhwc_4) {
     execute(space_to_batch_v1_2::CreateModel_nhwc_4,
             space_to_batch_v1_2::is_ignored_nhwc_4,
-            space_to_batch_v1_2::examples_nhwc_4);
+            space_to_batch_v1_2::get_examples_nhwc_4());
 }
 
 TEST_F(GeneratedTests, space_to_batch_v1_2_nhwc_relaxed_4) {
     execute(space_to_batch_v1_2::CreateModel_nhwc_relaxed_4,
             space_to_batch_v1_2::is_ignored_nhwc_relaxed_4,
-            space_to_batch_v1_2::examples_nhwc_relaxed_4);
+            space_to_batch_v1_2::get_examples_nhwc_relaxed_4());
+}
+
+TEST_F(GeneratedTests, space_to_batch_v1_2_nhwc_float16_4) {
+    execute(space_to_batch_v1_2::CreateModel_nhwc_float16_4,
+            space_to_batch_v1_2::is_ignored_nhwc_float16_4,
+            space_to_batch_v1_2::get_examples_nhwc_float16_4());
 }
 
 TEST_F(GeneratedTests, space_to_batch_v1_2_nhwc_quant8_4) {
     execute(space_to_batch_v1_2::CreateModel_nhwc_quant8_4,
             space_to_batch_v1_2::is_ignored_nhwc_quant8_4,
-            space_to_batch_v1_2::examples_nhwc_quant8_4);
+            space_to_batch_v1_2::get_examples_nhwc_quant8_4());
 }
 
 TEST_F(GeneratedTests, space_to_batch_v1_2_nchw_4) {
     execute(space_to_batch_v1_2::CreateModel_nchw_4,
             space_to_batch_v1_2::is_ignored_nchw_4,
-            space_to_batch_v1_2::examples_nchw_4);
+            space_to_batch_v1_2::get_examples_nchw_4());
 }
 
 TEST_F(GeneratedTests, space_to_batch_v1_2_nchw_relaxed_4) {
     execute(space_to_batch_v1_2::CreateModel_nchw_relaxed_4,
             space_to_batch_v1_2::is_ignored_nchw_relaxed_4,
-            space_to_batch_v1_2::examples_nchw_relaxed_4);
+            space_to_batch_v1_2::get_examples_nchw_relaxed_4());
+}
+
+TEST_F(GeneratedTests, space_to_batch_v1_2_nchw_float16_4) {
+    execute(space_to_batch_v1_2::CreateModel_nchw_float16_4,
+            space_to_batch_v1_2::is_ignored_nchw_float16_4,
+            space_to_batch_v1_2::get_examples_nchw_float16_4());
 }
 
 TEST_F(GeneratedTests, space_to_batch_v1_2_nchw_quant8_4) {
     execute(space_to_batch_v1_2::CreateModel_nchw_quant8_4,
             space_to_batch_v1_2::is_ignored_nchw_quant8_4,
-            space_to_batch_v1_2::examples_nchw_quant8_4);
+            space_to_batch_v1_2::get_examples_nchw_quant8_4());
 }
 
diff --git a/nn/runtime/test/generated/tests/space_to_depth_float_1.mod.py.cpp b/nn/runtime/test/generated/tests/space_to_depth_float_1.mod.py.cpp
index 43d3828..33c5ccb 100644
--- a/nn/runtime/test/generated/tests/space_to_depth_float_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/space_to_depth_float_1.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, space_to_depth_float_1) {
     execute(space_to_depth_float_1::CreateModel,
             space_to_depth_float_1::is_ignored,
-            space_to_depth_float_1::examples);
+            space_to_depth_float_1::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/space_to_depth_float_1_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/space_to_depth_float_1_relaxed.mod.py.cpp
index 6270960..f166cfb 100644
--- a/nn/runtime/test/generated/tests/space_to_depth_float_1_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/space_to_depth_float_1_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, space_to_depth_float_1_relaxed) {
     execute(space_to_depth_float_1_relaxed::CreateModel,
             space_to_depth_float_1_relaxed::is_ignored,
-            space_to_depth_float_1_relaxed::examples);
+            space_to_depth_float_1_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/space_to_depth_float_2.mod.py.cpp b/nn/runtime/test/generated/tests/space_to_depth_float_2.mod.py.cpp
index 8b260ac..6d1ab9c 100644
--- a/nn/runtime/test/generated/tests/space_to_depth_float_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/space_to_depth_float_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, space_to_depth_float_2) {
     execute(space_to_depth_float_2::CreateModel,
             space_to_depth_float_2::is_ignored,
-            space_to_depth_float_2::examples);
+            space_to_depth_float_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/space_to_depth_float_2_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/space_to_depth_float_2_relaxed.mod.py.cpp
index 7a3a25c..cf21d1f 100644
--- a/nn/runtime/test/generated/tests/space_to_depth_float_2_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/space_to_depth_float_2_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, space_to_depth_float_2_relaxed) {
     execute(space_to_depth_float_2_relaxed::CreateModel,
             space_to_depth_float_2_relaxed::is_ignored,
-            space_to_depth_float_2_relaxed::examples);
+            space_to_depth_float_2_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/space_to_depth_float_3.mod.py.cpp b/nn/runtime/test/generated/tests/space_to_depth_float_3.mod.py.cpp
index d024b4a..c7ba1e4 100644
--- a/nn/runtime/test/generated/tests/space_to_depth_float_3.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/space_to_depth_float_3.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, space_to_depth_float_3) {
     execute(space_to_depth_float_3::CreateModel,
             space_to_depth_float_3::is_ignored,
-            space_to_depth_float_3::examples);
+            space_to_depth_float_3::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/space_to_depth_float_3_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/space_to_depth_float_3_relaxed.mod.py.cpp
index cb6aaae..b2a620f 100644
--- a/nn/runtime/test/generated/tests/space_to_depth_float_3_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/space_to_depth_float_3_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, space_to_depth_float_3_relaxed) {
     execute(space_to_depth_float_3_relaxed::CreateModel,
             space_to_depth_float_3_relaxed::is_ignored,
-            space_to_depth_float_3_relaxed::examples);
+            space_to_depth_float_3_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/space_to_depth_quant8_1.mod.py.cpp b/nn/runtime/test/generated/tests/space_to_depth_quant8_1.mod.py.cpp
index 9591229..e28c952 100644
--- a/nn/runtime/test/generated/tests/space_to_depth_quant8_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/space_to_depth_quant8_1.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, space_to_depth_quant8_1) {
     execute(space_to_depth_quant8_1::CreateModel,
             space_to_depth_quant8_1::is_ignored,
-            space_to_depth_quant8_1::examples);
+            space_to_depth_quant8_1::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/space_to_depth_quant8_2.mod.py.cpp b/nn/runtime/test/generated/tests/space_to_depth_quant8_2.mod.py.cpp
index 3497e00..fcbd81e 100644
--- a/nn/runtime/test/generated/tests/space_to_depth_quant8_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/space_to_depth_quant8_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, space_to_depth_quant8_2) {
     execute(space_to_depth_quant8_2::CreateModel,
             space_to_depth_quant8_2::is_ignored,
-            space_to_depth_quant8_2::examples);
+            space_to_depth_quant8_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/space_to_depth_v1_2.mod.py.cpp b/nn/runtime/test/generated/tests/space_to_depth_v1_2.mod.py.cpp
index 4233a41..db9236d 100644
--- a/nn/runtime/test/generated/tests/space_to_depth_v1_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/space_to_depth_v1_2.mod.py.cpp
@@ -12,108 +12,144 @@
 TEST_F(GeneratedTests, space_to_depth_v1_2_nhwc) {
     execute(space_to_depth_v1_2::CreateModel_nhwc,
             space_to_depth_v1_2::is_ignored_nhwc,
-            space_to_depth_v1_2::examples_nhwc);
+            space_to_depth_v1_2::get_examples_nhwc());
 }
 
 TEST_F(GeneratedTests, space_to_depth_v1_2_nhwc_relaxed) {
     execute(space_to_depth_v1_2::CreateModel_nhwc_relaxed,
             space_to_depth_v1_2::is_ignored_nhwc_relaxed,
-            space_to_depth_v1_2::examples_nhwc_relaxed);
+            space_to_depth_v1_2::get_examples_nhwc_relaxed());
+}
+
+TEST_F(GeneratedTests, space_to_depth_v1_2_nhwc_float16) {
+    execute(space_to_depth_v1_2::CreateModel_nhwc_float16,
+            space_to_depth_v1_2::is_ignored_nhwc_float16,
+            space_to_depth_v1_2::get_examples_nhwc_float16());
 }
 
 TEST_F(GeneratedTests, space_to_depth_v1_2_nhwc_quant8) {
     execute(space_to_depth_v1_2::CreateModel_nhwc_quant8,
             space_to_depth_v1_2::is_ignored_nhwc_quant8,
-            space_to_depth_v1_2::examples_nhwc_quant8);
+            space_to_depth_v1_2::get_examples_nhwc_quant8());
 }
 
 TEST_F(GeneratedTests, space_to_depth_v1_2_nchw) {
     execute(space_to_depth_v1_2::CreateModel_nchw,
             space_to_depth_v1_2::is_ignored_nchw,
-            space_to_depth_v1_2::examples_nchw);
+            space_to_depth_v1_2::get_examples_nchw());
 }
 
 TEST_F(GeneratedTests, space_to_depth_v1_2_nchw_relaxed) {
     execute(space_to_depth_v1_2::CreateModel_nchw_relaxed,
             space_to_depth_v1_2::is_ignored_nchw_relaxed,
-            space_to_depth_v1_2::examples_nchw_relaxed);
+            space_to_depth_v1_2::get_examples_nchw_relaxed());
+}
+
+TEST_F(GeneratedTests, space_to_depth_v1_2_nchw_float16) {
+    execute(space_to_depth_v1_2::CreateModel_nchw_float16,
+            space_to_depth_v1_2::is_ignored_nchw_float16,
+            space_to_depth_v1_2::get_examples_nchw_float16());
 }
 
 TEST_F(GeneratedTests, space_to_depth_v1_2_nchw_quant8) {
     execute(space_to_depth_v1_2::CreateModel_nchw_quant8,
             space_to_depth_v1_2::is_ignored_nchw_quant8,
-            space_to_depth_v1_2::examples_nchw_quant8);
+            space_to_depth_v1_2::get_examples_nchw_quant8());
 }
 
 TEST_F(GeneratedTests, space_to_depth_v1_2_nhwc_2) {
     execute(space_to_depth_v1_2::CreateModel_nhwc_2,
             space_to_depth_v1_2::is_ignored_nhwc_2,
-            space_to_depth_v1_2::examples_nhwc_2);
+            space_to_depth_v1_2::get_examples_nhwc_2());
 }
 
 TEST_F(GeneratedTests, space_to_depth_v1_2_nhwc_relaxed_2) {
     execute(space_to_depth_v1_2::CreateModel_nhwc_relaxed_2,
             space_to_depth_v1_2::is_ignored_nhwc_relaxed_2,
-            space_to_depth_v1_2::examples_nhwc_relaxed_2);
+            space_to_depth_v1_2::get_examples_nhwc_relaxed_2());
+}
+
+TEST_F(GeneratedTests, space_to_depth_v1_2_nhwc_float16_2) {
+    execute(space_to_depth_v1_2::CreateModel_nhwc_float16_2,
+            space_to_depth_v1_2::is_ignored_nhwc_float16_2,
+            space_to_depth_v1_2::get_examples_nhwc_float16_2());
 }
 
 TEST_F(GeneratedTests, space_to_depth_v1_2_nhwc_quant8_2) {
     execute(space_to_depth_v1_2::CreateModel_nhwc_quant8_2,
             space_to_depth_v1_2::is_ignored_nhwc_quant8_2,
-            space_to_depth_v1_2::examples_nhwc_quant8_2);
+            space_to_depth_v1_2::get_examples_nhwc_quant8_2());
 }
 
 TEST_F(GeneratedTests, space_to_depth_v1_2_nchw_2) {
     execute(space_to_depth_v1_2::CreateModel_nchw_2,
             space_to_depth_v1_2::is_ignored_nchw_2,
-            space_to_depth_v1_2::examples_nchw_2);
+            space_to_depth_v1_2::get_examples_nchw_2());
 }
 
 TEST_F(GeneratedTests, space_to_depth_v1_2_nchw_relaxed_2) {
     execute(space_to_depth_v1_2::CreateModel_nchw_relaxed_2,
             space_to_depth_v1_2::is_ignored_nchw_relaxed_2,
-            space_to_depth_v1_2::examples_nchw_relaxed_2);
+            space_to_depth_v1_2::get_examples_nchw_relaxed_2());
+}
+
+TEST_F(GeneratedTests, space_to_depth_v1_2_nchw_float16_2) {
+    execute(space_to_depth_v1_2::CreateModel_nchw_float16_2,
+            space_to_depth_v1_2::is_ignored_nchw_float16_2,
+            space_to_depth_v1_2::get_examples_nchw_float16_2());
 }
 
 TEST_F(GeneratedTests, space_to_depth_v1_2_nchw_quant8_2) {
     execute(space_to_depth_v1_2::CreateModel_nchw_quant8_2,
             space_to_depth_v1_2::is_ignored_nchw_quant8_2,
-            space_to_depth_v1_2::examples_nchw_quant8_2);
+            space_to_depth_v1_2::get_examples_nchw_quant8_2());
 }
 
 TEST_F(GeneratedTests, space_to_depth_v1_2_nhwc_3) {
     execute(space_to_depth_v1_2::CreateModel_nhwc_3,
             space_to_depth_v1_2::is_ignored_nhwc_3,
-            space_to_depth_v1_2::examples_nhwc_3);
+            space_to_depth_v1_2::get_examples_nhwc_3());
 }
 
 TEST_F(GeneratedTests, space_to_depth_v1_2_nhwc_relaxed_3) {
     execute(space_to_depth_v1_2::CreateModel_nhwc_relaxed_3,
             space_to_depth_v1_2::is_ignored_nhwc_relaxed_3,
-            space_to_depth_v1_2::examples_nhwc_relaxed_3);
+            space_to_depth_v1_2::get_examples_nhwc_relaxed_3());
+}
+
+TEST_F(GeneratedTests, space_to_depth_v1_2_nhwc_float16_3) {
+    execute(space_to_depth_v1_2::CreateModel_nhwc_float16_3,
+            space_to_depth_v1_2::is_ignored_nhwc_float16_3,
+            space_to_depth_v1_2::get_examples_nhwc_float16_3());
 }
 
 TEST_F(GeneratedTests, space_to_depth_v1_2_nhwc_quant8_3) {
     execute(space_to_depth_v1_2::CreateModel_nhwc_quant8_3,
             space_to_depth_v1_2::is_ignored_nhwc_quant8_3,
-            space_to_depth_v1_2::examples_nhwc_quant8_3);
+            space_to_depth_v1_2::get_examples_nhwc_quant8_3());
 }
 
 TEST_F(GeneratedTests, space_to_depth_v1_2_nchw_3) {
     execute(space_to_depth_v1_2::CreateModel_nchw_3,
             space_to_depth_v1_2::is_ignored_nchw_3,
-            space_to_depth_v1_2::examples_nchw_3);
+            space_to_depth_v1_2::get_examples_nchw_3());
 }
 
 TEST_F(GeneratedTests, space_to_depth_v1_2_nchw_relaxed_3) {
     execute(space_to_depth_v1_2::CreateModel_nchw_relaxed_3,
             space_to_depth_v1_2::is_ignored_nchw_relaxed_3,
-            space_to_depth_v1_2::examples_nchw_relaxed_3);
+            space_to_depth_v1_2::get_examples_nchw_relaxed_3());
+}
+
+TEST_F(GeneratedTests, space_to_depth_v1_2_nchw_float16_3) {
+    execute(space_to_depth_v1_2::CreateModel_nchw_float16_3,
+            space_to_depth_v1_2::is_ignored_nchw_float16_3,
+            space_to_depth_v1_2::get_examples_nchw_float16_3());
 }
 
 TEST_F(GeneratedTests, space_to_depth_v1_2_nchw_quant8_3) {
     execute(space_to_depth_v1_2::CreateModel_nchw_quant8_3,
             space_to_depth_v1_2::is_ignored_nchw_quant8_3,
-            space_to_depth_v1_2::examples_nchw_quant8_3);
+            space_to_depth_v1_2::get_examples_nchw_quant8_3());
 }
 
diff --git a/nn/runtime/test/generated/tests/split_float_1.mod.py.cpp b/nn/runtime/test/generated/tests/split_float_1.mod.py.cpp
index 8fc38ab..06432d1 100644
--- a/nn/runtime/test/generated/tests/split_float_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/split_float_1.mod.py.cpp
@@ -12,18 +12,18 @@
 TEST_F(GeneratedTests, split_float_1) {
     execute(split_float_1::CreateModel,
             split_float_1::is_ignored,
-            split_float_1::examples);
+            split_float_1::get_examples());
 }
 
 TEST_F(GeneratedTests, split_float_1_relaxed) {
     execute(split_float_1::CreateModel_relaxed,
             split_float_1::is_ignored_relaxed,
-            split_float_1::examples_relaxed);
+            split_float_1::get_examples_relaxed());
 }
 
 TEST_F(GeneratedTests, split_float_1_float16) {
     execute(split_float_1::CreateModel_float16,
             split_float_1::is_ignored_float16,
-            split_float_1::examples_float16);
+            split_float_1::get_examples_float16());
 }
 
diff --git a/nn/runtime/test/generated/tests/split_float_2.mod.py.cpp b/nn/runtime/test/generated/tests/split_float_2.mod.py.cpp
index 0e08ff6..49ac143 100644
--- a/nn/runtime/test/generated/tests/split_float_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/split_float_2.mod.py.cpp
@@ -12,18 +12,18 @@
 TEST_F(GeneratedTests, split_float_2) {
     execute(split_float_2::CreateModel,
             split_float_2::is_ignored,
-            split_float_2::examples);
+            split_float_2::get_examples());
 }
 
 TEST_F(GeneratedTests, split_float_2_relaxed) {
     execute(split_float_2::CreateModel_relaxed,
             split_float_2::is_ignored_relaxed,
-            split_float_2::examples_relaxed);
+            split_float_2::get_examples_relaxed());
 }
 
 TEST_F(GeneratedTests, split_float_2_float16) {
     execute(split_float_2::CreateModel_float16,
             split_float_2::is_ignored_float16,
-            split_float_2::examples_float16);
+            split_float_2::get_examples_float16());
 }
 
diff --git a/nn/runtime/test/generated/tests/split_float_3.mod.py.cpp b/nn/runtime/test/generated/tests/split_float_3.mod.py.cpp
index 29bbed2..f08f2d5 100644
--- a/nn/runtime/test/generated/tests/split_float_3.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/split_float_3.mod.py.cpp
@@ -12,18 +12,18 @@
 TEST_F(GeneratedTests, split_float_3) {
     execute(split_float_3::CreateModel,
             split_float_3::is_ignored,
-            split_float_3::examples);
+            split_float_3::get_examples());
 }
 
 TEST_F(GeneratedTests, split_float_3_relaxed) {
     execute(split_float_3::CreateModel_relaxed,
             split_float_3::is_ignored_relaxed,
-            split_float_3::examples_relaxed);
+            split_float_3::get_examples_relaxed());
 }
 
 TEST_F(GeneratedTests, split_float_3_float16) {
     execute(split_float_3::CreateModel_float16,
             split_float_3::is_ignored_float16,
-            split_float_3::examples_float16);
+            split_float_3::get_examples_float16());
 }
 
diff --git a/nn/runtime/test/generated/tests/split_float_4.mod.py.cpp b/nn/runtime/test/generated/tests/split_float_4.mod.py.cpp
index ff56773..9ee2334 100644
--- a/nn/runtime/test/generated/tests/split_float_4.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/split_float_4.mod.py.cpp
@@ -12,18 +12,18 @@
 TEST_F(GeneratedTests, split_float_4) {
     execute(split_float_4::CreateModel,
             split_float_4::is_ignored,
-            split_float_4::examples);
+            split_float_4::get_examples());
 }
 
 TEST_F(GeneratedTests, split_float_4_relaxed) {
     execute(split_float_4::CreateModel_relaxed,
             split_float_4::is_ignored_relaxed,
-            split_float_4::examples_relaxed);
+            split_float_4::get_examples_relaxed());
 }
 
 TEST_F(GeneratedTests, split_float_4_float16) {
     execute(split_float_4::CreateModel_float16,
             split_float_4::is_ignored_float16,
-            split_float_4::examples_float16);
+            split_float_4::get_examples_float16());
 }
 
diff --git a/nn/runtime/test/generated/tests/split_float_5.mod.py.cpp b/nn/runtime/test/generated/tests/split_float_5.mod.py.cpp
index bf37e1b..6514e84 100644
--- a/nn/runtime/test/generated/tests/split_float_5.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/split_float_5.mod.py.cpp
@@ -12,18 +12,18 @@
 TEST_F(GeneratedTests, split_float_5) {
     execute(split_float_5::CreateModel,
             split_float_5::is_ignored,
-            split_float_5::examples);
+            split_float_5::get_examples());
 }
 
 TEST_F(GeneratedTests, split_float_5_relaxed) {
     execute(split_float_5::CreateModel_relaxed,
             split_float_5::is_ignored_relaxed,
-            split_float_5::examples_relaxed);
+            split_float_5::get_examples_relaxed());
 }
 
 TEST_F(GeneratedTests, split_float_5_float16) {
     execute(split_float_5::CreateModel_float16,
             split_float_5::is_ignored_float16,
-            split_float_5::examples_float16);
+            split_float_5::get_examples_float16());
 }
 
diff --git a/nn/runtime/test/generated/tests/split_int32_1.mod.py.cpp b/nn/runtime/test/generated/tests/split_int32_1.mod.py.cpp
index 034c446..2651bee 100644
--- a/nn/runtime/test/generated/tests/split_int32_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/split_int32_1.mod.py.cpp
@@ -12,12 +12,12 @@
 TEST_F(GeneratedTests, split_int32_1) {
     execute(split_int32_1::CreateModel,
             split_int32_1::is_ignored,
-            split_int32_1::examples);
+            split_int32_1::get_examples());
 }
 
 TEST_F(GeneratedTests, split_int32_1_relaxed) {
     execute(split_int32_1::CreateModel_relaxed,
             split_int32_1::is_ignored_relaxed,
-            split_int32_1::examples_relaxed);
+            split_int32_1::get_examples_relaxed());
 }
 
diff --git a/nn/runtime/test/generated/tests/split_int32_2.mod.py.cpp b/nn/runtime/test/generated/tests/split_int32_2.mod.py.cpp
index a103713..a3722f8 100644
--- a/nn/runtime/test/generated/tests/split_int32_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/split_int32_2.mod.py.cpp
@@ -12,12 +12,12 @@
 TEST_F(GeneratedTests, split_int32_2) {
     execute(split_int32_2::CreateModel,
             split_int32_2::is_ignored,
-            split_int32_2::examples);
+            split_int32_2::get_examples());
 }
 
 TEST_F(GeneratedTests, split_int32_2_relaxed) {
     execute(split_int32_2::CreateModel_relaxed,
             split_int32_2::is_ignored_relaxed,
-            split_int32_2::examples_relaxed);
+            split_int32_2::get_examples_relaxed());
 }
 
diff --git a/nn/runtime/test/generated/tests/split_int32_3.mod.py.cpp b/nn/runtime/test/generated/tests/split_int32_3.mod.py.cpp
index 7b01fc8..462b551 100644
--- a/nn/runtime/test/generated/tests/split_int32_3.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/split_int32_3.mod.py.cpp
@@ -12,12 +12,12 @@
 TEST_F(GeneratedTests, split_int32_3) {
     execute(split_int32_3::CreateModel,
             split_int32_3::is_ignored,
-            split_int32_3::examples);
+            split_int32_3::get_examples());
 }
 
 TEST_F(GeneratedTests, split_int32_3_relaxed) {
     execute(split_int32_3::CreateModel_relaxed,
             split_int32_3::is_ignored_relaxed,
-            split_int32_3::examples_relaxed);
+            split_int32_3::get_examples_relaxed());
 }
 
diff --git a/nn/runtime/test/generated/tests/split_int32_4.mod.py.cpp b/nn/runtime/test/generated/tests/split_int32_4.mod.py.cpp
index 7be4b97..20d6134 100644
--- a/nn/runtime/test/generated/tests/split_int32_4.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/split_int32_4.mod.py.cpp
@@ -12,12 +12,12 @@
 TEST_F(GeneratedTests, split_int32_4) {
     execute(split_int32_4::CreateModel,
             split_int32_4::is_ignored,
-            split_int32_4::examples);
+            split_int32_4::get_examples());
 }
 
 TEST_F(GeneratedTests, split_int32_4_relaxed) {
     execute(split_int32_4::CreateModel_relaxed,
             split_int32_4::is_ignored_relaxed,
-            split_int32_4::examples_relaxed);
+            split_int32_4::get_examples_relaxed());
 }
 
diff --git a/nn/runtime/test/generated/tests/split_quant8_1.mod.py.cpp b/nn/runtime/test/generated/tests/split_quant8_1.mod.py.cpp
index 1d98249..dd2118a 100644
--- a/nn/runtime/test/generated/tests/split_quant8_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/split_quant8_1.mod.py.cpp
@@ -12,12 +12,12 @@
 TEST_F(GeneratedTests, split_quant8_1) {
     execute(split_quant8_1::CreateModel,
             split_quant8_1::is_ignored,
-            split_quant8_1::examples);
+            split_quant8_1::get_examples());
 }
 
 TEST_F(GeneratedTests, split_quant8_1_relaxed) {
     execute(split_quant8_1::CreateModel_relaxed,
             split_quant8_1::is_ignored_relaxed,
-            split_quant8_1::examples_relaxed);
+            split_quant8_1::get_examples_relaxed());
 }
 
diff --git a/nn/runtime/test/generated/tests/split_quant8_2.mod.py.cpp b/nn/runtime/test/generated/tests/split_quant8_2.mod.py.cpp
index d5fac23..978ec53 100644
--- a/nn/runtime/test/generated/tests/split_quant8_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/split_quant8_2.mod.py.cpp
@@ -12,12 +12,12 @@
 TEST_F(GeneratedTests, split_quant8_2) {
     execute(split_quant8_2::CreateModel,
             split_quant8_2::is_ignored,
-            split_quant8_2::examples);
+            split_quant8_2::get_examples());
 }
 
 TEST_F(GeneratedTests, split_quant8_2_relaxed) {
     execute(split_quant8_2::CreateModel_relaxed,
             split_quant8_2::is_ignored_relaxed,
-            split_quant8_2::examples_relaxed);
+            split_quant8_2::get_examples_relaxed());
 }
 
diff --git a/nn/runtime/test/generated/tests/split_quant8_3.mod.py.cpp b/nn/runtime/test/generated/tests/split_quant8_3.mod.py.cpp
index bd7b1f3..3895763 100644
--- a/nn/runtime/test/generated/tests/split_quant8_3.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/split_quant8_3.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, split_quant8_3) {
     execute(split_quant8_3::CreateModel,
             split_quant8_3::is_ignored,
-            split_quant8_3::examples);
+            split_quant8_3::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/split_quant8_4.mod.py.cpp b/nn/runtime/test/generated/tests/split_quant8_4.mod.py.cpp
index 66b3f0e..19e64e9 100644
--- a/nn/runtime/test/generated/tests/split_quant8_4.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/split_quant8_4.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, split_quant8_4) {
     execute(split_quant8_4::CreateModel,
             split_quant8_4::is_ignored,
-            split_quant8_4::examples);
+            split_quant8_4::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/squeeze.mod.py.cpp b/nn/runtime/test/generated/tests/squeeze.mod.py.cpp
index 0ec9554..2dbe4d1 100644
--- a/nn/runtime/test/generated/tests/squeeze.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/squeeze.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, squeeze) {
     execute(squeeze::CreateModel,
             squeeze::is_ignored,
-            squeeze::examples);
+            squeeze::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/squeeze_float16.mod.py.cpp b/nn/runtime/test/generated/tests/squeeze_float16.mod.py.cpp
new file mode 100644
index 0000000..2e544a9
--- /dev/null
+++ b/nn/runtime/test/generated/tests/squeeze_float16.mod.py.cpp
@@ -0,0 +1,17 @@
+// clang-format off
+// Generated file (from: squeeze_float16.mod.py). Do not edit
+#include "../../TestGenerated.h"
+
+namespace squeeze_float16 {
+// Generated squeeze_float16 test
+#include "generated/examples/squeeze_float16.example.cpp"
+// Generated model constructor
+#include "generated/models/squeeze_float16.model.cpp"
+} // namespace squeeze_float16
+
+TEST_F(GeneratedTests, squeeze_float16) {
+    execute(squeeze_float16::CreateModel,
+            squeeze_float16::is_ignored,
+            squeeze_float16::get_examples());
+}
+
diff --git a/nn/runtime/test/generated/tests/squeeze_float_1.mod.py.cpp b/nn/runtime/test/generated/tests/squeeze_float_1.mod.py.cpp
index dc77961..db84ff6 100644
--- a/nn/runtime/test/generated/tests/squeeze_float_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/squeeze_float_1.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, squeeze_float_1) {
     execute(squeeze_float_1::CreateModel,
             squeeze_float_1::is_ignored,
-            squeeze_float_1::examples);
+            squeeze_float_1::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/squeeze_float_1_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/squeeze_float_1_relaxed.mod.py.cpp
index f37e711..c046cf2 100644
--- a/nn/runtime/test/generated/tests/squeeze_float_1_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/squeeze_float_1_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, squeeze_float_1_relaxed) {
     execute(squeeze_float_1_relaxed::CreateModel,
             squeeze_float_1_relaxed::is_ignored,
-            squeeze_float_1_relaxed::examples);
+            squeeze_float_1_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/squeeze_quant8_1.mod.py.cpp b/nn/runtime/test/generated/tests/squeeze_quant8_1.mod.py.cpp
index beb9a3a..1805170 100644
--- a/nn/runtime/test/generated/tests/squeeze_quant8_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/squeeze_quant8_1.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, squeeze_quant8_1) {
     execute(squeeze_quant8_1::CreateModel,
             squeeze_quant8_1::is_ignored,
-            squeeze_quant8_1::examples);
+            squeeze_quant8_1::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/squeeze_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/squeeze_relaxed.mod.py.cpp
index 6408dc5..33264a8 100644
--- a/nn/runtime/test/generated/tests/squeeze_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/squeeze_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, squeeze_relaxed) {
     execute(squeeze_relaxed::CreateModel,
             squeeze_relaxed::is_ignored,
-            squeeze_relaxed::examples);
+            squeeze_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/strided_slice.mod.py.cpp b/nn/runtime/test/generated/tests/strided_slice.mod.py.cpp
index 712247b..9d85243 100644
--- a/nn/runtime/test/generated/tests/strided_slice.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/strided_slice.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, strided_slice) {
     execute(strided_slice::CreateModel,
             strided_slice::is_ignored,
-            strided_slice::examples);
+            strided_slice::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/strided_slice_float16.mod.py.cpp b/nn/runtime/test/generated/tests/strided_slice_float16.mod.py.cpp
new file mode 100644
index 0000000..7f283ed
--- /dev/null
+++ b/nn/runtime/test/generated/tests/strided_slice_float16.mod.py.cpp
@@ -0,0 +1,17 @@
+// clang-format off
+// Generated file (from: strided_slice_float16.mod.py). Do not edit
+#include "../../TestGenerated.h"
+
+namespace strided_slice_float16 {
+// Generated strided_slice_float16 test
+#include "generated/examples/strided_slice_float16.example.cpp"
+// Generated model constructor
+#include "generated/models/strided_slice_float16.model.cpp"
+} // namespace strided_slice_float16
+
+TEST_F(GeneratedTests, strided_slice_float16) {
+    execute(strided_slice_float16::CreateModel,
+            strided_slice_float16::is_ignored,
+            strided_slice_float16::get_examples());
+}
+
diff --git a/nn/runtime/test/generated/tests/strided_slice_float_1.mod.py.cpp b/nn/runtime/test/generated/tests/strided_slice_float_1.mod.py.cpp
index 4709bf9..774b482 100644
--- a/nn/runtime/test/generated/tests/strided_slice_float_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/strided_slice_float_1.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, strided_slice_float_1) {
     execute(strided_slice_float_1::CreateModel,
             strided_slice_float_1::is_ignored,
-            strided_slice_float_1::examples);
+            strided_slice_float_1::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/strided_slice_float_10.mod.py.cpp b/nn/runtime/test/generated/tests/strided_slice_float_10.mod.py.cpp
index 2fec58f..718f8b9 100644
--- a/nn/runtime/test/generated/tests/strided_slice_float_10.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/strided_slice_float_10.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, strided_slice_float_10) {
     execute(strided_slice_float_10::CreateModel,
             strided_slice_float_10::is_ignored,
-            strided_slice_float_10::examples);
+            strided_slice_float_10::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/strided_slice_float_10_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/strided_slice_float_10_relaxed.mod.py.cpp
index 7eaf1a5..1a356c0 100644
--- a/nn/runtime/test/generated/tests/strided_slice_float_10_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/strided_slice_float_10_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, strided_slice_float_10_relaxed) {
     execute(strided_slice_float_10_relaxed::CreateModel,
             strided_slice_float_10_relaxed::is_ignored,
-            strided_slice_float_10_relaxed::examples);
+            strided_slice_float_10_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/strided_slice_float_11.mod.py.cpp b/nn/runtime/test/generated/tests/strided_slice_float_11.mod.py.cpp
index b9dfb90..851a98b 100644
--- a/nn/runtime/test/generated/tests/strided_slice_float_11.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/strided_slice_float_11.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, strided_slice_float_11) {
     execute(strided_slice_float_11::CreateModel,
             strided_slice_float_11::is_ignored,
-            strided_slice_float_11::examples);
+            strided_slice_float_11::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/strided_slice_float_11_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/strided_slice_float_11_relaxed.mod.py.cpp
index c448aa1..30768dc 100644
--- a/nn/runtime/test/generated/tests/strided_slice_float_11_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/strided_slice_float_11_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, strided_slice_float_11_relaxed) {
     execute(strided_slice_float_11_relaxed::CreateModel,
             strided_slice_float_11_relaxed::is_ignored,
-            strided_slice_float_11_relaxed::examples);
+            strided_slice_float_11_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/strided_slice_float_1_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/strided_slice_float_1_relaxed.mod.py.cpp
index 1f5170f..770acef 100644
--- a/nn/runtime/test/generated/tests/strided_slice_float_1_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/strided_slice_float_1_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, strided_slice_float_1_relaxed) {
     execute(strided_slice_float_1_relaxed::CreateModel,
             strided_slice_float_1_relaxed::is_ignored,
-            strided_slice_float_1_relaxed::examples);
+            strided_slice_float_1_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/strided_slice_float_2.mod.py.cpp b/nn/runtime/test/generated/tests/strided_slice_float_2.mod.py.cpp
index 62d3f0f..d08dc21 100644
--- a/nn/runtime/test/generated/tests/strided_slice_float_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/strided_slice_float_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, strided_slice_float_2) {
     execute(strided_slice_float_2::CreateModel,
             strided_slice_float_2::is_ignored,
-            strided_slice_float_2::examples);
+            strided_slice_float_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/strided_slice_float_2_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/strided_slice_float_2_relaxed.mod.py.cpp
index b9acff6..42452ff 100644
--- a/nn/runtime/test/generated/tests/strided_slice_float_2_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/strided_slice_float_2_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, strided_slice_float_2_relaxed) {
     execute(strided_slice_float_2_relaxed::CreateModel,
             strided_slice_float_2_relaxed::is_ignored,
-            strided_slice_float_2_relaxed::examples);
+            strided_slice_float_2_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/strided_slice_float_3.mod.py.cpp b/nn/runtime/test/generated/tests/strided_slice_float_3.mod.py.cpp
index 2a1de16..7b91a0c 100644
--- a/nn/runtime/test/generated/tests/strided_slice_float_3.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/strided_slice_float_3.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, strided_slice_float_3) {
     execute(strided_slice_float_3::CreateModel,
             strided_slice_float_3::is_ignored,
-            strided_slice_float_3::examples);
+            strided_slice_float_3::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/strided_slice_float_3_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/strided_slice_float_3_relaxed.mod.py.cpp
index 4ae61c1..81653e3 100644
--- a/nn/runtime/test/generated/tests/strided_slice_float_3_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/strided_slice_float_3_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, strided_slice_float_3_relaxed) {
     execute(strided_slice_float_3_relaxed::CreateModel,
             strided_slice_float_3_relaxed::is_ignored,
-            strided_slice_float_3_relaxed::examples);
+            strided_slice_float_3_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/strided_slice_float_4.mod.py.cpp b/nn/runtime/test/generated/tests/strided_slice_float_4.mod.py.cpp
index ccefad2..65936d1 100644
--- a/nn/runtime/test/generated/tests/strided_slice_float_4.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/strided_slice_float_4.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, strided_slice_float_4) {
     execute(strided_slice_float_4::CreateModel,
             strided_slice_float_4::is_ignored,
-            strided_slice_float_4::examples);
+            strided_slice_float_4::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/strided_slice_float_4_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/strided_slice_float_4_relaxed.mod.py.cpp
index fff27d1..be17a06 100644
--- a/nn/runtime/test/generated/tests/strided_slice_float_4_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/strided_slice_float_4_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, strided_slice_float_4_relaxed) {
     execute(strided_slice_float_4_relaxed::CreateModel,
             strided_slice_float_4_relaxed::is_ignored,
-            strided_slice_float_4_relaxed::examples);
+            strided_slice_float_4_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/strided_slice_float_5.mod.py.cpp b/nn/runtime/test/generated/tests/strided_slice_float_5.mod.py.cpp
index 4ba997e..498646b 100644
--- a/nn/runtime/test/generated/tests/strided_slice_float_5.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/strided_slice_float_5.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, strided_slice_float_5) {
     execute(strided_slice_float_5::CreateModel,
             strided_slice_float_5::is_ignored,
-            strided_slice_float_5::examples);
+            strided_slice_float_5::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/strided_slice_float_5_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/strided_slice_float_5_relaxed.mod.py.cpp
index cf2257b..6e0db3f 100644
--- a/nn/runtime/test/generated/tests/strided_slice_float_5_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/strided_slice_float_5_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, strided_slice_float_5_relaxed) {
     execute(strided_slice_float_5_relaxed::CreateModel,
             strided_slice_float_5_relaxed::is_ignored,
-            strided_slice_float_5_relaxed::examples);
+            strided_slice_float_5_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/strided_slice_float_6.mod.py.cpp b/nn/runtime/test/generated/tests/strided_slice_float_6.mod.py.cpp
index 2368890..bea8ed3 100644
--- a/nn/runtime/test/generated/tests/strided_slice_float_6.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/strided_slice_float_6.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, strided_slice_float_6) {
     execute(strided_slice_float_6::CreateModel,
             strided_slice_float_6::is_ignored,
-            strided_slice_float_6::examples);
+            strided_slice_float_6::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/strided_slice_float_6_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/strided_slice_float_6_relaxed.mod.py.cpp
index 2ebc6d5..f08b1f4 100644
--- a/nn/runtime/test/generated/tests/strided_slice_float_6_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/strided_slice_float_6_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, strided_slice_float_6_relaxed) {
     execute(strided_slice_float_6_relaxed::CreateModel,
             strided_slice_float_6_relaxed::is_ignored,
-            strided_slice_float_6_relaxed::examples);
+            strided_slice_float_6_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/strided_slice_float_7.mod.py.cpp b/nn/runtime/test/generated/tests/strided_slice_float_7.mod.py.cpp
index d8b52a0..3b4a0d3 100644
--- a/nn/runtime/test/generated/tests/strided_slice_float_7.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/strided_slice_float_7.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, strided_slice_float_7) {
     execute(strided_slice_float_7::CreateModel,
             strided_slice_float_7::is_ignored,
-            strided_slice_float_7::examples);
+            strided_slice_float_7::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/strided_slice_float_7_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/strided_slice_float_7_relaxed.mod.py.cpp
index b4977fc..cc068ea 100644
--- a/nn/runtime/test/generated/tests/strided_slice_float_7_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/strided_slice_float_7_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, strided_slice_float_7_relaxed) {
     execute(strided_slice_float_7_relaxed::CreateModel,
             strided_slice_float_7_relaxed::is_ignored,
-            strided_slice_float_7_relaxed::examples);
+            strided_slice_float_7_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/strided_slice_float_8.mod.py.cpp b/nn/runtime/test/generated/tests/strided_slice_float_8.mod.py.cpp
index 9d223f6..c53428d 100644
--- a/nn/runtime/test/generated/tests/strided_slice_float_8.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/strided_slice_float_8.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, strided_slice_float_8) {
     execute(strided_slice_float_8::CreateModel,
             strided_slice_float_8::is_ignored,
-            strided_slice_float_8::examples);
+            strided_slice_float_8::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/strided_slice_float_8_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/strided_slice_float_8_relaxed.mod.py.cpp
index a152114..668c4a6 100644
--- a/nn/runtime/test/generated/tests/strided_slice_float_8_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/strided_slice_float_8_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, strided_slice_float_8_relaxed) {
     execute(strided_slice_float_8_relaxed::CreateModel,
             strided_slice_float_8_relaxed::is_ignored,
-            strided_slice_float_8_relaxed::examples);
+            strided_slice_float_8_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/strided_slice_float_9.mod.py.cpp b/nn/runtime/test/generated/tests/strided_slice_float_9.mod.py.cpp
index 3566b16..d542592 100644
--- a/nn/runtime/test/generated/tests/strided_slice_float_9.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/strided_slice_float_9.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, strided_slice_float_9) {
     execute(strided_slice_float_9::CreateModel,
             strided_slice_float_9::is_ignored,
-            strided_slice_float_9::examples);
+            strided_slice_float_9::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/strided_slice_float_9_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/strided_slice_float_9_relaxed.mod.py.cpp
index 3c9697a..0457e7d 100644
--- a/nn/runtime/test/generated/tests/strided_slice_float_9_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/strided_slice_float_9_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, strided_slice_float_9_relaxed) {
     execute(strided_slice_float_9_relaxed::CreateModel,
             strided_slice_float_9_relaxed::is_ignored,
-            strided_slice_float_9_relaxed::examples);
+            strided_slice_float_9_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/strided_slice_qaunt8_10.mod.py.cpp b/nn/runtime/test/generated/tests/strided_slice_qaunt8_10.mod.py.cpp
index a7d12ac..575a016 100644
--- a/nn/runtime/test/generated/tests/strided_slice_qaunt8_10.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/strided_slice_qaunt8_10.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, strided_slice_qaunt8_10) {
     execute(strided_slice_qaunt8_10::CreateModel,
             strided_slice_qaunt8_10::is_ignored,
-            strided_slice_qaunt8_10::examples);
+            strided_slice_qaunt8_10::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/strided_slice_qaunt8_11.mod.py.cpp b/nn/runtime/test/generated/tests/strided_slice_qaunt8_11.mod.py.cpp
index f2016c2..3e52a6d 100644
--- a/nn/runtime/test/generated/tests/strided_slice_qaunt8_11.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/strided_slice_qaunt8_11.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, strided_slice_qaunt8_11) {
     execute(strided_slice_qaunt8_11::CreateModel,
             strided_slice_qaunt8_11::is_ignored,
-            strided_slice_qaunt8_11::examples);
+            strided_slice_qaunt8_11::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/strided_slice_quant8_1.mod.py.cpp b/nn/runtime/test/generated/tests/strided_slice_quant8_1.mod.py.cpp
index cf0edbb..d3c395e 100644
--- a/nn/runtime/test/generated/tests/strided_slice_quant8_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/strided_slice_quant8_1.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, strided_slice_quant8_1) {
     execute(strided_slice_quant8_1::CreateModel,
             strided_slice_quant8_1::is_ignored,
-            strided_slice_quant8_1::examples);
+            strided_slice_quant8_1::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/strided_slice_quant8_2.mod.py.cpp b/nn/runtime/test/generated/tests/strided_slice_quant8_2.mod.py.cpp
index 494c62b..9c73eb8 100644
--- a/nn/runtime/test/generated/tests/strided_slice_quant8_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/strided_slice_quant8_2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, strided_slice_quant8_2) {
     execute(strided_slice_quant8_2::CreateModel,
             strided_slice_quant8_2::is_ignored,
-            strided_slice_quant8_2::examples);
+            strided_slice_quant8_2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/strided_slice_quant8_3.mod.py.cpp b/nn/runtime/test/generated/tests/strided_slice_quant8_3.mod.py.cpp
index 87ba7c1..5028a2e 100644
--- a/nn/runtime/test/generated/tests/strided_slice_quant8_3.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/strided_slice_quant8_3.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, strided_slice_quant8_3) {
     execute(strided_slice_quant8_3::CreateModel,
             strided_slice_quant8_3::is_ignored,
-            strided_slice_quant8_3::examples);
+            strided_slice_quant8_3::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/strided_slice_quant8_4.mod.py.cpp b/nn/runtime/test/generated/tests/strided_slice_quant8_4.mod.py.cpp
index d11566a..d5e862c 100644
--- a/nn/runtime/test/generated/tests/strided_slice_quant8_4.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/strided_slice_quant8_4.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, strided_slice_quant8_4) {
     execute(strided_slice_quant8_4::CreateModel,
             strided_slice_quant8_4::is_ignored,
-            strided_slice_quant8_4::examples);
+            strided_slice_quant8_4::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/strided_slice_quant8_5.mod.py.cpp b/nn/runtime/test/generated/tests/strided_slice_quant8_5.mod.py.cpp
index fa4276e..c7cc2fa 100644
--- a/nn/runtime/test/generated/tests/strided_slice_quant8_5.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/strided_slice_quant8_5.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, strided_slice_quant8_5) {
     execute(strided_slice_quant8_5::CreateModel,
             strided_slice_quant8_5::is_ignored,
-            strided_slice_quant8_5::examples);
+            strided_slice_quant8_5::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/strided_slice_quant8_6.mod.py.cpp b/nn/runtime/test/generated/tests/strided_slice_quant8_6.mod.py.cpp
index 6641636..b60e227 100644
--- a/nn/runtime/test/generated/tests/strided_slice_quant8_6.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/strided_slice_quant8_6.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, strided_slice_quant8_6) {
     execute(strided_slice_quant8_6::CreateModel,
             strided_slice_quant8_6::is_ignored,
-            strided_slice_quant8_6::examples);
+            strided_slice_quant8_6::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/strided_slice_quant8_7.mod.py.cpp b/nn/runtime/test/generated/tests/strided_slice_quant8_7.mod.py.cpp
index f900083..f5ceb42 100644
--- a/nn/runtime/test/generated/tests/strided_slice_quant8_7.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/strided_slice_quant8_7.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, strided_slice_quant8_7) {
     execute(strided_slice_quant8_7::CreateModel,
             strided_slice_quant8_7::is_ignored,
-            strided_slice_quant8_7::examples);
+            strided_slice_quant8_7::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/strided_slice_quant8_8.mod.py.cpp b/nn/runtime/test/generated/tests/strided_slice_quant8_8.mod.py.cpp
index cb4289f..096349d 100644
--- a/nn/runtime/test/generated/tests/strided_slice_quant8_8.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/strided_slice_quant8_8.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, strided_slice_quant8_8) {
     execute(strided_slice_quant8_8::CreateModel,
             strided_slice_quant8_8::is_ignored,
-            strided_slice_quant8_8::examples);
+            strided_slice_quant8_8::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/strided_slice_quant8_9.mod.py.cpp b/nn/runtime/test/generated/tests/strided_slice_quant8_9.mod.py.cpp
index 0d394d0..dfc993c 100644
--- a/nn/runtime/test/generated/tests/strided_slice_quant8_9.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/strided_slice_quant8_9.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, strided_slice_quant8_9) {
     execute(strided_slice_quant8_9::CreateModel,
             strided_slice_quant8_9::is_ignored,
-            strided_slice_quant8_9::examples);
+            strided_slice_quant8_9::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/strided_slice_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/strided_slice_relaxed.mod.py.cpp
index 100a0a1..7f45872 100644
--- a/nn/runtime/test/generated/tests/strided_slice_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/strided_slice_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, strided_slice_relaxed) {
     execute(strided_slice_relaxed::CreateModel,
             strided_slice_relaxed::is_ignored,
-            strided_slice_relaxed::examples);
+            strided_slice_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/sub.mod.py.cpp b/nn/runtime/test/generated/tests/sub.mod.py.cpp
index 730f462..76fcc24 100644
--- a/nn/runtime/test/generated/tests/sub.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/sub.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, sub) {
     execute(sub::CreateModel,
             sub::is_ignored,
-            sub::examples);
+            sub::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/sub_broadcast_float.mod.py.cpp b/nn/runtime/test/generated/tests/sub_broadcast_float.mod.py.cpp
index 4228046..8905da1 100644
--- a/nn/runtime/test/generated/tests/sub_broadcast_float.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/sub_broadcast_float.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, sub_broadcast_float) {
     execute(sub_broadcast_float::CreateModel,
             sub_broadcast_float::is_ignored,
-            sub_broadcast_float::examples);
+            sub_broadcast_float::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/sub_broadcast_float_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/sub_broadcast_float_relaxed.mod.py.cpp
index 4e5bfa6..3937bd3 100644
--- a/nn/runtime/test/generated/tests/sub_broadcast_float_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/sub_broadcast_float_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, sub_broadcast_float_relaxed) {
     execute(sub_broadcast_float_relaxed::CreateModel,
             sub_broadcast_float_relaxed::is_ignored,
-            sub_broadcast_float_relaxed::examples);
+            sub_broadcast_float_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/sub_float16.mod.py.cpp b/nn/runtime/test/generated/tests/sub_float16.mod.py.cpp
index 7b984ba..16bb702 100644
--- a/nn/runtime/test/generated/tests/sub_float16.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/sub_float16.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, sub_float16) {
     execute(sub_float16::CreateModel,
             sub_float16::is_ignored,
-            sub_float16::examples);
+            sub_float16::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/sub_float16_broadcast.mod.py.cpp b/nn/runtime/test/generated/tests/sub_float16_broadcast.mod.py.cpp
index c4536be..c6c4a29 100644
--- a/nn/runtime/test/generated/tests/sub_float16_broadcast.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/sub_float16_broadcast.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, sub_float16_broadcast) {
     execute(sub_float16_broadcast::CreateModel,
             sub_float16_broadcast::is_ignored,
-            sub_float16_broadcast::examples);
+            sub_float16_broadcast::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/sub_quantized.mod.py.cpp b/nn/runtime/test/generated/tests/sub_quantized.mod.py.cpp
index 4a4e581..e56613e 100644
--- a/nn/runtime/test/generated/tests/sub_quantized.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/sub_quantized.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, sub_quantized) {
     execute(sub_quantized::CreateModel,
             sub_quantized::is_ignored,
-            sub_quantized::examples);
+            sub_quantized::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/sub_quantized_broadcast.mod.py.cpp b/nn/runtime/test/generated/tests/sub_quantized_broadcast.mod.py.cpp
index ed674fd..aadf039 100644
--- a/nn/runtime/test/generated/tests/sub_quantized_broadcast.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/sub_quantized_broadcast.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, sub_quantized_broadcast) {
     execute(sub_quantized_broadcast::CreateModel,
             sub_quantized_broadcast::is_ignored,
-            sub_quantized_broadcast::examples);
+            sub_quantized_broadcast::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/sub_quantized_different_scales.mod.py.cpp b/nn/runtime/test/generated/tests/sub_quantized_different_scales.mod.py.cpp
index 0fb5f33..4355068 100644
--- a/nn/runtime/test/generated/tests/sub_quantized_different_scales.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/sub_quantized_different_scales.mod.py.cpp
@@ -12,384 +12,384 @@
 TEST_F(GeneratedTests, sub_quantized_different_scales) {
     execute(sub_quantized_different_scales::CreateModel,
             sub_quantized_different_scales::is_ignored,
-            sub_quantized_different_scales::examples);
+            sub_quantized_different_scales::get_examples());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_2) {
     execute(sub_quantized_different_scales::CreateModel_2,
             sub_quantized_different_scales::is_ignored_2,
-            sub_quantized_different_scales::examples_2);
+            sub_quantized_different_scales::get_examples_2());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_3) {
     execute(sub_quantized_different_scales::CreateModel_3,
             sub_quantized_different_scales::is_ignored_3,
-            sub_quantized_different_scales::examples_3);
+            sub_quantized_different_scales::get_examples_3());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_4) {
     execute(sub_quantized_different_scales::CreateModel_4,
             sub_quantized_different_scales::is_ignored_4,
-            sub_quantized_different_scales::examples_4);
+            sub_quantized_different_scales::get_examples_4());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_5) {
     execute(sub_quantized_different_scales::CreateModel_5,
             sub_quantized_different_scales::is_ignored_5,
-            sub_quantized_different_scales::examples_5);
+            sub_quantized_different_scales::get_examples_5());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_6) {
     execute(sub_quantized_different_scales::CreateModel_6,
             sub_quantized_different_scales::is_ignored_6,
-            sub_quantized_different_scales::examples_6);
+            sub_quantized_different_scales::get_examples_6());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_7) {
     execute(sub_quantized_different_scales::CreateModel_7,
             sub_quantized_different_scales::is_ignored_7,
-            sub_quantized_different_scales::examples_7);
+            sub_quantized_different_scales::get_examples_7());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_8) {
     execute(sub_quantized_different_scales::CreateModel_8,
             sub_quantized_different_scales::is_ignored_8,
-            sub_quantized_different_scales::examples_8);
+            sub_quantized_different_scales::get_examples_8());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_9) {
     execute(sub_quantized_different_scales::CreateModel_9,
             sub_quantized_different_scales::is_ignored_9,
-            sub_quantized_different_scales::examples_9);
+            sub_quantized_different_scales::get_examples_9());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_10) {
     execute(sub_quantized_different_scales::CreateModel_10,
             sub_quantized_different_scales::is_ignored_10,
-            sub_quantized_different_scales::examples_10);
+            sub_quantized_different_scales::get_examples_10());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_11) {
     execute(sub_quantized_different_scales::CreateModel_11,
             sub_quantized_different_scales::is_ignored_11,
-            sub_quantized_different_scales::examples_11);
+            sub_quantized_different_scales::get_examples_11());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_12) {
     execute(sub_quantized_different_scales::CreateModel_12,
             sub_quantized_different_scales::is_ignored_12,
-            sub_quantized_different_scales::examples_12);
+            sub_quantized_different_scales::get_examples_12());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_13) {
     execute(sub_quantized_different_scales::CreateModel_13,
             sub_quantized_different_scales::is_ignored_13,
-            sub_quantized_different_scales::examples_13);
+            sub_quantized_different_scales::get_examples_13());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_14) {
     execute(sub_quantized_different_scales::CreateModel_14,
             sub_quantized_different_scales::is_ignored_14,
-            sub_quantized_different_scales::examples_14);
+            sub_quantized_different_scales::get_examples_14());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_15) {
     execute(sub_quantized_different_scales::CreateModel_15,
             sub_quantized_different_scales::is_ignored_15,
-            sub_quantized_different_scales::examples_15);
+            sub_quantized_different_scales::get_examples_15());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_16) {
     execute(sub_quantized_different_scales::CreateModel_16,
             sub_quantized_different_scales::is_ignored_16,
-            sub_quantized_different_scales::examples_16);
+            sub_quantized_different_scales::get_examples_16());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_17) {
     execute(sub_quantized_different_scales::CreateModel_17,
             sub_quantized_different_scales::is_ignored_17,
-            sub_quantized_different_scales::examples_17);
+            sub_quantized_different_scales::get_examples_17());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_18) {
     execute(sub_quantized_different_scales::CreateModel_18,
             sub_quantized_different_scales::is_ignored_18,
-            sub_quantized_different_scales::examples_18);
+            sub_quantized_different_scales::get_examples_18());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_19) {
     execute(sub_quantized_different_scales::CreateModel_19,
             sub_quantized_different_scales::is_ignored_19,
-            sub_quantized_different_scales::examples_19);
+            sub_quantized_different_scales::get_examples_19());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_20) {
     execute(sub_quantized_different_scales::CreateModel_20,
             sub_quantized_different_scales::is_ignored_20,
-            sub_quantized_different_scales::examples_20);
+            sub_quantized_different_scales::get_examples_20());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_21) {
     execute(sub_quantized_different_scales::CreateModel_21,
             sub_quantized_different_scales::is_ignored_21,
-            sub_quantized_different_scales::examples_21);
+            sub_quantized_different_scales::get_examples_21());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_22) {
     execute(sub_quantized_different_scales::CreateModel_22,
             sub_quantized_different_scales::is_ignored_22,
-            sub_quantized_different_scales::examples_22);
+            sub_quantized_different_scales::get_examples_22());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_23) {
     execute(sub_quantized_different_scales::CreateModel_23,
             sub_quantized_different_scales::is_ignored_23,
-            sub_quantized_different_scales::examples_23);
+            sub_quantized_different_scales::get_examples_23());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_24) {
     execute(sub_quantized_different_scales::CreateModel_24,
             sub_quantized_different_scales::is_ignored_24,
-            sub_quantized_different_scales::examples_24);
+            sub_quantized_different_scales::get_examples_24());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_25) {
     execute(sub_quantized_different_scales::CreateModel_25,
             sub_quantized_different_scales::is_ignored_25,
-            sub_quantized_different_scales::examples_25);
+            sub_quantized_different_scales::get_examples_25());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_26) {
     execute(sub_quantized_different_scales::CreateModel_26,
             sub_quantized_different_scales::is_ignored_26,
-            sub_quantized_different_scales::examples_26);
+            sub_quantized_different_scales::get_examples_26());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_27) {
     execute(sub_quantized_different_scales::CreateModel_27,
             sub_quantized_different_scales::is_ignored_27,
-            sub_quantized_different_scales::examples_27);
+            sub_quantized_different_scales::get_examples_27());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_28) {
     execute(sub_quantized_different_scales::CreateModel_28,
             sub_quantized_different_scales::is_ignored_28,
-            sub_quantized_different_scales::examples_28);
+            sub_quantized_different_scales::get_examples_28());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_29) {
     execute(sub_quantized_different_scales::CreateModel_29,
             sub_quantized_different_scales::is_ignored_29,
-            sub_quantized_different_scales::examples_29);
+            sub_quantized_different_scales::get_examples_29());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_30) {
     execute(sub_quantized_different_scales::CreateModel_30,
             sub_quantized_different_scales::is_ignored_30,
-            sub_quantized_different_scales::examples_30);
+            sub_quantized_different_scales::get_examples_30());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_31) {
     execute(sub_quantized_different_scales::CreateModel_31,
             sub_quantized_different_scales::is_ignored_31,
-            sub_quantized_different_scales::examples_31);
+            sub_quantized_different_scales::get_examples_31());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_32) {
     execute(sub_quantized_different_scales::CreateModel_32,
             sub_quantized_different_scales::is_ignored_32,
-            sub_quantized_different_scales::examples_32);
+            sub_quantized_different_scales::get_examples_32());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_33) {
     execute(sub_quantized_different_scales::CreateModel_33,
             sub_quantized_different_scales::is_ignored_33,
-            sub_quantized_different_scales::examples_33);
+            sub_quantized_different_scales::get_examples_33());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_34) {
     execute(sub_quantized_different_scales::CreateModel_34,
             sub_quantized_different_scales::is_ignored_34,
-            sub_quantized_different_scales::examples_34);
+            sub_quantized_different_scales::get_examples_34());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_35) {
     execute(sub_quantized_different_scales::CreateModel_35,
             sub_quantized_different_scales::is_ignored_35,
-            sub_quantized_different_scales::examples_35);
+            sub_quantized_different_scales::get_examples_35());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_36) {
     execute(sub_quantized_different_scales::CreateModel_36,
             sub_quantized_different_scales::is_ignored_36,
-            sub_quantized_different_scales::examples_36);
+            sub_quantized_different_scales::get_examples_36());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_37) {
     execute(sub_quantized_different_scales::CreateModel_37,
             sub_quantized_different_scales::is_ignored_37,
-            sub_quantized_different_scales::examples_37);
+            sub_quantized_different_scales::get_examples_37());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_38) {
     execute(sub_quantized_different_scales::CreateModel_38,
             sub_quantized_different_scales::is_ignored_38,
-            sub_quantized_different_scales::examples_38);
+            sub_quantized_different_scales::get_examples_38());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_39) {
     execute(sub_quantized_different_scales::CreateModel_39,
             sub_quantized_different_scales::is_ignored_39,
-            sub_quantized_different_scales::examples_39);
+            sub_quantized_different_scales::get_examples_39());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_40) {
     execute(sub_quantized_different_scales::CreateModel_40,
             sub_quantized_different_scales::is_ignored_40,
-            sub_quantized_different_scales::examples_40);
+            sub_quantized_different_scales::get_examples_40());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_41) {
     execute(sub_quantized_different_scales::CreateModel_41,
             sub_quantized_different_scales::is_ignored_41,
-            sub_quantized_different_scales::examples_41);
+            sub_quantized_different_scales::get_examples_41());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_42) {
     execute(sub_quantized_different_scales::CreateModel_42,
             sub_quantized_different_scales::is_ignored_42,
-            sub_quantized_different_scales::examples_42);
+            sub_quantized_different_scales::get_examples_42());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_43) {
     execute(sub_quantized_different_scales::CreateModel_43,
             sub_quantized_different_scales::is_ignored_43,
-            sub_quantized_different_scales::examples_43);
+            sub_quantized_different_scales::get_examples_43());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_44) {
     execute(sub_quantized_different_scales::CreateModel_44,
             sub_quantized_different_scales::is_ignored_44,
-            sub_quantized_different_scales::examples_44);
+            sub_quantized_different_scales::get_examples_44());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_45) {
     execute(sub_quantized_different_scales::CreateModel_45,
             sub_quantized_different_scales::is_ignored_45,
-            sub_quantized_different_scales::examples_45);
+            sub_quantized_different_scales::get_examples_45());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_46) {
     execute(sub_quantized_different_scales::CreateModel_46,
             sub_quantized_different_scales::is_ignored_46,
-            sub_quantized_different_scales::examples_46);
+            sub_quantized_different_scales::get_examples_46());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_47) {
     execute(sub_quantized_different_scales::CreateModel_47,
             sub_quantized_different_scales::is_ignored_47,
-            sub_quantized_different_scales::examples_47);
+            sub_quantized_different_scales::get_examples_47());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_48) {
     execute(sub_quantized_different_scales::CreateModel_48,
             sub_quantized_different_scales::is_ignored_48,
-            sub_quantized_different_scales::examples_48);
+            sub_quantized_different_scales::get_examples_48());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_49) {
     execute(sub_quantized_different_scales::CreateModel_49,
             sub_quantized_different_scales::is_ignored_49,
-            sub_quantized_different_scales::examples_49);
+            sub_quantized_different_scales::get_examples_49());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_50) {
     execute(sub_quantized_different_scales::CreateModel_50,
             sub_quantized_different_scales::is_ignored_50,
-            sub_quantized_different_scales::examples_50);
+            sub_quantized_different_scales::get_examples_50());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_51) {
     execute(sub_quantized_different_scales::CreateModel_51,
             sub_quantized_different_scales::is_ignored_51,
-            sub_quantized_different_scales::examples_51);
+            sub_quantized_different_scales::get_examples_51());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_52) {
     execute(sub_quantized_different_scales::CreateModel_52,
             sub_quantized_different_scales::is_ignored_52,
-            sub_quantized_different_scales::examples_52);
+            sub_quantized_different_scales::get_examples_52());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_53) {
     execute(sub_quantized_different_scales::CreateModel_53,
             sub_quantized_different_scales::is_ignored_53,
-            sub_quantized_different_scales::examples_53);
+            sub_quantized_different_scales::get_examples_53());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_54) {
     execute(sub_quantized_different_scales::CreateModel_54,
             sub_quantized_different_scales::is_ignored_54,
-            sub_quantized_different_scales::examples_54);
+            sub_quantized_different_scales::get_examples_54());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_55) {
     execute(sub_quantized_different_scales::CreateModel_55,
             sub_quantized_different_scales::is_ignored_55,
-            sub_quantized_different_scales::examples_55);
+            sub_quantized_different_scales::get_examples_55());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_56) {
     execute(sub_quantized_different_scales::CreateModel_56,
             sub_quantized_different_scales::is_ignored_56,
-            sub_quantized_different_scales::examples_56);
+            sub_quantized_different_scales::get_examples_56());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_57) {
     execute(sub_quantized_different_scales::CreateModel_57,
             sub_quantized_different_scales::is_ignored_57,
-            sub_quantized_different_scales::examples_57);
+            sub_quantized_different_scales::get_examples_57());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_58) {
     execute(sub_quantized_different_scales::CreateModel_58,
             sub_quantized_different_scales::is_ignored_58,
-            sub_quantized_different_scales::examples_58);
+            sub_quantized_different_scales::get_examples_58());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_59) {
     execute(sub_quantized_different_scales::CreateModel_59,
             sub_quantized_different_scales::is_ignored_59,
-            sub_quantized_different_scales::examples_59);
+            sub_quantized_different_scales::get_examples_59());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_60) {
     execute(sub_quantized_different_scales::CreateModel_60,
             sub_quantized_different_scales::is_ignored_60,
-            sub_quantized_different_scales::examples_60);
+            sub_quantized_different_scales::get_examples_60());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_61) {
     execute(sub_quantized_different_scales::CreateModel_61,
             sub_quantized_different_scales::is_ignored_61,
-            sub_quantized_different_scales::examples_61);
+            sub_quantized_different_scales::get_examples_61());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_62) {
     execute(sub_quantized_different_scales::CreateModel_62,
             sub_quantized_different_scales::is_ignored_62,
-            sub_quantized_different_scales::examples_62);
+            sub_quantized_different_scales::get_examples_62());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_63) {
     execute(sub_quantized_different_scales::CreateModel_63,
             sub_quantized_different_scales::is_ignored_63,
-            sub_quantized_different_scales::examples_63);
+            sub_quantized_different_scales::get_examples_63());
 }
 
 TEST_F(GeneratedTests, sub_quantized_different_scales_64) {
     execute(sub_quantized_different_scales::CreateModel_64,
             sub_quantized_different_scales::is_ignored_64,
-            sub_quantized_different_scales::examples_64);
+            sub_quantized_different_scales::get_examples_64());
 }
 
diff --git a/nn/runtime/test/generated/tests/sub_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/sub_relaxed.mod.py.cpp
index 9d21ec8..faebaa6 100644
--- a/nn/runtime/test/generated/tests/sub_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/sub_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, sub_relaxed) {
     execute(sub_relaxed::CreateModel,
             sub_relaxed::is_ignored,
-            sub_relaxed::examples);
+            sub_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/svdf.mod.py.cpp b/nn/runtime/test/generated/tests/svdf.mod.py.cpp
index d20d673..53b0c7b 100644
--- a/nn/runtime/test/generated/tests/svdf.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/svdf.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, svdf) {
     execute(svdf::CreateModel,
             svdf::is_ignored,
-            svdf::examples);
+            svdf::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/svdf2.mod.py.cpp b/nn/runtime/test/generated/tests/svdf2.mod.py.cpp
index 5da560c..8e2aec5 100644
--- a/nn/runtime/test/generated/tests/svdf2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/svdf2.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, svdf2) {
     execute(svdf2::CreateModel,
             svdf2::is_ignored,
-            svdf2::examples);
+            svdf2::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/svdf2_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/svdf2_relaxed.mod.py.cpp
index 2992455..5f4a24b 100644
--- a/nn/runtime/test/generated/tests/svdf2_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/svdf2_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, svdf2_relaxed) {
     execute(svdf2_relaxed::CreateModel,
             svdf2_relaxed::is_ignored,
-            svdf2_relaxed::examples);
+            svdf2_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/svdf_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/svdf_relaxed.mod.py.cpp
index c1c496c..28c66f5 100644
--- a/nn/runtime/test/generated/tests/svdf_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/svdf_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, svdf_relaxed) {
     execute(svdf_relaxed::CreateModel,
             svdf_relaxed::is_ignored,
-            svdf_relaxed::examples);
+            svdf_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/svdf_state.mod.py.cpp b/nn/runtime/test/generated/tests/svdf_state.mod.py.cpp
index 6602360..de746d7 100644
--- a/nn/runtime/test/generated/tests/svdf_state.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/svdf_state.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, svdf_state) {
     execute(svdf_state::CreateModel,
             svdf_state::is_ignored,
-            svdf_state::examples);
+            svdf_state::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/svdf_state_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/svdf_state_relaxed.mod.py.cpp
index 3dac9e3..130a7c8 100644
--- a/nn/runtime/test/generated/tests/svdf_state_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/svdf_state_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, svdf_state_relaxed) {
     execute(svdf_state_relaxed::CreateModel,
             svdf_state_relaxed::is_ignored,
-            svdf_state_relaxed::examples);
+            svdf_state_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/tanh.mod.py.cpp b/nn/runtime/test/generated/tests/tanh.mod.py.cpp
index 5e93892..6df3e14 100644
--- a/nn/runtime/test/generated/tests/tanh.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/tanh.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, tanh) {
     execute(tanh::CreateModel,
             tanh::is_ignored,
-            tanh::examples);
+            tanh::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/tanh_float16.mod.py.cpp b/nn/runtime/test/generated/tests/tanh_float16.mod.py.cpp
index 9016081..3d76f54 100644
--- a/nn/runtime/test/generated/tests/tanh_float16.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/tanh_float16.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, tanh_float16) {
     execute(tanh_float16::CreateModel,
             tanh_float16::is_ignored,
-            tanh_float16::examples);
+            tanh_float16::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/tanh_quantized.mod.py.cpp b/nn/runtime/test/generated/tests/tanh_quantized.mod.py.cpp
index 306421c..79ff989 100644
--- a/nn/runtime/test/generated/tests/tanh_quantized.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/tanh_quantized.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, tanh_quantized) {
     execute(tanh_quantized::CreateModel,
             tanh_quantized::is_ignored,
-            tanh_quantized::examples);
+            tanh_quantized::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/tanh_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/tanh_relaxed.mod.py.cpp
index 60138bd..2a4fe5b 100644
--- a/nn/runtime/test/generated/tests/tanh_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/tanh_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, tanh_relaxed) {
     execute(tanh_relaxed::CreateModel,
             tanh_relaxed::is_ignored,
-            tanh_relaxed::examples);
+            tanh_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/tile_1.mod.py.cpp b/nn/runtime/test/generated/tests/tile_1.mod.py.cpp
index 1a1c935..4efdbb5 100644
--- a/nn/runtime/test/generated/tests/tile_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/tile_1.mod.py.cpp
@@ -12,24 +12,24 @@
 TEST_F(GeneratedTests, tile_1) {
     execute(tile_1::CreateModel,
             tile_1::is_ignored,
-            tile_1::examples);
+            tile_1::get_examples());
 }
 
 TEST_F(GeneratedTests, tile_1_relaxed) {
     execute(tile_1::CreateModel_relaxed,
             tile_1::is_ignored_relaxed,
-            tile_1::examples_relaxed);
+            tile_1::get_examples_relaxed());
 }
 
 TEST_F(GeneratedTests, tile_1_float16) {
     execute(tile_1::CreateModel_float16,
             tile_1::is_ignored_float16,
-            tile_1::examples_float16);
+            tile_1::get_examples_float16());
 }
 
 TEST_F(GeneratedTests, tile_1_quant8) {
     execute(tile_1::CreateModel_quant8,
             tile_1::is_ignored_quant8,
-            tile_1::examples_quant8);
+            tile_1::get_examples_quant8());
 }
 
diff --git a/nn/runtime/test/generated/tests/tile_2.mod.py.cpp b/nn/runtime/test/generated/tests/tile_2.mod.py.cpp
index 3e68f86..2a19141 100644
--- a/nn/runtime/test/generated/tests/tile_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/tile_2.mod.py.cpp
@@ -12,30 +12,30 @@
 TEST_F(GeneratedTests, tile_2) {
     execute(tile_2::CreateModel,
             tile_2::is_ignored,
-            tile_2::examples);
+            tile_2::get_examples());
 }
 
 TEST_F(GeneratedTests, tile_2_relaxed) {
     execute(tile_2::CreateModel_relaxed,
             tile_2::is_ignored_relaxed,
-            tile_2::examples_relaxed);
+            tile_2::get_examples_relaxed());
 }
 
 TEST_F(GeneratedTests, tile_2_float16) {
     execute(tile_2::CreateModel_float16,
             tile_2::is_ignored_float16,
-            tile_2::examples_float16);
+            tile_2::get_examples_float16());
 }
 
 TEST_F(GeneratedTests, tile_2_quant8) {
     execute(tile_2::CreateModel_quant8,
             tile_2::is_ignored_quant8,
-            tile_2::examples_quant8);
+            tile_2::get_examples_quant8());
 }
 
 TEST_F(GeneratedTests, tile_2_int32) {
     execute(tile_2::CreateModel_int32,
             tile_2::is_ignored_int32,
-            tile_2::examples_int32);
+            tile_2::get_examples_int32());
 }
 
diff --git a/nn/runtime/test/generated/tests/tile_3.mod.py.cpp b/nn/runtime/test/generated/tests/tile_3.mod.py.cpp
index 0068ac5..aee292b 100644
--- a/nn/runtime/test/generated/tests/tile_3.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/tile_3.mod.py.cpp
@@ -12,30 +12,30 @@
 TEST_F(GeneratedTests, tile_3) {
     execute(tile_3::CreateModel,
             tile_3::is_ignored,
-            tile_3::examples);
+            tile_3::get_examples());
 }
 
 TEST_F(GeneratedTests, tile_3_relaxed) {
     execute(tile_3::CreateModel_relaxed,
             tile_3::is_ignored_relaxed,
-            tile_3::examples_relaxed);
+            tile_3::get_examples_relaxed());
 }
 
 TEST_F(GeneratedTests, tile_3_float16) {
     execute(tile_3::CreateModel_float16,
             tile_3::is_ignored_float16,
-            tile_3::examples_float16);
+            tile_3::get_examples_float16());
 }
 
 TEST_F(GeneratedTests, tile_3_quant8) {
     execute(tile_3::CreateModel_quant8,
             tile_3::is_ignored_quant8,
-            tile_3::examples_quant8);
+            tile_3::get_examples_quant8());
 }
 
 TEST_F(GeneratedTests, tile_3_int32) {
     execute(tile_3::CreateModel_int32,
             tile_3::is_ignored_int32,
-            tile_3::examples_int32);
+            tile_3::get_examples_int32());
 }
 
diff --git a/nn/runtime/test/generated/tests/topk_v2.mod.py.cpp b/nn/runtime/test/generated/tests/topk_v2.mod.py.cpp
index 8075450..655bb5f 100644
--- a/nn/runtime/test/generated/tests/topk_v2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/topk_v2.mod.py.cpp
@@ -12,108 +12,108 @@
 TEST_F(GeneratedTests, topk_v2) {
     execute(topk_v2::CreateModel,
             topk_v2::is_ignored,
-            topk_v2::examples);
+            topk_v2::get_examples());
 }
 
 TEST_F(GeneratedTests, topk_v2_relaxed) {
     execute(topk_v2::CreateModel_relaxed,
             topk_v2::is_ignored_relaxed,
-            topk_v2::examples_relaxed);
+            topk_v2::get_examples_relaxed());
 }
 
 TEST_F(GeneratedTests, topk_v2_float16) {
     execute(topk_v2::CreateModel_float16,
             topk_v2::is_ignored_float16,
-            topk_v2::examples_float16);
+            topk_v2::get_examples_float16());
 }
 
 TEST_F(GeneratedTests, topk_v2_2) {
     execute(topk_v2::CreateModel_2,
             topk_v2::is_ignored_2,
-            topk_v2::examples_2);
+            topk_v2::get_examples_2());
 }
 
 TEST_F(GeneratedTests, topk_v2_relaxed_2) {
     execute(topk_v2::CreateModel_relaxed_2,
             topk_v2::is_ignored_relaxed_2,
-            topk_v2::examples_relaxed_2);
+            topk_v2::get_examples_relaxed_2());
 }
 
 TEST_F(GeneratedTests, topk_v2_float16_2) {
     execute(topk_v2::CreateModel_float16_2,
             topk_v2::is_ignored_float16_2,
-            topk_v2::examples_float16_2);
+            topk_v2::get_examples_float16_2());
 }
 
 TEST_F(GeneratedTests, topk_v2_3) {
     execute(topk_v2::CreateModel_3,
             topk_v2::is_ignored_3,
-            topk_v2::examples_3);
+            topk_v2::get_examples_3());
 }
 
 TEST_F(GeneratedTests, topk_v2_relaxed_3) {
     execute(topk_v2::CreateModel_relaxed_3,
             topk_v2::is_ignored_relaxed_3,
-            topk_v2::examples_relaxed_3);
+            topk_v2::get_examples_relaxed_3());
 }
 
 TEST_F(GeneratedTests, topk_v2_float16_3) {
     execute(topk_v2::CreateModel_float16_3,
             topk_v2::is_ignored_float16_3,
-            topk_v2::examples_float16_3);
+            topk_v2::get_examples_float16_3());
 }
 
 TEST_F(GeneratedTests, topk_v2_4) {
     execute(topk_v2::CreateModel_4,
             topk_v2::is_ignored_4,
-            topk_v2::examples_4);
+            topk_v2::get_examples_4());
 }
 
 TEST_F(GeneratedTests, topk_v2_relaxed_4) {
     execute(topk_v2::CreateModel_relaxed_4,
             topk_v2::is_ignored_relaxed_4,
-            topk_v2::examples_relaxed_4);
+            topk_v2::get_examples_relaxed_4());
 }
 
 TEST_F(GeneratedTests, topk_v2_float16_4) {
     execute(topk_v2::CreateModel_float16_4,
             topk_v2::is_ignored_float16_4,
-            topk_v2::examples_float16_4);
+            topk_v2::get_examples_float16_4());
 }
 
 TEST_F(GeneratedTests, topk_v2_5) {
     execute(topk_v2::CreateModel_5,
             topk_v2::is_ignored_5,
-            topk_v2::examples_5);
+            topk_v2::get_examples_5());
 }
 
 TEST_F(GeneratedTests, topk_v2_relaxed_5) {
     execute(topk_v2::CreateModel_relaxed_5,
             topk_v2::is_ignored_relaxed_5,
-            topk_v2::examples_relaxed_5);
+            topk_v2::get_examples_relaxed_5());
 }
 
 TEST_F(GeneratedTests, topk_v2_float16_5) {
     execute(topk_v2::CreateModel_float16_5,
             topk_v2::is_ignored_float16_5,
-            topk_v2::examples_float16_5);
+            topk_v2::get_examples_float16_5());
 }
 
 TEST_F(GeneratedTests, topk_v2_6) {
     execute(topk_v2::CreateModel_6,
             topk_v2::is_ignored_6,
-            topk_v2::examples_6);
+            topk_v2::get_examples_6());
 }
 
 TEST_F(GeneratedTests, topk_v2_relaxed_6) {
     execute(topk_v2::CreateModel_relaxed_6,
             topk_v2::is_ignored_relaxed_6,
-            topk_v2::examples_relaxed_6);
+            topk_v2::get_examples_relaxed_6());
 }
 
 TEST_F(GeneratedTests, topk_v2_float16_6) {
     execute(topk_v2::CreateModel_float16_6,
             topk_v2::is_ignored_float16_6,
-            topk_v2::examples_float16_6);
+            topk_v2::get_examples_float16_6());
 }
 
diff --git a/nn/runtime/test/generated/tests/transpose.mod.py.cpp b/nn/runtime/test/generated/tests/transpose.mod.py.cpp
index 3957078..3d5b61a 100644
--- a/nn/runtime/test/generated/tests/transpose.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/transpose.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, transpose) {
     execute(transpose::CreateModel,
             transpose::is_ignored,
-            transpose::examples);
+            transpose::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/transpose_conv2d.mod.py.cpp b/nn/runtime/test/generated/tests/transpose_conv2d.mod.py.cpp
index 0dfa347..10ff1b5 100644
--- a/nn/runtime/test/generated/tests/transpose_conv2d.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/transpose_conv2d.mod.py.cpp
@@ -12,576 +12,576 @@
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_none) {
     execute(transpose_conv2d::CreateModel_nhwc_none,
             transpose_conv2d::is_ignored_nhwc_none,
-            transpose_conv2d::examples_nhwc_none);
+            transpose_conv2d::get_examples_nhwc_none());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_none_weight_as_input) {
     execute(transpose_conv2d::CreateModel_nhwc_none_weight_as_input,
             transpose_conv2d::is_ignored_nhwc_none_weight_as_input,
-            transpose_conv2d::examples_nhwc_none_weight_as_input);
+            transpose_conv2d::get_examples_nhwc_none_weight_as_input());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_none_relaxed) {
     execute(transpose_conv2d::CreateModel_nhwc_none_relaxed,
             transpose_conv2d::is_ignored_nhwc_none_relaxed,
-            transpose_conv2d::examples_nhwc_none_relaxed);
+            transpose_conv2d::get_examples_nhwc_none_relaxed());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_none_relaxed_weight_as_input) {
     execute(transpose_conv2d::CreateModel_nhwc_none_relaxed_weight_as_input,
             transpose_conv2d::is_ignored_nhwc_none_relaxed_weight_as_input,
-            transpose_conv2d::examples_nhwc_none_relaxed_weight_as_input);
+            transpose_conv2d::get_examples_nhwc_none_relaxed_weight_as_input());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_none_quant8) {
     execute(transpose_conv2d::CreateModel_nhwc_none_quant8,
             transpose_conv2d::is_ignored_nhwc_none_quant8,
-            transpose_conv2d::examples_nhwc_none_quant8);
+            transpose_conv2d::get_examples_nhwc_none_quant8());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_none_quant8_weight_as_input) {
     execute(transpose_conv2d::CreateModel_nhwc_none_quant8_weight_as_input,
             transpose_conv2d::is_ignored_nhwc_none_quant8_weight_as_input,
-            transpose_conv2d::examples_nhwc_none_quant8_weight_as_input);
+            transpose_conv2d::get_examples_nhwc_none_quant8_weight_as_input());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_relu) {
     execute(transpose_conv2d::CreateModel_nhwc_relu,
             transpose_conv2d::is_ignored_nhwc_relu,
-            transpose_conv2d::examples_nhwc_relu);
+            transpose_conv2d::get_examples_nhwc_relu());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_relu_weight_as_input) {
     execute(transpose_conv2d::CreateModel_nhwc_relu_weight_as_input,
             transpose_conv2d::is_ignored_nhwc_relu_weight_as_input,
-            transpose_conv2d::examples_nhwc_relu_weight_as_input);
+            transpose_conv2d::get_examples_nhwc_relu_weight_as_input());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_relu_relaxed) {
     execute(transpose_conv2d::CreateModel_nhwc_relu_relaxed,
             transpose_conv2d::is_ignored_nhwc_relu_relaxed,
-            transpose_conv2d::examples_nhwc_relu_relaxed);
+            transpose_conv2d::get_examples_nhwc_relu_relaxed());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_relu_relaxed_weight_as_input) {
     execute(transpose_conv2d::CreateModel_nhwc_relu_relaxed_weight_as_input,
             transpose_conv2d::is_ignored_nhwc_relu_relaxed_weight_as_input,
-            transpose_conv2d::examples_nhwc_relu_relaxed_weight_as_input);
+            transpose_conv2d::get_examples_nhwc_relu_relaxed_weight_as_input());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_relu_quant8) {
     execute(transpose_conv2d::CreateModel_nhwc_relu_quant8,
             transpose_conv2d::is_ignored_nhwc_relu_quant8,
-            transpose_conv2d::examples_nhwc_relu_quant8);
+            transpose_conv2d::get_examples_nhwc_relu_quant8());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_relu_quant8_weight_as_input) {
     execute(transpose_conv2d::CreateModel_nhwc_relu_quant8_weight_as_input,
             transpose_conv2d::is_ignored_nhwc_relu_quant8_weight_as_input,
-            transpose_conv2d::examples_nhwc_relu_quant8_weight_as_input);
+            transpose_conv2d::get_examples_nhwc_relu_quant8_weight_as_input());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_relu1) {
     execute(transpose_conv2d::CreateModel_nhwc_relu1,
             transpose_conv2d::is_ignored_nhwc_relu1,
-            transpose_conv2d::examples_nhwc_relu1);
+            transpose_conv2d::get_examples_nhwc_relu1());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_relu1_weight_as_input) {
     execute(transpose_conv2d::CreateModel_nhwc_relu1_weight_as_input,
             transpose_conv2d::is_ignored_nhwc_relu1_weight_as_input,
-            transpose_conv2d::examples_nhwc_relu1_weight_as_input);
+            transpose_conv2d::get_examples_nhwc_relu1_weight_as_input());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_relu1_relaxed) {
     execute(transpose_conv2d::CreateModel_nhwc_relu1_relaxed,
             transpose_conv2d::is_ignored_nhwc_relu1_relaxed,
-            transpose_conv2d::examples_nhwc_relu1_relaxed);
+            transpose_conv2d::get_examples_nhwc_relu1_relaxed());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_relu1_relaxed_weight_as_input) {
     execute(transpose_conv2d::CreateModel_nhwc_relu1_relaxed_weight_as_input,
             transpose_conv2d::is_ignored_nhwc_relu1_relaxed_weight_as_input,
-            transpose_conv2d::examples_nhwc_relu1_relaxed_weight_as_input);
+            transpose_conv2d::get_examples_nhwc_relu1_relaxed_weight_as_input());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_relu1_quant8) {
     execute(transpose_conv2d::CreateModel_nhwc_relu1_quant8,
             transpose_conv2d::is_ignored_nhwc_relu1_quant8,
-            transpose_conv2d::examples_nhwc_relu1_quant8);
+            transpose_conv2d::get_examples_nhwc_relu1_quant8());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_relu1_quant8_weight_as_input) {
     execute(transpose_conv2d::CreateModel_nhwc_relu1_quant8_weight_as_input,
             transpose_conv2d::is_ignored_nhwc_relu1_quant8_weight_as_input,
-            transpose_conv2d::examples_nhwc_relu1_quant8_weight_as_input);
+            transpose_conv2d::get_examples_nhwc_relu1_quant8_weight_as_input());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_relu6) {
     execute(transpose_conv2d::CreateModel_nhwc_relu6,
             transpose_conv2d::is_ignored_nhwc_relu6,
-            transpose_conv2d::examples_nhwc_relu6);
+            transpose_conv2d::get_examples_nhwc_relu6());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_relu6_weight_as_input) {
     execute(transpose_conv2d::CreateModel_nhwc_relu6_weight_as_input,
             transpose_conv2d::is_ignored_nhwc_relu6_weight_as_input,
-            transpose_conv2d::examples_nhwc_relu6_weight_as_input);
+            transpose_conv2d::get_examples_nhwc_relu6_weight_as_input());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_relu6_relaxed) {
     execute(transpose_conv2d::CreateModel_nhwc_relu6_relaxed,
             transpose_conv2d::is_ignored_nhwc_relu6_relaxed,
-            transpose_conv2d::examples_nhwc_relu6_relaxed);
+            transpose_conv2d::get_examples_nhwc_relu6_relaxed());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_relu6_relaxed_weight_as_input) {
     execute(transpose_conv2d::CreateModel_nhwc_relu6_relaxed_weight_as_input,
             transpose_conv2d::is_ignored_nhwc_relu6_relaxed_weight_as_input,
-            transpose_conv2d::examples_nhwc_relu6_relaxed_weight_as_input);
+            transpose_conv2d::get_examples_nhwc_relu6_relaxed_weight_as_input());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_relu6_quant8) {
     execute(transpose_conv2d::CreateModel_nhwc_relu6_quant8,
             transpose_conv2d::is_ignored_nhwc_relu6_quant8,
-            transpose_conv2d::examples_nhwc_relu6_quant8);
+            transpose_conv2d::get_examples_nhwc_relu6_quant8());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_relu6_quant8_weight_as_input) {
     execute(transpose_conv2d::CreateModel_nhwc_relu6_quant8_weight_as_input,
             transpose_conv2d::is_ignored_nhwc_relu6_quant8_weight_as_input,
-            transpose_conv2d::examples_nhwc_relu6_quant8_weight_as_input);
+            transpose_conv2d::get_examples_nhwc_relu6_quant8_weight_as_input());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_none) {
     execute(transpose_conv2d::CreateModel_nchw_none,
             transpose_conv2d::is_ignored_nchw_none,
-            transpose_conv2d::examples_nchw_none);
+            transpose_conv2d::get_examples_nchw_none());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_none_weight_as_input) {
     execute(transpose_conv2d::CreateModel_nchw_none_weight_as_input,
             transpose_conv2d::is_ignored_nchw_none_weight_as_input,
-            transpose_conv2d::examples_nchw_none_weight_as_input);
+            transpose_conv2d::get_examples_nchw_none_weight_as_input());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_none_relaxed) {
     execute(transpose_conv2d::CreateModel_nchw_none_relaxed,
             transpose_conv2d::is_ignored_nchw_none_relaxed,
-            transpose_conv2d::examples_nchw_none_relaxed);
+            transpose_conv2d::get_examples_nchw_none_relaxed());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_none_relaxed_weight_as_input) {
     execute(transpose_conv2d::CreateModel_nchw_none_relaxed_weight_as_input,
             transpose_conv2d::is_ignored_nchw_none_relaxed_weight_as_input,
-            transpose_conv2d::examples_nchw_none_relaxed_weight_as_input);
+            transpose_conv2d::get_examples_nchw_none_relaxed_weight_as_input());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_none_quant8) {
     execute(transpose_conv2d::CreateModel_nchw_none_quant8,
             transpose_conv2d::is_ignored_nchw_none_quant8,
-            transpose_conv2d::examples_nchw_none_quant8);
+            transpose_conv2d::get_examples_nchw_none_quant8());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_none_quant8_weight_as_input) {
     execute(transpose_conv2d::CreateModel_nchw_none_quant8_weight_as_input,
             transpose_conv2d::is_ignored_nchw_none_quant8_weight_as_input,
-            transpose_conv2d::examples_nchw_none_quant8_weight_as_input);
+            transpose_conv2d::get_examples_nchw_none_quant8_weight_as_input());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_relu) {
     execute(transpose_conv2d::CreateModel_nchw_relu,
             transpose_conv2d::is_ignored_nchw_relu,
-            transpose_conv2d::examples_nchw_relu);
+            transpose_conv2d::get_examples_nchw_relu());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_relu_weight_as_input) {
     execute(transpose_conv2d::CreateModel_nchw_relu_weight_as_input,
             transpose_conv2d::is_ignored_nchw_relu_weight_as_input,
-            transpose_conv2d::examples_nchw_relu_weight_as_input);
+            transpose_conv2d::get_examples_nchw_relu_weight_as_input());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_relu_relaxed) {
     execute(transpose_conv2d::CreateModel_nchw_relu_relaxed,
             transpose_conv2d::is_ignored_nchw_relu_relaxed,
-            transpose_conv2d::examples_nchw_relu_relaxed);
+            transpose_conv2d::get_examples_nchw_relu_relaxed());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_relu_relaxed_weight_as_input) {
     execute(transpose_conv2d::CreateModel_nchw_relu_relaxed_weight_as_input,
             transpose_conv2d::is_ignored_nchw_relu_relaxed_weight_as_input,
-            transpose_conv2d::examples_nchw_relu_relaxed_weight_as_input);
+            transpose_conv2d::get_examples_nchw_relu_relaxed_weight_as_input());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_relu_quant8) {
     execute(transpose_conv2d::CreateModel_nchw_relu_quant8,
             transpose_conv2d::is_ignored_nchw_relu_quant8,
-            transpose_conv2d::examples_nchw_relu_quant8);
+            transpose_conv2d::get_examples_nchw_relu_quant8());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_relu_quant8_weight_as_input) {
     execute(transpose_conv2d::CreateModel_nchw_relu_quant8_weight_as_input,
             transpose_conv2d::is_ignored_nchw_relu_quant8_weight_as_input,
-            transpose_conv2d::examples_nchw_relu_quant8_weight_as_input);
+            transpose_conv2d::get_examples_nchw_relu_quant8_weight_as_input());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_relu1) {
     execute(transpose_conv2d::CreateModel_nchw_relu1,
             transpose_conv2d::is_ignored_nchw_relu1,
-            transpose_conv2d::examples_nchw_relu1);
+            transpose_conv2d::get_examples_nchw_relu1());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_relu1_weight_as_input) {
     execute(transpose_conv2d::CreateModel_nchw_relu1_weight_as_input,
             transpose_conv2d::is_ignored_nchw_relu1_weight_as_input,
-            transpose_conv2d::examples_nchw_relu1_weight_as_input);
+            transpose_conv2d::get_examples_nchw_relu1_weight_as_input());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_relu1_relaxed) {
     execute(transpose_conv2d::CreateModel_nchw_relu1_relaxed,
             transpose_conv2d::is_ignored_nchw_relu1_relaxed,
-            transpose_conv2d::examples_nchw_relu1_relaxed);
+            transpose_conv2d::get_examples_nchw_relu1_relaxed());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_relu1_relaxed_weight_as_input) {
     execute(transpose_conv2d::CreateModel_nchw_relu1_relaxed_weight_as_input,
             transpose_conv2d::is_ignored_nchw_relu1_relaxed_weight_as_input,
-            transpose_conv2d::examples_nchw_relu1_relaxed_weight_as_input);
+            transpose_conv2d::get_examples_nchw_relu1_relaxed_weight_as_input());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_relu1_quant8) {
     execute(transpose_conv2d::CreateModel_nchw_relu1_quant8,
             transpose_conv2d::is_ignored_nchw_relu1_quant8,
-            transpose_conv2d::examples_nchw_relu1_quant8);
+            transpose_conv2d::get_examples_nchw_relu1_quant8());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_relu1_quant8_weight_as_input) {
     execute(transpose_conv2d::CreateModel_nchw_relu1_quant8_weight_as_input,
             transpose_conv2d::is_ignored_nchw_relu1_quant8_weight_as_input,
-            transpose_conv2d::examples_nchw_relu1_quant8_weight_as_input);
+            transpose_conv2d::get_examples_nchw_relu1_quant8_weight_as_input());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_relu6) {
     execute(transpose_conv2d::CreateModel_nchw_relu6,
             transpose_conv2d::is_ignored_nchw_relu6,
-            transpose_conv2d::examples_nchw_relu6);
+            transpose_conv2d::get_examples_nchw_relu6());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_relu6_weight_as_input) {
     execute(transpose_conv2d::CreateModel_nchw_relu6_weight_as_input,
             transpose_conv2d::is_ignored_nchw_relu6_weight_as_input,
-            transpose_conv2d::examples_nchw_relu6_weight_as_input);
+            transpose_conv2d::get_examples_nchw_relu6_weight_as_input());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_relu6_relaxed) {
     execute(transpose_conv2d::CreateModel_nchw_relu6_relaxed,
             transpose_conv2d::is_ignored_nchw_relu6_relaxed,
-            transpose_conv2d::examples_nchw_relu6_relaxed);
+            transpose_conv2d::get_examples_nchw_relu6_relaxed());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_relu6_relaxed_weight_as_input) {
     execute(transpose_conv2d::CreateModel_nchw_relu6_relaxed_weight_as_input,
             transpose_conv2d::is_ignored_nchw_relu6_relaxed_weight_as_input,
-            transpose_conv2d::examples_nchw_relu6_relaxed_weight_as_input);
+            transpose_conv2d::get_examples_nchw_relu6_relaxed_weight_as_input());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_relu6_quant8) {
     execute(transpose_conv2d::CreateModel_nchw_relu6_quant8,
             transpose_conv2d::is_ignored_nchw_relu6_quant8,
-            transpose_conv2d::examples_nchw_relu6_quant8);
+            transpose_conv2d::get_examples_nchw_relu6_quant8());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_relu6_quant8_weight_as_input) {
     execute(transpose_conv2d::CreateModel_nchw_relu6_quant8_weight_as_input,
             transpose_conv2d::is_ignored_nchw_relu6_quant8_weight_as_input,
-            transpose_conv2d::examples_nchw_relu6_quant8_weight_as_input);
+            transpose_conv2d::get_examples_nchw_relu6_quant8_weight_as_input());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc) {
     execute(transpose_conv2d::CreateModel_nhwc,
             transpose_conv2d::is_ignored_nhwc,
-            transpose_conv2d::examples_nhwc);
+            transpose_conv2d::get_examples_nhwc());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_weight_as_input) {
     execute(transpose_conv2d::CreateModel_nhwc_weight_as_input,
             transpose_conv2d::is_ignored_nhwc_weight_as_input,
-            transpose_conv2d::examples_nhwc_weight_as_input);
+            transpose_conv2d::get_examples_nhwc_weight_as_input());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_relaxed) {
     execute(transpose_conv2d::CreateModel_nhwc_relaxed,
             transpose_conv2d::is_ignored_nhwc_relaxed,
-            transpose_conv2d::examples_nhwc_relaxed);
+            transpose_conv2d::get_examples_nhwc_relaxed());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_relaxed_weight_as_input) {
     execute(transpose_conv2d::CreateModel_nhwc_relaxed_weight_as_input,
             transpose_conv2d::is_ignored_nhwc_relaxed_weight_as_input,
-            transpose_conv2d::examples_nhwc_relaxed_weight_as_input);
+            transpose_conv2d::get_examples_nhwc_relaxed_weight_as_input());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_quant8) {
     execute(transpose_conv2d::CreateModel_nhwc_quant8,
             transpose_conv2d::is_ignored_nhwc_quant8,
-            transpose_conv2d::examples_nhwc_quant8);
+            transpose_conv2d::get_examples_nhwc_quant8());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_quant8_weight_as_input) {
     execute(transpose_conv2d::CreateModel_nhwc_quant8_weight_as_input,
             transpose_conv2d::is_ignored_nhwc_quant8_weight_as_input,
-            transpose_conv2d::examples_nhwc_quant8_weight_as_input);
+            transpose_conv2d::get_examples_nhwc_quant8_weight_as_input());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw) {
     execute(transpose_conv2d::CreateModel_nchw,
             transpose_conv2d::is_ignored_nchw,
-            transpose_conv2d::examples_nchw);
+            transpose_conv2d::get_examples_nchw());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_weight_as_input) {
     execute(transpose_conv2d::CreateModel_nchw_weight_as_input,
             transpose_conv2d::is_ignored_nchw_weight_as_input,
-            transpose_conv2d::examples_nchw_weight_as_input);
+            transpose_conv2d::get_examples_nchw_weight_as_input());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_relaxed) {
     execute(transpose_conv2d::CreateModel_nchw_relaxed,
             transpose_conv2d::is_ignored_nchw_relaxed,
-            transpose_conv2d::examples_nchw_relaxed);
+            transpose_conv2d::get_examples_nchw_relaxed());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_relaxed_weight_as_input) {
     execute(transpose_conv2d::CreateModel_nchw_relaxed_weight_as_input,
             transpose_conv2d::is_ignored_nchw_relaxed_weight_as_input,
-            transpose_conv2d::examples_nchw_relaxed_weight_as_input);
+            transpose_conv2d::get_examples_nchw_relaxed_weight_as_input());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_quant8) {
     execute(transpose_conv2d::CreateModel_nchw_quant8,
             transpose_conv2d::is_ignored_nchw_quant8,
-            transpose_conv2d::examples_nchw_quant8);
+            transpose_conv2d::get_examples_nchw_quant8());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_quant8_weight_as_input) {
     execute(transpose_conv2d::CreateModel_nchw_quant8_weight_as_input,
             transpose_conv2d::is_ignored_nchw_quant8_weight_as_input,
-            transpose_conv2d::examples_nchw_quant8_weight_as_input);
+            transpose_conv2d::get_examples_nchw_quant8_weight_as_input());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_2) {
     execute(transpose_conv2d::CreateModel_nhwc_2,
             transpose_conv2d::is_ignored_nhwc_2,
-            transpose_conv2d::examples_nhwc_2);
+            transpose_conv2d::get_examples_nhwc_2());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_weight_as_input_2) {
     execute(transpose_conv2d::CreateModel_nhwc_weight_as_input_2,
             transpose_conv2d::is_ignored_nhwc_weight_as_input_2,
-            transpose_conv2d::examples_nhwc_weight_as_input_2);
+            transpose_conv2d::get_examples_nhwc_weight_as_input_2());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_relaxed_2) {
     execute(transpose_conv2d::CreateModel_nhwc_relaxed_2,
             transpose_conv2d::is_ignored_nhwc_relaxed_2,
-            transpose_conv2d::examples_nhwc_relaxed_2);
+            transpose_conv2d::get_examples_nhwc_relaxed_2());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_relaxed_weight_as_input_2) {
     execute(transpose_conv2d::CreateModel_nhwc_relaxed_weight_as_input_2,
             transpose_conv2d::is_ignored_nhwc_relaxed_weight_as_input_2,
-            transpose_conv2d::examples_nhwc_relaxed_weight_as_input_2);
+            transpose_conv2d::get_examples_nhwc_relaxed_weight_as_input_2());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_quant8_2) {
     execute(transpose_conv2d::CreateModel_nhwc_quant8_2,
             transpose_conv2d::is_ignored_nhwc_quant8_2,
-            transpose_conv2d::examples_nhwc_quant8_2);
+            transpose_conv2d::get_examples_nhwc_quant8_2());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_quant8_weight_as_input_2) {
     execute(transpose_conv2d::CreateModel_nhwc_quant8_weight_as_input_2,
             transpose_conv2d::is_ignored_nhwc_quant8_weight_as_input_2,
-            transpose_conv2d::examples_nhwc_quant8_weight_as_input_2);
+            transpose_conv2d::get_examples_nhwc_quant8_weight_as_input_2());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_2) {
     execute(transpose_conv2d::CreateModel_nchw_2,
             transpose_conv2d::is_ignored_nchw_2,
-            transpose_conv2d::examples_nchw_2);
+            transpose_conv2d::get_examples_nchw_2());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_weight_as_input_2) {
     execute(transpose_conv2d::CreateModel_nchw_weight_as_input_2,
             transpose_conv2d::is_ignored_nchw_weight_as_input_2,
-            transpose_conv2d::examples_nchw_weight_as_input_2);
+            transpose_conv2d::get_examples_nchw_weight_as_input_2());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_relaxed_2) {
     execute(transpose_conv2d::CreateModel_nchw_relaxed_2,
             transpose_conv2d::is_ignored_nchw_relaxed_2,
-            transpose_conv2d::examples_nchw_relaxed_2);
+            transpose_conv2d::get_examples_nchw_relaxed_2());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_relaxed_weight_as_input_2) {
     execute(transpose_conv2d::CreateModel_nchw_relaxed_weight_as_input_2,
             transpose_conv2d::is_ignored_nchw_relaxed_weight_as_input_2,
-            transpose_conv2d::examples_nchw_relaxed_weight_as_input_2);
+            transpose_conv2d::get_examples_nchw_relaxed_weight_as_input_2());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_quant8_2) {
     execute(transpose_conv2d::CreateModel_nchw_quant8_2,
             transpose_conv2d::is_ignored_nchw_quant8_2,
-            transpose_conv2d::examples_nchw_quant8_2);
+            transpose_conv2d::get_examples_nchw_quant8_2());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_quant8_weight_as_input_2) {
     execute(transpose_conv2d::CreateModel_nchw_quant8_weight_as_input_2,
             transpose_conv2d::is_ignored_nchw_quant8_weight_as_input_2,
-            transpose_conv2d::examples_nchw_quant8_weight_as_input_2);
+            transpose_conv2d::get_examples_nchw_quant8_weight_as_input_2());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_3) {
     execute(transpose_conv2d::CreateModel_nhwc_3,
             transpose_conv2d::is_ignored_nhwc_3,
-            transpose_conv2d::examples_nhwc_3);
+            transpose_conv2d::get_examples_nhwc_3());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_weight_as_input_3) {
     execute(transpose_conv2d::CreateModel_nhwc_weight_as_input_3,
             transpose_conv2d::is_ignored_nhwc_weight_as_input_3,
-            transpose_conv2d::examples_nhwc_weight_as_input_3);
+            transpose_conv2d::get_examples_nhwc_weight_as_input_3());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_relaxed_3) {
     execute(transpose_conv2d::CreateModel_nhwc_relaxed_3,
             transpose_conv2d::is_ignored_nhwc_relaxed_3,
-            transpose_conv2d::examples_nhwc_relaxed_3);
+            transpose_conv2d::get_examples_nhwc_relaxed_3());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_relaxed_weight_as_input_3) {
     execute(transpose_conv2d::CreateModel_nhwc_relaxed_weight_as_input_3,
             transpose_conv2d::is_ignored_nhwc_relaxed_weight_as_input_3,
-            transpose_conv2d::examples_nhwc_relaxed_weight_as_input_3);
+            transpose_conv2d::get_examples_nhwc_relaxed_weight_as_input_3());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_quant8_3) {
     execute(transpose_conv2d::CreateModel_nhwc_quant8_3,
             transpose_conv2d::is_ignored_nhwc_quant8_3,
-            transpose_conv2d::examples_nhwc_quant8_3);
+            transpose_conv2d::get_examples_nhwc_quant8_3());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_quant8_weight_as_input_3) {
     execute(transpose_conv2d::CreateModel_nhwc_quant8_weight_as_input_3,
             transpose_conv2d::is_ignored_nhwc_quant8_weight_as_input_3,
-            transpose_conv2d::examples_nhwc_quant8_weight_as_input_3);
+            transpose_conv2d::get_examples_nhwc_quant8_weight_as_input_3());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_3) {
     execute(transpose_conv2d::CreateModel_nchw_3,
             transpose_conv2d::is_ignored_nchw_3,
-            transpose_conv2d::examples_nchw_3);
+            transpose_conv2d::get_examples_nchw_3());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_weight_as_input_3) {
     execute(transpose_conv2d::CreateModel_nchw_weight_as_input_3,
             transpose_conv2d::is_ignored_nchw_weight_as_input_3,
-            transpose_conv2d::examples_nchw_weight_as_input_3);
+            transpose_conv2d::get_examples_nchw_weight_as_input_3());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_relaxed_3) {
     execute(transpose_conv2d::CreateModel_nchw_relaxed_3,
             transpose_conv2d::is_ignored_nchw_relaxed_3,
-            transpose_conv2d::examples_nchw_relaxed_3);
+            transpose_conv2d::get_examples_nchw_relaxed_3());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_relaxed_weight_as_input_3) {
     execute(transpose_conv2d::CreateModel_nchw_relaxed_weight_as_input_3,
             transpose_conv2d::is_ignored_nchw_relaxed_weight_as_input_3,
-            transpose_conv2d::examples_nchw_relaxed_weight_as_input_3);
+            transpose_conv2d::get_examples_nchw_relaxed_weight_as_input_3());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_quant8_3) {
     execute(transpose_conv2d::CreateModel_nchw_quant8_3,
             transpose_conv2d::is_ignored_nchw_quant8_3,
-            transpose_conv2d::examples_nchw_quant8_3);
+            transpose_conv2d::get_examples_nchw_quant8_3());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_quant8_weight_as_input_3) {
     execute(transpose_conv2d::CreateModel_nchw_quant8_weight_as_input_3,
             transpose_conv2d::is_ignored_nchw_quant8_weight_as_input_3,
-            transpose_conv2d::examples_nchw_quant8_weight_as_input_3);
+            transpose_conv2d::get_examples_nchw_quant8_weight_as_input_3());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_4) {
     execute(transpose_conv2d::CreateModel_nhwc_4,
             transpose_conv2d::is_ignored_nhwc_4,
-            transpose_conv2d::examples_nhwc_4);
+            transpose_conv2d::get_examples_nhwc_4());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_weight_as_input_4) {
     execute(transpose_conv2d::CreateModel_nhwc_weight_as_input_4,
             transpose_conv2d::is_ignored_nhwc_weight_as_input_4,
-            transpose_conv2d::examples_nhwc_weight_as_input_4);
+            transpose_conv2d::get_examples_nhwc_weight_as_input_4());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_relaxed_4) {
     execute(transpose_conv2d::CreateModel_nhwc_relaxed_4,
             transpose_conv2d::is_ignored_nhwc_relaxed_4,
-            transpose_conv2d::examples_nhwc_relaxed_4);
+            transpose_conv2d::get_examples_nhwc_relaxed_4());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_relaxed_weight_as_input_4) {
     execute(transpose_conv2d::CreateModel_nhwc_relaxed_weight_as_input_4,
             transpose_conv2d::is_ignored_nhwc_relaxed_weight_as_input_4,
-            transpose_conv2d::examples_nhwc_relaxed_weight_as_input_4);
+            transpose_conv2d::get_examples_nhwc_relaxed_weight_as_input_4());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_quant8_4) {
     execute(transpose_conv2d::CreateModel_nhwc_quant8_4,
             transpose_conv2d::is_ignored_nhwc_quant8_4,
-            transpose_conv2d::examples_nhwc_quant8_4);
+            transpose_conv2d::get_examples_nhwc_quant8_4());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nhwc_quant8_weight_as_input_4) {
     execute(transpose_conv2d::CreateModel_nhwc_quant8_weight_as_input_4,
             transpose_conv2d::is_ignored_nhwc_quant8_weight_as_input_4,
-            transpose_conv2d::examples_nhwc_quant8_weight_as_input_4);
+            transpose_conv2d::get_examples_nhwc_quant8_weight_as_input_4());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_4) {
     execute(transpose_conv2d::CreateModel_nchw_4,
             transpose_conv2d::is_ignored_nchw_4,
-            transpose_conv2d::examples_nchw_4);
+            transpose_conv2d::get_examples_nchw_4());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_weight_as_input_4) {
     execute(transpose_conv2d::CreateModel_nchw_weight_as_input_4,
             transpose_conv2d::is_ignored_nchw_weight_as_input_4,
-            transpose_conv2d::examples_nchw_weight_as_input_4);
+            transpose_conv2d::get_examples_nchw_weight_as_input_4());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_relaxed_4) {
     execute(transpose_conv2d::CreateModel_nchw_relaxed_4,
             transpose_conv2d::is_ignored_nchw_relaxed_4,
-            transpose_conv2d::examples_nchw_relaxed_4);
+            transpose_conv2d::get_examples_nchw_relaxed_4());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_relaxed_weight_as_input_4) {
     execute(transpose_conv2d::CreateModel_nchw_relaxed_weight_as_input_4,
             transpose_conv2d::is_ignored_nchw_relaxed_weight_as_input_4,
-            transpose_conv2d::examples_nchw_relaxed_weight_as_input_4);
+            transpose_conv2d::get_examples_nchw_relaxed_weight_as_input_4());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_quant8_4) {
     execute(transpose_conv2d::CreateModel_nchw_quant8_4,
             transpose_conv2d::is_ignored_nchw_quant8_4,
-            transpose_conv2d::examples_nchw_quant8_4);
+            transpose_conv2d::get_examples_nchw_quant8_4());
 }
 
 TEST_F(GeneratedTests, transpose_conv2d_nchw_quant8_weight_as_input_4) {
     execute(transpose_conv2d::CreateModel_nchw_quant8_weight_as_input_4,
             transpose_conv2d::is_ignored_nchw_quant8_weight_as_input_4,
-            transpose_conv2d::examples_nchw_quant8_weight_as_input_4);
+            transpose_conv2d::get_examples_nchw_quant8_weight_as_input_4());
 }
 
diff --git a/nn/runtime/test/generated/tests/transpose_float16.mod.py.cpp b/nn/runtime/test/generated/tests/transpose_float16.mod.py.cpp
new file mode 100644
index 0000000..8dac13d
--- /dev/null
+++ b/nn/runtime/test/generated/tests/transpose_float16.mod.py.cpp
@@ -0,0 +1,17 @@
+// clang-format off
+// Generated file (from: transpose_float16.mod.py). Do not edit
+#include "../../TestGenerated.h"
+
+namespace transpose_float16 {
+// Generated transpose_float16 test
+#include "generated/examples/transpose_float16.example.cpp"
+// Generated model constructor
+#include "generated/models/transpose_float16.model.cpp"
+} // namespace transpose_float16
+
+TEST_F(GeneratedTests, transpose_float16) {
+    execute(transpose_float16::CreateModel,
+            transpose_float16::is_ignored,
+            transpose_float16::get_examples());
+}
+
diff --git a/nn/runtime/test/generated/tests/transpose_float_1.mod.py.cpp b/nn/runtime/test/generated/tests/transpose_float_1.mod.py.cpp
index 39ded2a..f247620 100644
--- a/nn/runtime/test/generated/tests/transpose_float_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/transpose_float_1.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, transpose_float_1) {
     execute(transpose_float_1::CreateModel,
             transpose_float_1::is_ignored,
-            transpose_float_1::examples);
+            transpose_float_1::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/transpose_float_1_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/transpose_float_1_relaxed.mod.py.cpp
index b4f391b..90093bf 100644
--- a/nn/runtime/test/generated/tests/transpose_float_1_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/transpose_float_1_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, transpose_float_1_relaxed) {
     execute(transpose_float_1_relaxed::CreateModel,
             transpose_float_1_relaxed::is_ignored,
-            transpose_float_1_relaxed::examples);
+            transpose_float_1_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/transpose_quant8_1.mod.py.cpp b/nn/runtime/test/generated/tests/transpose_quant8_1.mod.py.cpp
index a895716..45f08c5 100644
--- a/nn/runtime/test/generated/tests/transpose_quant8_1.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/transpose_quant8_1.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, transpose_quant8_1) {
     execute(transpose_quant8_1::CreateModel,
             transpose_quant8_1::is_ignored,
-            transpose_quant8_1::examples);
+            transpose_quant8_1::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/transpose_relaxed.mod.py.cpp b/nn/runtime/test/generated/tests/transpose_relaxed.mod.py.cpp
index 41a3191..08211dd 100644
--- a/nn/runtime/test/generated/tests/transpose_relaxed.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/transpose_relaxed.mod.py.cpp
@@ -12,6 +12,6 @@
 TEST_F(GeneratedTests, transpose_relaxed) {
     execute(transpose_relaxed::CreateModel,
             transpose_relaxed::is_ignored,
-            transpose_relaxed::examples);
+            transpose_relaxed::get_examples());
 }
 
diff --git a/nn/runtime/test/generated/tests/transpose_v1_2.mod.py.cpp b/nn/runtime/test/generated/tests/transpose_v1_2.mod.py.cpp
index 586de6d..132b5b1 100644
--- a/nn/runtime/test/generated/tests/transpose_v1_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/transpose_v1_2.mod.py.cpp
@@ -12,18 +12,18 @@
 TEST_F(GeneratedTests, transpose_v1_2) {
     execute(transpose_v1_2::CreateModel,
             transpose_v1_2::is_ignored,
-            transpose_v1_2::examples);
+            transpose_v1_2::get_examples());
 }
 
 TEST_F(GeneratedTests, transpose_v1_2_relaxed) {
     execute(transpose_v1_2::CreateModel_relaxed,
             transpose_v1_2::is_ignored_relaxed,
-            transpose_v1_2::examples_relaxed);
+            transpose_v1_2::get_examples_relaxed());
 }
 
 TEST_F(GeneratedTests, transpose_v1_2_quant8) {
     execute(transpose_v1_2::CreateModel_quant8,
             transpose_v1_2::is_ignored_quant8,
-            transpose_v1_2::examples_quant8);
+            transpose_v1_2::get_examples_quant8());
 }
 
diff --git a/nn/runtime/test/generated/vts_models/abs.model.cpp b/nn/runtime/test/generated/vts_models/abs.model.cpp
new file mode 100644
index 0000000..a903484
--- /dev/null
+++ b/nn/runtime/test/generated/vts_models/abs.model.cpp
@@ -0,0 +1,156 @@
+// clang-format off
+// Generated file (from: abs.mod.py). Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2, 3, 4, 5},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2, 3, 4, 5},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ABS,
+            .inputs = {0},
+            .outputs = {1},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {1};
+    std::vector<uint8_t> operandValues = {};
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_relaxed() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2, 3, 4, 5},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2, 3, 4, 5},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ABS,
+            .inputs = {0},
+            .outputs = {1},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {1};
+    std::vector<uint8_t> operandValues = {};
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_float16() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 3, 4, 5},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 3, 4, 5},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ABS,
+            .inputs = {0},
+            .outputs = {1},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {1};
+    std::vector<uint8_t> operandValues = {};
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/vts_models/argmax_1.model.cpp b/nn/runtime/test/generated/vts_models/argmax_1.model.cpp
new file mode 100644
index 0000000..adefc6f
--- /dev/null
+++ b/nn/runtime/test/generated/vts_models/argmax_1.model.cpp
@@ -0,0 +1,313 @@
+// clang-format off
+// Generated file (from: argmax_1.mod.py). Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ARGMAX,
+            .inputs = {0, 1},
+            .outputs = {2},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {2};
+    std::vector<uint8_t> operandValues = {
+      1, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_relaxed() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ARGMAX,
+            .inputs = {0, 1},
+            .outputs = {2},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {2};
+    std::vector<uint8_t> operandValues = {
+      1, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_float16() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ARGMAX,
+            .inputs = {0, 1},
+            .outputs = {2},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {2};
+    std::vector<uint8_t> operandValues = {
+      1, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_int32() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ARGMAX,
+            .inputs = {0, 1},
+            .outputs = {2},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {2};
+    std::vector<uint8_t> operandValues = {
+      1, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_int32(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_quant8() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 1.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ARGMAX,
+            .inputs = {0, 1},
+            .outputs = {2},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {2};
+    std::vector<uint8_t> operandValues = {
+      1, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_quant8(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/vts_models/argmax_1_float.model.cpp b/nn/runtime/test/generated/vts_models/argmax_1_float.model.cpp
deleted file mode 100644
index 191956f..0000000
--- a/nn/runtime/test/generated/vts_models/argmax_1_float.model.cpp
+++ /dev/null
@@ -1,64 +0,0 @@
-// clang-format off
-// Generated file (from: argmax_1_float.mod.py). Do not edit
-// Create the model
-Model createTestModel() {
-    const std::vector<Operand> operands = {
-        {
-            .type = OperandType::TENSOR_FLOAT32,
-            .dimensions = {2, 2},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::INT32,
-            .dimensions = {},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
-        },
-        {
-            .type = OperandType::TENSOR_INT32,
-            .dimensions = {2},
-            .numberOfConsumers = 0,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_OUTPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        }
-    };
-
-    const std::vector<Operation> operations = {
-        {
-            .type = OperationType::ARGMAX,
-            .inputs = {0, 1},
-            .outputs = {2},
-        }
-    };
-
-    const std::vector<uint32_t> inputIndexes = {0};
-    const std::vector<uint32_t> outputIndexes = {2};
-    std::vector<uint8_t> operandValues = {
-      1, 0, 0, 0
-    };
-    const std::vector<hidl_memory> pools = {};
-
-    return {
-        .operands = operands,
-        .operations = operations,
-        .inputIndexes = inputIndexes,
-        .outputIndexes = outputIndexes,
-        .operandValues = operandValues,
-        .pools = pools,
-    };
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/nn/runtime/test/generated/vts_models/argmax_1_float_relaxed.model.cpp b/nn/runtime/test/generated/vts_models/argmax_1_float_relaxed.model.cpp
deleted file mode 100644
index ac44cdb..0000000
--- a/nn/runtime/test/generated/vts_models/argmax_1_float_relaxed.model.cpp
+++ /dev/null
@@ -1,65 +0,0 @@
-// clang-format off
-// Generated file (from: argmax_1_float_relaxed.mod.py). Do not edit
-// Create the model
-Model createTestModel() {
-    const std::vector<Operand> operands = {
-        {
-            .type = OperandType::TENSOR_FLOAT32,
-            .dimensions = {2, 2},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::INT32,
-            .dimensions = {},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
-        },
-        {
-            .type = OperandType::TENSOR_INT32,
-            .dimensions = {2},
-            .numberOfConsumers = 0,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_OUTPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        }
-    };
-
-    const std::vector<Operation> operations = {
-        {
-            .type = OperationType::ARGMAX,
-            .inputs = {0, 1},
-            .outputs = {2},
-        }
-    };
-
-    const std::vector<uint32_t> inputIndexes = {0};
-    const std::vector<uint32_t> outputIndexes = {2};
-    std::vector<uint8_t> operandValues = {
-      1, 0, 0, 0
-    };
-    const std::vector<hidl_memory> pools = {};
-
-    return {
-        .operands = operands,
-        .operations = operations,
-        .inputIndexes = inputIndexes,
-        .outputIndexes = outputIndexes,
-        .operandValues = operandValues,
-        .pools = pools,
-        .relaxComputationFloat32toFloat16 = true,
-    };
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/nn/runtime/test/generated/vts_models/argmax_1_int32.model.cpp b/nn/runtime/test/generated/vts_models/argmax_1_int32.model.cpp
deleted file mode 100644
index 3d7cba2..0000000
--- a/nn/runtime/test/generated/vts_models/argmax_1_int32.model.cpp
+++ /dev/null
@@ -1,64 +0,0 @@
-// clang-format off
-// Generated file (from: argmax_1_int32.mod.py). Do not edit
-// Create the model
-Model createTestModel() {
-    const std::vector<Operand> operands = {
-        {
-            .type = OperandType::TENSOR_INT32,
-            .dimensions = {2, 2},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::INT32,
-            .dimensions = {},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
-        },
-        {
-            .type = OperandType::TENSOR_INT32,
-            .dimensions = {2},
-            .numberOfConsumers = 0,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_OUTPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        }
-    };
-
-    const std::vector<Operation> operations = {
-        {
-            .type = OperationType::ARGMAX,
-            .inputs = {0, 1},
-            .outputs = {2},
-        }
-    };
-
-    const std::vector<uint32_t> inputIndexes = {0};
-    const std::vector<uint32_t> outputIndexes = {2};
-    std::vector<uint8_t> operandValues = {
-      1, 0, 0, 0
-    };
-    const std::vector<hidl_memory> pools = {};
-
-    return {
-        .operands = operands,
-        .operations = operations,
-        .inputIndexes = inputIndexes,
-        .outputIndexes = outputIndexes,
-        .operandValues = operandValues,
-        .pools = pools,
-    };
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/nn/runtime/test/generated/vts_models/argmax_1_quant8.model.cpp b/nn/runtime/test/generated/vts_models/argmax_1_quant8.model.cpp
deleted file mode 100644
index 43a3aa6..0000000
--- a/nn/runtime/test/generated/vts_models/argmax_1_quant8.model.cpp
+++ /dev/null
@@ -1,64 +0,0 @@
-// clang-format off
-// Generated file (from: argmax_1_quant8.mod.py). Do not edit
-// Create the model
-Model createTestModel() {
-    const std::vector<Operand> operands = {
-        {
-            .type = OperandType::TENSOR_QUANT8_ASYMM,
-            .dimensions = {2, 2},
-            .numberOfConsumers = 1,
-            .scale = 1.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::INT32,
-            .dimensions = {},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
-        },
-        {
-            .type = OperandType::TENSOR_INT32,
-            .dimensions = {2},
-            .numberOfConsumers = 0,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_OUTPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        }
-    };
-
-    const std::vector<Operation> operations = {
-        {
-            .type = OperationType::ARGMAX,
-            .inputs = {0, 1},
-            .outputs = {2},
-        }
-    };
-
-    const std::vector<uint32_t> inputIndexes = {0};
-    const std::vector<uint32_t> outputIndexes = {2};
-    std::vector<uint8_t> operandValues = {
-      1, 0, 0, 0
-    };
-    const std::vector<hidl_memory> pools = {};
-
-    return {
-        .operands = operands,
-        .operations = operations,
-        .inputIndexes = inputIndexes,
-        .outputIndexes = outputIndexes,
-        .operandValues = operandValues,
-        .pools = pools,
-    };
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/nn/runtime/test/generated/vts_models/argmax_2.model.cpp b/nn/runtime/test/generated/vts_models/argmax_2.model.cpp
new file mode 100644
index 0000000..ec9eb3f
--- /dev/null
+++ b/nn/runtime/test/generated/vts_models/argmax_2.model.cpp
@@ -0,0 +1,313 @@
+// clang-format off
+// Generated file (from: argmax_2.mod.py). Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ARGMAX,
+            .inputs = {0, 1},
+            .outputs = {2},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {2};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_relaxed() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ARGMAX,
+            .inputs = {0, 1},
+            .outputs = {2},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {2};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_float16() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ARGMAX,
+            .inputs = {0, 1},
+            .outputs = {2},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {2};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_int32() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ARGMAX,
+            .inputs = {0, 1},
+            .outputs = {2},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {2};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_int32(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_quant8() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 1.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ARGMAX,
+            .inputs = {0, 1},
+            .outputs = {2},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {2};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_quant8(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/vts_models/argmax_2_float.model.cpp b/nn/runtime/test/generated/vts_models/argmax_2_float.model.cpp
deleted file mode 100644
index 8778262..0000000
--- a/nn/runtime/test/generated/vts_models/argmax_2_float.model.cpp
+++ /dev/null
@@ -1,64 +0,0 @@
-// clang-format off
-// Generated file (from: argmax_2_float.mod.py). Do not edit
-// Create the model
-Model createTestModel() {
-    const std::vector<Operand> operands = {
-        {
-            .type = OperandType::TENSOR_FLOAT32,
-            .dimensions = {2, 2},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::INT32,
-            .dimensions = {},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
-        },
-        {
-            .type = OperandType::TENSOR_INT32,
-            .dimensions = {2},
-            .numberOfConsumers = 0,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_OUTPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        }
-    };
-
-    const std::vector<Operation> operations = {
-        {
-            .type = OperationType::ARGMAX,
-            .inputs = {0, 1},
-            .outputs = {2},
-        }
-    };
-
-    const std::vector<uint32_t> inputIndexes = {0};
-    const std::vector<uint32_t> outputIndexes = {2};
-    std::vector<uint8_t> operandValues = {
-      0, 0, 0, 0
-    };
-    const std::vector<hidl_memory> pools = {};
-
-    return {
-        .operands = operands,
-        .operations = operations,
-        .inputIndexes = inputIndexes,
-        .outputIndexes = outputIndexes,
-        .operandValues = operandValues,
-        .pools = pools,
-    };
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/nn/runtime/test/generated/vts_models/argmax_2_int32.model.cpp b/nn/runtime/test/generated/vts_models/argmax_2_int32.model.cpp
deleted file mode 100644
index 741bf18..0000000
--- a/nn/runtime/test/generated/vts_models/argmax_2_int32.model.cpp
+++ /dev/null
@@ -1,64 +0,0 @@
-// clang-format off
-// Generated file (from: argmax_2_int32.mod.py). Do not edit
-// Create the model
-Model createTestModel() {
-    const std::vector<Operand> operands = {
-        {
-            .type = OperandType::TENSOR_INT32,
-            .dimensions = {2, 2},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::INT32,
-            .dimensions = {},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
-        },
-        {
-            .type = OperandType::TENSOR_INT32,
-            .dimensions = {2},
-            .numberOfConsumers = 0,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_OUTPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        }
-    };
-
-    const std::vector<Operation> operations = {
-        {
-            .type = OperationType::ARGMAX,
-            .inputs = {0, 1},
-            .outputs = {2},
-        }
-    };
-
-    const std::vector<uint32_t> inputIndexes = {0};
-    const std::vector<uint32_t> outputIndexes = {2};
-    std::vector<uint8_t> operandValues = {
-      0, 0, 0, 0
-    };
-    const std::vector<hidl_memory> pools = {};
-
-    return {
-        .operands = operands,
-        .operations = operations,
-        .inputIndexes = inputIndexes,
-        .outputIndexes = outputIndexes,
-        .operandValues = operandValues,
-        .pools = pools,
-    };
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/nn/runtime/test/generated/vts_models/argmax_2_quant8.model.cpp b/nn/runtime/test/generated/vts_models/argmax_2_quant8.model.cpp
deleted file mode 100644
index df22a3e..0000000
--- a/nn/runtime/test/generated/vts_models/argmax_2_quant8.model.cpp
+++ /dev/null
@@ -1,64 +0,0 @@
-// clang-format off
-// Generated file (from: argmax_2_quant8.mod.py). Do not edit
-// Create the model
-Model createTestModel() {
-    const std::vector<Operand> operands = {
-        {
-            .type = OperandType::TENSOR_QUANT8_ASYMM,
-            .dimensions = {2, 2},
-            .numberOfConsumers = 1,
-            .scale = 1.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::INT32,
-            .dimensions = {},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
-        },
-        {
-            .type = OperandType::TENSOR_INT32,
-            .dimensions = {2},
-            .numberOfConsumers = 0,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_OUTPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        }
-    };
-
-    const std::vector<Operation> operations = {
-        {
-            .type = OperationType::ARGMAX,
-            .inputs = {0, 1},
-            .outputs = {2},
-        }
-    };
-
-    const std::vector<uint32_t> inputIndexes = {0};
-    const std::vector<uint32_t> outputIndexes = {2};
-    std::vector<uint8_t> operandValues = {
-      0, 0, 0, 0
-    };
-    const std::vector<hidl_memory> pools = {};
-
-    return {
-        .operands = operands,
-        .operations = operations,
-        .inputIndexes = inputIndexes,
-        .outputIndexes = outputIndexes,
-        .operandValues = operandValues,
-        .pools = pools,
-    };
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/nn/runtime/test/generated/vts_models/argmax_3.model.cpp b/nn/runtime/test/generated/vts_models/argmax_3.model.cpp
new file mode 100644
index 0000000..9377585
--- /dev/null
+++ b/nn/runtime/test/generated/vts_models/argmax_3.model.cpp
@@ -0,0 +1,313 @@
+// clang-format off
+// Generated file (from: argmax_3.mod.py). Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ARGMAX,
+            .inputs = {0, 1},
+            .outputs = {2},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {2};
+    std::vector<uint8_t> operandValues = {
+      255, 255, 255, 255
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_relaxed() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ARGMAX,
+            .inputs = {0, 1},
+            .outputs = {2},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {2};
+    std::vector<uint8_t> operandValues = {
+      255, 255, 255, 255
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_float16() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ARGMAX,
+            .inputs = {0, 1},
+            .outputs = {2},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {2};
+    std::vector<uint8_t> operandValues = {
+      255, 255, 255, 255
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_int32() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ARGMAX,
+            .inputs = {0, 1},
+            .outputs = {2},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {2};
+    std::vector<uint8_t> operandValues = {
+      255, 255, 255, 255
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_int32(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_quant8() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 1.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ARGMAX,
+            .inputs = {0, 1},
+            .outputs = {2},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {2};
+    std::vector<uint8_t> operandValues = {
+      255, 255, 255, 255
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_quant8(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/vts_models/argmin_1.model.cpp b/nn/runtime/test/generated/vts_models/argmin_1.model.cpp
new file mode 100644
index 0000000..456496f
--- /dev/null
+++ b/nn/runtime/test/generated/vts_models/argmin_1.model.cpp
@@ -0,0 +1,313 @@
+// clang-format off
+// Generated file (from: argmin_1.mod.py). Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ARGMIN,
+            .inputs = {0, 1},
+            .outputs = {2},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {2};
+    std::vector<uint8_t> operandValues = {
+      1, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_relaxed() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ARGMIN,
+            .inputs = {0, 1},
+            .outputs = {2},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {2};
+    std::vector<uint8_t> operandValues = {
+      1, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_float16() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ARGMIN,
+            .inputs = {0, 1},
+            .outputs = {2},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {2};
+    std::vector<uint8_t> operandValues = {
+      1, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_int32() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ARGMIN,
+            .inputs = {0, 1},
+            .outputs = {2},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {2};
+    std::vector<uint8_t> operandValues = {
+      1, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_int32(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_quant8() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 1.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ARGMIN,
+            .inputs = {0, 1},
+            .outputs = {2},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {2};
+    std::vector<uint8_t> operandValues = {
+      1, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_quant8(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/vts_models/argmin_1_float.model.cpp b/nn/runtime/test/generated/vts_models/argmin_1_float.model.cpp
deleted file mode 100644
index e111404..0000000
--- a/nn/runtime/test/generated/vts_models/argmin_1_float.model.cpp
+++ /dev/null
@@ -1,64 +0,0 @@
-// clang-format off
-// Generated file (from: argmin_1_float.mod.py). Do not edit
-// Create the model
-Model createTestModel() {
-    const std::vector<Operand> operands = {
-        {
-            .type = OperandType::TENSOR_FLOAT32,
-            .dimensions = {2, 2},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::INT32,
-            .dimensions = {},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
-        },
-        {
-            .type = OperandType::TENSOR_INT32,
-            .dimensions = {2},
-            .numberOfConsumers = 0,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_OUTPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        }
-    };
-
-    const std::vector<Operation> operations = {
-        {
-            .type = OperationType::ARGMIN,
-            .inputs = {0, 1},
-            .outputs = {2},
-        }
-    };
-
-    const std::vector<uint32_t> inputIndexes = {0};
-    const std::vector<uint32_t> outputIndexes = {2};
-    std::vector<uint8_t> operandValues = {
-      1, 0, 0, 0
-    };
-    const std::vector<hidl_memory> pools = {};
-
-    return {
-        .operands = operands,
-        .operations = operations,
-        .inputIndexes = inputIndexes,
-        .outputIndexes = outputIndexes,
-        .operandValues = operandValues,
-        .pools = pools,
-    };
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/nn/runtime/test/generated/vts_models/argmin_1_float_relaxed.model.cpp b/nn/runtime/test/generated/vts_models/argmin_1_float_relaxed.model.cpp
deleted file mode 100644
index fde01be..0000000
--- a/nn/runtime/test/generated/vts_models/argmin_1_float_relaxed.model.cpp
+++ /dev/null
@@ -1,65 +0,0 @@
-// clang-format off
-// Generated file (from: argmin_1_float_relaxed.mod.py). Do not edit
-// Create the model
-Model createTestModel() {
-    const std::vector<Operand> operands = {
-        {
-            .type = OperandType::TENSOR_FLOAT32,
-            .dimensions = {2, 2},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::INT32,
-            .dimensions = {},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
-        },
-        {
-            .type = OperandType::TENSOR_INT32,
-            .dimensions = {2},
-            .numberOfConsumers = 0,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_OUTPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        }
-    };
-
-    const std::vector<Operation> operations = {
-        {
-            .type = OperationType::ARGMIN,
-            .inputs = {0, 1},
-            .outputs = {2},
-        }
-    };
-
-    const std::vector<uint32_t> inputIndexes = {0};
-    const std::vector<uint32_t> outputIndexes = {2};
-    std::vector<uint8_t> operandValues = {
-      1, 0, 0, 0
-    };
-    const std::vector<hidl_memory> pools = {};
-
-    return {
-        .operands = operands,
-        .operations = operations,
-        .inputIndexes = inputIndexes,
-        .outputIndexes = outputIndexes,
-        .operandValues = operandValues,
-        .pools = pools,
-        .relaxComputationFloat32toFloat16 = true,
-    };
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/nn/runtime/test/generated/vts_models/argmin_1_quant8.model.cpp b/nn/runtime/test/generated/vts_models/argmin_1_quant8.model.cpp
deleted file mode 100644
index 36b8e3c..0000000
--- a/nn/runtime/test/generated/vts_models/argmin_1_quant8.model.cpp
+++ /dev/null
@@ -1,64 +0,0 @@
-// clang-format off
-// Generated file (from: argmin_1_quant8.mod.py). Do not edit
-// Create the model
-Model createTestModel() {
-    const std::vector<Operand> operands = {
-        {
-            .type = OperandType::TENSOR_QUANT8_ASYMM,
-            .dimensions = {2, 2},
-            .numberOfConsumers = 1,
-            .scale = 1.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::INT32,
-            .dimensions = {},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
-        },
-        {
-            .type = OperandType::TENSOR_INT32,
-            .dimensions = {2},
-            .numberOfConsumers = 0,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_OUTPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        }
-    };
-
-    const std::vector<Operation> operations = {
-        {
-            .type = OperationType::ARGMIN,
-            .inputs = {0, 1},
-            .outputs = {2},
-        }
-    };
-
-    const std::vector<uint32_t> inputIndexes = {0};
-    const std::vector<uint32_t> outputIndexes = {2};
-    std::vector<uint8_t> operandValues = {
-      1, 0, 0, 0
-    };
-    const std::vector<hidl_memory> pools = {};
-
-    return {
-        .operands = operands,
-        .operations = operations,
-        .inputIndexes = inputIndexes,
-        .outputIndexes = outputIndexes,
-        .operandValues = operandValues,
-        .pools = pools,
-    };
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/nn/runtime/test/generated/vts_models/argmin_2.model.cpp b/nn/runtime/test/generated/vts_models/argmin_2.model.cpp
new file mode 100644
index 0000000..8022c9d
--- /dev/null
+++ b/nn/runtime/test/generated/vts_models/argmin_2.model.cpp
@@ -0,0 +1,313 @@
+// clang-format off
+// Generated file (from: argmin_2.mod.py). Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ARGMIN,
+            .inputs = {0, 1},
+            .outputs = {2},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {2};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_relaxed() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ARGMIN,
+            .inputs = {0, 1},
+            .outputs = {2},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {2};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_float16() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ARGMIN,
+            .inputs = {0, 1},
+            .outputs = {2},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {2};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_int32() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ARGMIN,
+            .inputs = {0, 1},
+            .outputs = {2},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {2};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_int32(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_quant8() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 1.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ARGMIN,
+            .inputs = {0, 1},
+            .outputs = {2},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {2};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_quant8(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/vts_models/argmin_2_float.model.cpp b/nn/runtime/test/generated/vts_models/argmin_2_float.model.cpp
deleted file mode 100644
index 22cb9ae..0000000
--- a/nn/runtime/test/generated/vts_models/argmin_2_float.model.cpp
+++ /dev/null
@@ -1,64 +0,0 @@
-// clang-format off
-// Generated file (from: argmin_2_float.mod.py). Do not edit
-// Create the model
-Model createTestModel() {
-    const std::vector<Operand> operands = {
-        {
-            .type = OperandType::TENSOR_FLOAT32,
-            .dimensions = {2, 2},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::INT32,
-            .dimensions = {},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
-        },
-        {
-            .type = OperandType::TENSOR_INT32,
-            .dimensions = {2},
-            .numberOfConsumers = 0,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_OUTPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        }
-    };
-
-    const std::vector<Operation> operations = {
-        {
-            .type = OperationType::ARGMIN,
-            .inputs = {0, 1},
-            .outputs = {2},
-        }
-    };
-
-    const std::vector<uint32_t> inputIndexes = {0};
-    const std::vector<uint32_t> outputIndexes = {2};
-    std::vector<uint8_t> operandValues = {
-      0, 0, 0, 0
-    };
-    const std::vector<hidl_memory> pools = {};
-
-    return {
-        .operands = operands,
-        .operations = operations,
-        .inputIndexes = inputIndexes,
-        .outputIndexes = outputIndexes,
-        .operandValues = operandValues,
-        .pools = pools,
-    };
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/nn/runtime/test/generated/vts_models/argmin_2_float_relaxed.model.cpp b/nn/runtime/test/generated/vts_models/argmin_2_float_relaxed.model.cpp
deleted file mode 100644
index 612526c..0000000
--- a/nn/runtime/test/generated/vts_models/argmin_2_float_relaxed.model.cpp
+++ /dev/null
@@ -1,65 +0,0 @@
-// clang-format off
-// Generated file (from: argmin_2_float_relaxed.mod.py). Do not edit
-// Create the model
-Model createTestModel() {
-    const std::vector<Operand> operands = {
-        {
-            .type = OperandType::TENSOR_FLOAT32,
-            .dimensions = {2, 2},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::INT32,
-            .dimensions = {},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
-        },
-        {
-            .type = OperandType::TENSOR_INT32,
-            .dimensions = {2},
-            .numberOfConsumers = 0,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_OUTPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        }
-    };
-
-    const std::vector<Operation> operations = {
-        {
-            .type = OperationType::ARGMIN,
-            .inputs = {0, 1},
-            .outputs = {2},
-        }
-    };
-
-    const std::vector<uint32_t> inputIndexes = {0};
-    const std::vector<uint32_t> outputIndexes = {2};
-    std::vector<uint8_t> operandValues = {
-      0, 0, 0, 0
-    };
-    const std::vector<hidl_memory> pools = {};
-
-    return {
-        .operands = operands,
-        .operations = operations,
-        .inputIndexes = inputIndexes,
-        .outputIndexes = outputIndexes,
-        .operandValues = operandValues,
-        .pools = pools,
-        .relaxComputationFloat32toFloat16 = true,
-    };
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/nn/runtime/test/generated/vts_models/argmin_2_int32.model.cpp b/nn/runtime/test/generated/vts_models/argmin_2_int32.model.cpp
deleted file mode 100644
index c03fdb0..0000000
--- a/nn/runtime/test/generated/vts_models/argmin_2_int32.model.cpp
+++ /dev/null
@@ -1,64 +0,0 @@
-// clang-format off
-// Generated file (from: argmin_2_int32.mod.py). Do not edit
-// Create the model
-Model createTestModel() {
-    const std::vector<Operand> operands = {
-        {
-            .type = OperandType::TENSOR_INT32,
-            .dimensions = {2, 2},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::INT32,
-            .dimensions = {},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
-        },
-        {
-            .type = OperandType::TENSOR_INT32,
-            .dimensions = {2},
-            .numberOfConsumers = 0,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_OUTPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        }
-    };
-
-    const std::vector<Operation> operations = {
-        {
-            .type = OperationType::ARGMIN,
-            .inputs = {0, 1},
-            .outputs = {2},
-        }
-    };
-
-    const std::vector<uint32_t> inputIndexes = {0};
-    const std::vector<uint32_t> outputIndexes = {2};
-    std::vector<uint8_t> operandValues = {
-      0, 0, 0, 0
-    };
-    const std::vector<hidl_memory> pools = {};
-
-    return {
-        .operands = operands,
-        .operations = operations,
-        .inputIndexes = inputIndexes,
-        .outputIndexes = outputIndexes,
-        .operandValues = operandValues,
-        .pools = pools,
-    };
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/nn/runtime/test/generated/vts_models/argmin_2_quant8.model.cpp b/nn/runtime/test/generated/vts_models/argmin_2_quant8.model.cpp
deleted file mode 100644
index ec7c35e..0000000
--- a/nn/runtime/test/generated/vts_models/argmin_2_quant8.model.cpp
+++ /dev/null
@@ -1,64 +0,0 @@
-// clang-format off
-// Generated file (from: argmin_2_quant8.mod.py). Do not edit
-// Create the model
-Model createTestModel() {
-    const std::vector<Operand> operands = {
-        {
-            .type = OperandType::TENSOR_QUANT8_ASYMM,
-            .dimensions = {2, 2},
-            .numberOfConsumers = 1,
-            .scale = 1.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::INT32,
-            .dimensions = {},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
-        },
-        {
-            .type = OperandType::TENSOR_INT32,
-            .dimensions = {2},
-            .numberOfConsumers = 0,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_OUTPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        }
-    };
-
-    const std::vector<Operation> operations = {
-        {
-            .type = OperationType::ARGMIN,
-            .inputs = {0, 1},
-            .outputs = {2},
-        }
-    };
-
-    const std::vector<uint32_t> inputIndexes = {0};
-    const std::vector<uint32_t> outputIndexes = {2};
-    std::vector<uint8_t> operandValues = {
-      0, 0, 0, 0
-    };
-    const std::vector<hidl_memory> pools = {};
-
-    return {
-        .operands = operands,
-        .operations = operations,
-        .inputIndexes = inputIndexes,
-        .outputIndexes = outputIndexes,
-        .operandValues = operandValues,
-        .pools = pools,
-    };
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/nn/runtime/test/generated/vts_models/argmin_3.model.cpp b/nn/runtime/test/generated/vts_models/argmin_3.model.cpp
new file mode 100644
index 0000000..ab53fbb
--- /dev/null
+++ b/nn/runtime/test/generated/vts_models/argmin_3.model.cpp
@@ -0,0 +1,313 @@
+// clang-format off
+// Generated file (from: argmin_3.mod.py). Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ARGMIN,
+            .inputs = {0, 1},
+            .outputs = {2},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {2};
+    std::vector<uint8_t> operandValues = {
+      255, 255, 255, 255
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_relaxed() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ARGMIN,
+            .inputs = {0, 1},
+            .outputs = {2},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {2};
+    std::vector<uint8_t> operandValues = {
+      255, 255, 255, 255
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_float16() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ARGMIN,
+            .inputs = {0, 1},
+            .outputs = {2},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {2};
+    std::vector<uint8_t> operandValues = {
+      255, 255, 255, 255
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_int32() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ARGMIN,
+            .inputs = {0, 1},
+            .outputs = {2},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {2};
+    std::vector<uint8_t> operandValues = {
+      255, 255, 255, 255
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_int32(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_quant8() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 1.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ARGMIN,
+            .inputs = {0, 1},
+            .outputs = {2},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {2};
+    std::vector<uint8_t> operandValues = {
+      255, 255, 255, 255
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_quant8(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/vts_models/argmin_3_float.model.cpp b/nn/runtime/test/generated/vts_models/argmin_3_float.model.cpp
deleted file mode 100644
index 881272e..0000000
--- a/nn/runtime/test/generated/vts_models/argmin_3_float.model.cpp
+++ /dev/null
@@ -1,64 +0,0 @@
-// clang-format off
-// Generated file (from: argmin_3_float.mod.py). Do not edit
-// Create the model
-Model createTestModel() {
-    const std::vector<Operand> operands = {
-        {
-            .type = OperandType::TENSOR_FLOAT32,
-            .dimensions = {2, 2},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::INT32,
-            .dimensions = {},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
-        },
-        {
-            .type = OperandType::TENSOR_INT32,
-            .dimensions = {2},
-            .numberOfConsumers = 0,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_OUTPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        }
-    };
-
-    const std::vector<Operation> operations = {
-        {
-            .type = OperationType::ARGMIN,
-            .inputs = {0, 1},
-            .outputs = {2},
-        }
-    };
-
-    const std::vector<uint32_t> inputIndexes = {0};
-    const std::vector<uint32_t> outputIndexes = {2};
-    std::vector<uint8_t> operandValues = {
-      255, 255, 255, 255
-    };
-    const std::vector<hidl_memory> pools = {};
-
-    return {
-        .operands = operands,
-        .operations = operations,
-        .inputIndexes = inputIndexes,
-        .outputIndexes = outputIndexes,
-        .operandValues = operandValues,
-        .pools = pools,
-    };
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/nn/runtime/test/generated/vts_models/batch_to_space_v1_2.model.cpp b/nn/runtime/test/generated/vts_models/batch_to_space_v1_2.model.cpp
index 385c179..5ee2652 100644
--- a/nn/runtime/test/generated/vts_models/batch_to_space_v1_2.model.cpp
+++ b/nn/runtime/test/generated/vts_models/batch_to_space_v1_2.model.cpp
@@ -144,6 +144,77 @@
 }
 
 // Create the model
+Model createTestModel_nhwc_float16() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {4, 1, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BATCH_TO_SPACE_ND,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 2, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nhwc_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_nhwc_quant8() {
     const std::vector<Operand> operands = {
         {
@@ -358,6 +429,77 @@
 }
 
 // Create the model
+Model createTestModel_nchw_float16() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {4, 2, 1, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BATCH_TO_SPACE_ND,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 2, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nchw_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_nchw_quant8() {
     const std::vector<Operand> operands = {
         {
@@ -572,6 +714,77 @@
 }
 
 // Create the model
+Model createTestModel_nhwc_float16_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {4, 2, 2, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 4, 4, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BATCH_TO_SPACE_ND,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 2, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nhwc_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_nhwc_quant8_2() {
     const std::vector<Operand> operands = {
         {
@@ -786,6 +999,77 @@
 }
 
 // Create the model
+Model createTestModel_nchw_float16_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {4, 1, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 1, 4, 4},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BATCH_TO_SPACE_ND,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 2, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nchw_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_nchw_quant8_2() {
     const std::vector<Operand> operands = {
         {
diff --git a/nn/runtime/test/generated/vts_models/concat_mixed_quant.model.cpp b/nn/runtime/test/generated/vts_models/concat_mixed_quant.model.cpp
new file mode 100644
index 0000000..1edf35d
--- /dev/null
+++ b/nn/runtime/test/generated/vts_models/concat_mixed_quant.model.cpp
@@ -0,0 +1,180 @@
+// clang-format off
+// Generated file (from: concat_mixed_quant.mod.py). Do not edit
+// Create the model
+Model createTestModel_quant8() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {2, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.084f,
+            .zeroPoint = 127,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {2, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.05f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {2, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.089f,
+            .zeroPoint = 123,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {2, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.029f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {2, 1, 8},
+            .numberOfConsumers = 0,
+            .scale = 0.1f,
+            .zeroPoint = 127,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::CONCATENATION,
+            .inputs = {0, 1, 2, 3, 4},
+            .outputs = {5},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2, 3};
+    const std::vector<uint32_t> outputIndexes = {5};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_quant8(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_quant8_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {2, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.084f,
+            .zeroPoint = 127,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {2, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.05f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {2, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.089f,
+            .zeroPoint = 123,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {2, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.029f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {2, 1, 8},
+            .numberOfConsumers = 0,
+            .scale = 0.0078125f,
+            .zeroPoint = 127,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::CONCATENATION,
+            .inputs = {0, 1, 2, 3, 4},
+            .outputs = {5},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2, 3};
+    const std::vector<uint32_t> outputIndexes = {5};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_quant8_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/vts_models/depth_to_space_v1_2.model.cpp b/nn/runtime/test/generated/vts_models/depth_to_space_v1_2.model.cpp
index 787202b..e3bad6c 100644
--- a/nn/runtime/test/generated/vts_models/depth_to_space_v1_2.model.cpp
+++ b/nn/runtime/test/generated/vts_models/depth_to_space_v1_2.model.cpp
@@ -144,6 +144,77 @@
 }
 
 // Create the model
+Model createTestModel_nhwc_float16() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 1, 1, 8},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::DEPTH_TO_SPACE,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nhwc_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_nhwc_quant8() {
     const std::vector<Operand> operands = {
         {
@@ -358,6 +429,77 @@
 }
 
 // Create the model
+Model createTestModel_nchw_float16() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 8, 1, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::DEPTH_TO_SPACE,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nchw_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_nchw_quant8() {
     const std::vector<Operand> operands = {
         {
@@ -572,6 +714,77 @@
 }
 
 // Create the model
+Model createTestModel_nhwc_float16_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 4, 4, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::DEPTH_TO_SPACE,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nhwc_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_nhwc_quant8_2() {
     const std::vector<Operand> operands = {
         {
@@ -786,6 +999,77 @@
 }
 
 // Create the model
+Model createTestModel_nchw_float16_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 4, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 1, 4, 4},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::DEPTH_TO_SPACE,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nchw_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_nchw_quant8_2() {
     const std::vector<Operand> operands = {
         {
@@ -1000,6 +1284,77 @@
 }
 
 // Create the model
+Model createTestModel_nhwc_float16_3() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 8},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 4, 4, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::DEPTH_TO_SPACE,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nhwc_float16_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_nhwc_quant8_3() {
     const std::vector<Operand> operands = {
         {
@@ -1214,6 +1569,77 @@
 }
 
 // Create the model
+Model createTestModel_nchw_float16_3() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 8, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 4, 4},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::DEPTH_TO_SPACE,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nchw_float16_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_nchw_quant8_3() {
     const std::vector<Operand> operands = {
         {
diff --git a/nn/runtime/test/generated/vts_models/depthwise_conv2d_v1_2.model.cpp b/nn/runtime/test/generated/vts_models/depthwise_conv2d_v1_2.model.cpp
index 6e273b1..0005247 100644
--- a/nn/runtime/test/generated/vts_models/depthwise_conv2d_v1_2.model.cpp
+++ b/nn/runtime/test/generated/vts_models/depthwise_conv2d_v1_2.model.cpp
@@ -306,6 +306,158 @@
 }
 
 // Create the model
+Model createTestModel_nhwc_float16() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 3, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 32},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 32, .length = 8},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 40, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 44, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 48, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 52, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 56, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 60, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 64, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 68, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 72, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 4},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::DEPTHWISE_CONV_2D,
+            .inputs = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
+            .outputs = {12},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {12};
+    std::vector<uint8_t> operandValues = {
+      0, 52, 0, 0, 102, 50, 0, 0, 0, 52, 0, 0, 0, 0, 205, 52, 0, 52, 0, 0, 0, 0, 0, 0, 0, 52, 102, 46, 0, 0, 0, 0, 0, 60, 0, 64, 0, 66, 0, 68, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nhwc_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_nhwc_quant8() {
     const std::vector<Operand> operands = {
         {
@@ -763,6 +915,158 @@
 }
 
 // Create the model
+Model createTestModel_nhwc_weight_as_input_float16() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 3, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 12, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 16, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 20, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 24, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 28, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 32, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 4},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::DEPTHWISE_CONV_2D,
+            .inputs = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
+            .outputs = {12},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {12};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nhwc_weight_as_input_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_nhwc_weight_as_input_quant8() {
     const std::vector<Operand> operands = {
         {
@@ -1220,6 +1524,158 @@
 }
 
 // Create the model
+Model createTestModel_nchw_float16() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 3, 3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 32},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 32, .length = 8},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 40, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 44, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 48, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 52, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 56, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 60, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 64, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 68, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 72, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 4, 2, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::DEPTHWISE_CONV_2D,
+            .inputs = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
+            .outputs = {12},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {12};
+    std::vector<uint8_t> operandValues = {
+      0, 52, 0, 0, 102, 50, 0, 0, 0, 52, 0, 0, 0, 0, 205, 52, 0, 52, 0, 0, 0, 0, 0, 0, 0, 52, 102, 46, 0, 0, 0, 0, 0, 60, 0, 64, 0, 66, 0, 68, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nchw_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_nchw_quant8() {
     const std::vector<Operand> operands = {
         {
@@ -1677,6 +2133,158 @@
 }
 
 // Create the model
+Model createTestModel_nchw_weight_as_input_float16() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 3, 3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 12, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 16, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 20, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 24, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 28, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 32, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 4, 2, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::DEPTHWISE_CONV_2D,
+            .inputs = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
+            .outputs = {12},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {12};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nchw_weight_as_input_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_nchw_weight_as_input_quant8() {
     const std::vector<Operand> operands = {
         {
@@ -2080,6 +2688,131 @@
 }
 
 // Create the model
+Model createTestModel_nhwc_float16_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 3, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 32},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 32, .length = 8},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 40, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 44, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 48, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 52, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 56, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 60, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 1, 4},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::DEPTHWISE_CONV_2D,
+            .inputs = {0, 1, 2, 3, 4, 5, 6, 7, 8},
+            .outputs = {9},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {9};
+    std::vector<uint8_t> operandValues = {
+      0, 60, 0, 64, 0, 66, 0, 68, 128, 200, 0, 73, 128, 201, 0, 74, 0, 69, 0, 70, 0, 71, 0, 72, 128, 74, 0, 203, 128, 75, 0, 204, 0, 60, 0, 64, 0, 66, 0, 68, 2, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nhwc_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_nhwc_quant8_2() {
     const std::vector<Operand> operands = {
         {
@@ -2456,6 +3189,131 @@
 }
 
 // Create the model
+Model createTestModel_nhwc_weight_as_input_float16_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 3, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 12, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 16, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 20, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 1, 4},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::DEPTHWISE_CONV_2D,
+            .inputs = {0, 1, 2, 3, 4, 5, 6, 7, 8},
+            .outputs = {9},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {9};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nhwc_weight_as_input_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_nhwc_weight_as_input_quant8_2() {
     const std::vector<Operand> operands = {
         {
@@ -2832,6 +3690,131 @@
 }
 
 // Create the model
+Model createTestModel_nchw_float16_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 32},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 32, .length = 8},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 40, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 44, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 48, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 52, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 56, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 60, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 4, 2, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::DEPTHWISE_CONV_2D,
+            .inputs = {0, 1, 2, 3, 4, 5, 6, 7, 8},
+            .outputs = {9},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {9};
+    std::vector<uint8_t> operandValues = {
+      0, 60, 0, 64, 0, 66, 0, 68, 128, 200, 0, 73, 128, 201, 0, 74, 0, 69, 0, 70, 0, 71, 0, 72, 128, 74, 0, 203, 128, 75, 0, 204, 0, 60, 0, 64, 0, 66, 0, 68, 2, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nchw_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_nchw_quant8_2() {
     const std::vector<Operand> operands = {
         {
@@ -3208,6 +4191,131 @@
 }
 
 // Create the model
+Model createTestModel_nchw_weight_as_input_float16_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 12, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 16, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 20, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 4, 2, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::DEPTHWISE_CONV_2D,
+            .inputs = {0, 1, 2, 3, 4, 5, 6, 7, 8},
+            .outputs = {9},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {9};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nchw_weight_as_input_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_nchw_weight_as_input_quant8_2() {
     const std::vector<Operand> operands = {
         {
@@ -3638,6 +4746,158 @@
 }
 
 // Create the model
+Model createTestModel_large_nhwc_float16() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 16},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 16, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 20, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 24, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 28, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 32, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 36, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 40, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 44, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 48, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 52, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 1, 1, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::DEPTHWISE_CONV_2D,
+            .inputs = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
+            .outputs = {12},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {12};
+    std::vector<uint8_t> operandValues = {
+      0, 52, 0, 0, 0, 52, 0, 60, 0, 52, 0, 0, 0, 52, 0, 60, 64, 86, 64, 90, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_large_nhwc_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_large_nhwc_quant8() {
     const std::vector<Operand> operands = {
         {
@@ -4095,6 +5355,158 @@
 }
 
 // Create the model
+Model createTestModel_large_nhwc_weight_as_input_float16() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 12, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 16, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 20, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 24, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 28, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 32, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 1, 1, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::DEPTHWISE_CONV_2D,
+            .inputs = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
+            .outputs = {12},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {12};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_large_nhwc_weight_as_input_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_large_nhwc_weight_as_input_quant8() {
     const std::vector<Operand> operands = {
         {
@@ -4552,6 +5964,158 @@
 }
 
 // Create the model
+Model createTestModel_large_nchw_float16() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 16},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 16, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 20, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 24, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 28, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 32, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 36, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 40, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 44, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 48, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 52, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 1, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::DEPTHWISE_CONV_2D,
+            .inputs = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
+            .outputs = {12},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {12};
+    std::vector<uint8_t> operandValues = {
+      0, 52, 0, 0, 0, 52, 0, 60, 0, 52, 0, 0, 0, 52, 0, 60, 64, 86, 64, 90, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_large_nchw_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_large_nchw_quant8() {
     const std::vector<Operand> operands = {
         {
@@ -5009,6 +6573,158 @@
 }
 
 // Create the model
+Model createTestModel_large_nchw_weight_as_input_float16() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 12, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 16, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 20, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 24, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 28, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 32, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 1, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::DEPTHWISE_CONV_2D,
+            .inputs = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
+            .outputs = {12},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {12};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_large_nchw_weight_as_input_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_large_nchw_weight_as_input_quant8() {
     const std::vector<Operand> operands = {
         {
@@ -5466,6 +7182,158 @@
 }
 
 // Create the model
+Model createTestModel_large_nhwc_float16_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 32},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 32, .length = 8},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 40, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 44, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 48, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 52, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 56, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 60, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 64, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 68, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 72, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 1, 1, 4},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::DEPTHWISE_CONV_2D,
+            .inputs = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
+            .outputs = {12},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {12};
+    std::vector<uint8_t> operandValues = {
+      0, 52, 0, 0, 0, 73, 64, 82, 0, 52, 0, 60, 0, 77, 64, 82, 0, 52, 0, 0, 128, 79, 64, 82, 0, 52, 0, 60, 0, 81, 64, 82, 220, 109, 214, 110, 208, 111, 101, 112, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_large_nhwc_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_large_nhwc_quant8_2() {
     const std::vector<Operand> operands = {
         {
@@ -5923,6 +7791,158 @@
 }
 
 // Create the model
+Model createTestModel_large_nhwc_weight_as_input_float16_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 12, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 16, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 20, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 24, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 28, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 32, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 1, 1, 4},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::DEPTHWISE_CONV_2D,
+            .inputs = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
+            .outputs = {12},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {12};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_large_nhwc_weight_as_input_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_large_nhwc_weight_as_input_quant8_2() {
     const std::vector<Operand> operands = {
         {
@@ -6380,6 +8400,158 @@
 }
 
 // Create the model
+Model createTestModel_large_nchw_float16_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 4, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 32},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 32, .length = 8},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 40, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 44, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 48, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 52, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 56, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 60, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 64, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 68, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 72, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 4, 1, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::DEPTHWISE_CONV_2D,
+            .inputs = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
+            .outputs = {12},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {12};
+    std::vector<uint8_t> operandValues = {
+      0, 52, 0, 0, 0, 73, 64, 82, 0, 52, 0, 60, 0, 77, 64, 82, 0, 52, 0, 0, 128, 79, 64, 82, 0, 52, 0, 60, 0, 81, 64, 82, 220, 109, 214, 110, 208, 111, 101, 112, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_large_nchw_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_large_nchw_quant8_2() {
     const std::vector<Operand> operands = {
         {
@@ -6837,6 +9009,158 @@
 }
 
 // Create the model
+Model createTestModel_large_nchw_weight_as_input_float16_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 4, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 12, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 16, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 20, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 24, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 28, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 32, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 4, 1, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::DEPTHWISE_CONV_2D,
+            .inputs = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
+            .outputs = {12},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {12};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_large_nchw_weight_as_input_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_large_nchw_weight_as_input_quant8_2() {
     const std::vector<Operand> operands = {
         {
diff --git a/nn/runtime/test/generated/vts_models/argmax_2_float_relaxed.model.cpp b/nn/runtime/test/generated/vts_models/pad_float16.model.cpp
similarity index 78%
copy from nn/runtime/test/generated/vts_models/argmax_2_float_relaxed.model.cpp
copy to nn/runtime/test/generated/vts_models/pad_float16.model.cpp
index ac9ff41..906050e 100644
--- a/nn/runtime/test/generated/vts_models/argmax_2_float_relaxed.model.cpp
+++ b/nn/runtime/test/generated/vts_models/pad_float16.model.cpp
@@ -1,11 +1,11 @@
 // clang-format off
-// Generated file (from: argmax_2_float_relaxed.mod.py). Do not edit
+// Generated file (from: pad_float16.mod.py). Do not edit
 // Create the model
 Model createTestModel() {
     const std::vector<Operand> operands = {
         {
-            .type = OperandType::TENSOR_FLOAT32,
-            .dimensions = {2, 2},
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 1},
             .numberOfConsumers = 1,
             .scale = 0.0f,
             .zeroPoint = 0,
@@ -13,17 +13,17 @@
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
         },
         {
-            .type = OperandType::INT32,
-            .dimensions = {},
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4, 2},
             .numberOfConsumers = 1,
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+            .location = {.poolIndex = 0, .offset = 0, .length = 32},
         },
         {
-            .type = OperandType::TENSOR_INT32,
-            .dimensions = {2},
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 4, 4, 1},
             .numberOfConsumers = 0,
             .scale = 0.0f,
             .zeroPoint = 0,
@@ -34,7 +34,7 @@
 
     const std::vector<Operation> operations = {
         {
-            .type = OperationType::ARGMAX,
+            .type = OperationType::PAD,
             .inputs = {0, 1},
             .outputs = {2},
         }
@@ -43,7 +43,7 @@
     const std::vector<uint32_t> inputIndexes = {0};
     const std::vector<uint32_t> outputIndexes = {2};
     std::vector<uint8_t> operandValues = {
-      0, 0, 0, 0
+      0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
     };
     const std::vector<hidl_memory> pools = {};
 
@@ -54,7 +54,6 @@
         .outputIndexes = outputIndexes,
         .operandValues = operandValues,
         .pools = pools,
-        .relaxComputationFloat32toFloat16 = true,
     };
 }
 
diff --git a/nn/runtime/test/generated/vts_models/pad_v2_1_float.model.cpp b/nn/runtime/test/generated/vts_models/pad_v2_1_float.model.cpp
index 4d4cf25..63571b7 100644
--- a/nn/runtime/test/generated/vts_models/pad_v2_1_float.model.cpp
+++ b/nn/runtime/test/generated/vts_models/pad_v2_1_float.model.cpp
@@ -71,3 +71,74 @@
   return ignore.find(i) != ignore.end();
 }
 
+// Create the model
+Model createTestModel_float16() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 3, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 32},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 32, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 4, 7, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::PAD_V2,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 205, 204, 20, 65
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/vts_models/argmax_3_float.model.cpp b/nn/runtime/test/generated/vts_models/reshape_float16.model.cpp
similarity index 83%
rename from nn/runtime/test/generated/vts_models/argmax_3_float.model.cpp
rename to nn/runtime/test/generated/vts_models/reshape_float16.model.cpp
index caa1a00..644b7c1 100644
--- a/nn/runtime/test/generated/vts_models/argmax_3_float.model.cpp
+++ b/nn/runtime/test/generated/vts_models/reshape_float16.model.cpp
@@ -1,11 +1,11 @@
 // clang-format off
-// Generated file (from: argmax_3_float.mod.py). Do not edit
+// Generated file (from: reshape_float16.mod.py). Do not edit
 // Create the model
 Model createTestModel() {
     const std::vector<Operand> operands = {
         {
-            .type = OperandType::TENSOR_FLOAT32,
-            .dimensions = {2, 2},
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 1, 3, 3},
             .numberOfConsumers = 1,
             .scale = 0.0f,
             .zeroPoint = 0,
@@ -13,8 +13,8 @@
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
         },
         {
-            .type = OperandType::INT32,
-            .dimensions = {},
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
             .numberOfConsumers = 1,
             .scale = 0.0f,
             .zeroPoint = 0,
@@ -22,8 +22,8 @@
             .location = {.poolIndex = 0, .offset = 0, .length = 4},
         },
         {
-            .type = OperandType::TENSOR_INT32,
-            .dimensions = {2},
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {9},
             .numberOfConsumers = 0,
             .scale = 0.0f,
             .zeroPoint = 0,
@@ -34,7 +34,7 @@
 
     const std::vector<Operation> operations = {
         {
-            .type = OperationType::ARGMAX,
+            .type = OperationType::RESHAPE,
             .inputs = {0, 1},
             .outputs = {2},
         }
diff --git a/nn/runtime/test/generated/vts_models/resize_bilinear_v1_2.model.cpp b/nn/runtime/test/generated/vts_models/resize_bilinear_v1_2.model.cpp
index 9ebd9dc..0dc7c33 100644
--- a/nn/runtime/test/generated/vts_models/resize_bilinear_v1_2.model.cpp
+++ b/nn/runtime/test/generated/vts_models/resize_bilinear_v1_2.model.cpp
@@ -162,6 +162,86 @@
 }
 
 // Create the model
+Model createTestModel_nhwc_float16() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 3, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::RESIZE_BILINEAR,
+            .inputs = {0, 1, 2, 3},
+            .outputs = {4},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {4};
+    std::vector<uint8_t> operandValues = {
+      3, 0, 0, 0, 3, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nhwc_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_nchw() {
     const std::vector<Operand> operands = {
         {
@@ -323,6 +403,86 @@
 }
 
 // Create the model
+Model createTestModel_nchw_float16() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 1, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 1, 3, 3},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::RESIZE_BILINEAR,
+            .inputs = {0, 1, 2, 3},
+            .outputs = {4},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {4};
+    std::vector<uint8_t> operandValues = {
+      3, 0, 0, 0, 3, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nchw_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_nhwc_2() {
     const std::vector<Operand> operands = {
         {
@@ -484,6 +644,86 @@
 }
 
 // Create the model
+Model createTestModel_nhwc_float16_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 3, 3, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::RESIZE_BILINEAR,
+            .inputs = {0, 1, 2, 3},
+            .outputs = {4},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {4};
+    std::vector<uint8_t> operandValues = {
+      3, 0, 0, 0, 3, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nhwc_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_nchw_2() {
     const std::vector<Operand> operands = {
         {
@@ -644,3 +884,83 @@
   return ignore.find(i) != ignore.end();
 }
 
+// Create the model
+Model createTestModel_nchw_float16_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 3, 3},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::RESIZE_BILINEAR,
+            .inputs = {0, 1, 2, 3},
+            .outputs = {4},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {4};
+    std::vector<uint8_t> operandValues = {
+      3, 0, 0, 0, 3, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nchw_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/vts_models/roi_pooling.model.cpp b/nn/runtime/test/generated/vts_models/roi_pooling.model.cpp
new file mode 100644
index 0000000..cec26cc
--- /dev/null
+++ b/nn/runtime/test/generated/vts_models/roi_pooling.model.cpp
@@ -0,0 +1,1074 @@
+// clang-format off
+// Generated file (from: roi_pooling.mod.py). Do not edit
+// Create the model
+Model createTestModel_nhwc() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 4, 4, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 12, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 2, 2, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ROI_POOLING,
+            .inputs = {0, 1, 2, 3, 4},
+            .outputs = {5},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1};
+    const std::vector<uint32_t> outputIndexes = {5};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 63, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nhwc(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_nhwc_relaxed() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 4, 4, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 12, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 2, 2, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ROI_POOLING,
+            .inputs = {0, 1, 2, 3, 4},
+            .outputs = {5},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1};
+    const std::vector<uint32_t> outputIndexes = {5};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 63, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_nhwc_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_nhwc_quant8() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 4, 4, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.25f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 12, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {4, 2, 2, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.25f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ROI_POOLING,
+            .inputs = {0, 1, 2, 3, 4},
+            .outputs = {5},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1};
+    const std::vector<uint32_t> outputIndexes = {5};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 63, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nhwc_quant8(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_nchw() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 1, 4, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 12, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 1, 2, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ROI_POOLING,
+            .inputs = {0, 1, 2, 3, 4},
+            .outputs = {5},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1};
+    const std::vector<uint32_t> outputIndexes = {5};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 63, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nchw(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_nchw_relaxed() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 1, 4, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 12, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 1, 2, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ROI_POOLING,
+            .inputs = {0, 1, 2, 3, 4},
+            .outputs = {5},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1};
+    const std::vector<uint32_t> outputIndexes = {5};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 63, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_nchw_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_nchw_quant8() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 1, 4, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.25f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 12, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {4, 1, 2, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.25f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ROI_POOLING,
+            .inputs = {0, 1, 2, 3, 4},
+            .outputs = {5},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1};
+    const std::vector<uint32_t> outputIndexes = {5};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 63, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nchw_quant8(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_nhwc_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {2, 4, 8, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 5},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 12, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 2, 3, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ROI_POOLING,
+            .inputs = {0, 1, 2, 3, 4},
+            .outputs = {5},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1};
+    const std::vector<uint32_t> outputIndexes = {5};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 3, 0, 0, 0, 0, 0, 128, 62, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nhwc_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_nhwc_relaxed_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {2, 4, 8, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 5},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 12, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 2, 3, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ROI_POOLING,
+            .inputs = {0, 1, 2, 3, 4},
+            .outputs = {5},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1};
+    const std::vector<uint32_t> outputIndexes = {5};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 3, 0, 0, 0, 0, 0, 128, 62, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_nhwc_relaxed_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_nhwc_quant8_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {2, 4, 8, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.04f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 5},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 12, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {4, 2, 3, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.04f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ROI_POOLING,
+            .inputs = {0, 1, 2, 3, 4},
+            .outputs = {5},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1};
+    const std::vector<uint32_t> outputIndexes = {5};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 3, 0, 0, 0, 0, 0, 128, 62, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nhwc_quant8_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_nchw_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {2, 2, 4, 8},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 5},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 12, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 2, 2, 3},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ROI_POOLING,
+            .inputs = {0, 1, 2, 3, 4},
+            .outputs = {5},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1};
+    const std::vector<uint32_t> outputIndexes = {5};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 3, 0, 0, 0, 0, 0, 128, 62, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nchw_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_nchw_relaxed_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {2, 2, 4, 8},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 5},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 12, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 2, 2, 3},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ROI_POOLING,
+            .inputs = {0, 1, 2, 3, 4},
+            .outputs = {5},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1};
+    const std::vector<uint32_t> outputIndexes = {5};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 3, 0, 0, 0, 0, 0, 128, 62, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_nchw_relaxed_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_nchw_quant8_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {2, 2, 4, 8},
+            .numberOfConsumers = 1,
+            .scale = 0.04f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 5},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 12, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {4, 2, 2, 3},
+            .numberOfConsumers = 0,
+            .scale = 0.04f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ROI_POOLING,
+            .inputs = {0, 1, 2, 3, 4},
+            .outputs = {5},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1};
+    const std::vector<uint32_t> outputIndexes = {5};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 3, 0, 0, 0, 0, 0, 128, 62, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nchw_quant8_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/vts_models/slice.model.cpp b/nn/runtime/test/generated/vts_models/slice.model.cpp
new file mode 100644
index 0000000..79141f7
--- /dev/null
+++ b/nn/runtime/test/generated/vts_models/slice.model.cpp
@@ -0,0 +1,1666 @@
+// clang-format off
+// Generated file (from: slice.mod.py). Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SLICE,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {};
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_relaxed() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SLICE,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {};
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_float16() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SLICE,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {};
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {2, 3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SLICE,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {};
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_relaxed_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {2, 3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SLICE,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {};
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_relaxed_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_float16_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {2, 3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SLICE,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {};
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_3() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {2, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {2, 3, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SLICE,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {};
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_relaxed_3() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {2, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {2, 3, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SLICE,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {};
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_relaxed_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_float16_3() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {2, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {2, 3, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SLICE,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {};
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_float16_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model (generated VTS test: SLICE, TENSOR_FLOAT32 {4,1,1,1} -> {3,1,1,1})
+Model createTestModel_4() {
+    const std::vector<Operand> operands = {
+        {  // operand 0: data tensor to be sliced
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 1, 1, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 1: TENSOR_INT32{4} -- presumably the SLICE "begin" indices (confirm against spec)
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 2: TENSOR_INT32{4} -- presumably the SLICE "size" vector
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 3: sliced output tensor
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {3, 1, 1, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SLICE,
+            .inputs = {0, 1, 2},  // data, begin, size
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {};  // all operands fed at run time; no constant data
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_4(int i) {  // empty ignore set: all outputs compared
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model (generated VTS test: SLICE, TENSOR_FLOAT32 {4,1,1,1} -> {3,1,1,1}, relaxed-precision variant)
+Model createTestModel_relaxed_4() {
+    const std::vector<Operand> operands = {
+        {  // operand 0: data tensor to be sliced
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 1, 1, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 1: TENSOR_INT32{4} -- presumably the SLICE "begin" indices
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 2: TENSOR_INT32{4} -- presumably the SLICE "size" vector
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 3: sliced output tensor
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {3, 1, 1, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SLICE,
+            .inputs = {0, 1, 2},  // data, begin, size
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {};  // all operands fed at run time; no constant data
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,  // driver may compute fp32 ops at fp16 precision
+    };
+}
+
+inline bool is_ignored_relaxed_4(int i) {  // empty ignore set: all outputs compared
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model (generated VTS test: SLICE, TENSOR_FLOAT16 {4,1,1,1} -> {3,1,1,1})
+Model createTestModel_float16_4() {
+    const std::vector<Operand> operands = {
+        {  // operand 0: fp16 data tensor to be sliced
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {4, 1, 1, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 1: TENSOR_INT32{4} -- presumably the SLICE "begin" indices
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 2: TENSOR_INT32{4} -- presumably the SLICE "size" vector
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 3: fp16 sliced output tensor
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {3, 1, 1, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SLICE,
+            .inputs = {0, 1, 2},  // data, begin, size
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {};  // all operands fed at run time; no constant data
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_float16_4(int i) {  // empty ignore set: all outputs compared
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model (generated VTS test: SLICE, TENSOR_INT32 {3,2,3,1} -> {1,1,3,1})
+Model createTestModel_5() {
+    const std::vector<Operand> operands = {
+        {  // operand 0: int32 data tensor to be sliced
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {3, 2, 3, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 1: TENSOR_INT32{4} -- presumably the SLICE "begin" indices
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 2: TENSOR_INT32{4} -- presumably the SLICE "size" vector
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 3: sliced output tensor
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1, 1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SLICE,
+            .inputs = {0, 1, 2},  // data, begin, size
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {};  // all operands fed at run time; no constant data
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_5(int i) {  // empty ignore set: all outputs compared
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model (generated VTS test: SLICE, TENSOR_INT32 {3,2,3,1} -> {1,1,3,1}, relaxed-precision variant)
+Model createTestModel_relaxed_5() {
+    const std::vector<Operand> operands = {
+        {  // operand 0: int32 data tensor to be sliced
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {3, 2, 3, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 1: TENSOR_INT32{4} -- presumably the SLICE "begin" indices
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 2: TENSOR_INT32{4} -- presumably the SLICE "size" vector
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 3: sliced output tensor
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1, 1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SLICE,
+            .inputs = {0, 1, 2},  // data, begin, size
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {};  // all operands fed at run time; no constant data
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,  // driver may compute fp32 ops at fp16 precision
+    };
+}
+
+inline bool is_ignored_relaxed_5(int i) {  // empty ignore set: all outputs compared
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model ("float16" variant by name, but all tensors here are TENSOR_INT32 -- presumably the generator emits every type variant uniformly; confirm against the test generator)
+Model createTestModel_float16_5() {
+    const std::vector<Operand> operands = {
+        {  // operand 0: int32 data tensor to be sliced
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {3, 2, 3, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 1: TENSOR_INT32{4} -- presumably the SLICE "begin" indices
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 2: TENSOR_INT32{4} -- presumably the SLICE "size" vector
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 3: sliced output tensor
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1, 1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SLICE,
+            .inputs = {0, 1, 2},  // data, begin, size
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {};  // all operands fed at run time; no constant data
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_float16_5(int i) {  // empty ignore set: all outputs compared
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model (generated VTS test: SLICE, TENSOR_INT32 {3,2,3,1} -> {2,1,3,1})
+Model createTestModel_6() {
+    const std::vector<Operand> operands = {
+        {  // operand 0: int32 data tensor to be sliced
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {3, 2, 3, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 1: TENSOR_INT32{4} -- presumably the SLICE "begin" indices
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 2: TENSOR_INT32{4} -- presumably the SLICE "size" vector
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 3: sliced output tensor
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2, 1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SLICE,
+            .inputs = {0, 1, 2},  // data, begin, size
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {};  // all operands fed at run time; no constant data
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_6(int i) {  // empty ignore set: all outputs compared
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model (generated VTS test: SLICE, TENSOR_INT32 {3,2,3,1} -> {2,1,3,1}, relaxed-precision variant)
+Model createTestModel_relaxed_6() {
+    const std::vector<Operand> operands = {
+        {  // operand 0: int32 data tensor to be sliced
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {3, 2, 3, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 1: TENSOR_INT32{4} -- presumably the SLICE "begin" indices
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 2: TENSOR_INT32{4} -- presumably the SLICE "size" vector
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 3: sliced output tensor
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2, 1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SLICE,
+            .inputs = {0, 1, 2},  // data, begin, size
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {};  // all operands fed at run time; no constant data
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,  // driver may compute fp32 ops at fp16 precision
+    };
+}
+
+inline bool is_ignored_relaxed_6(int i) {  // empty ignore set: all outputs compared
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model ("float16" variant by name, but all tensors here are TENSOR_INT32 -- presumably the generator emits every type variant uniformly; confirm against the test generator)
+Model createTestModel_float16_6() {
+    const std::vector<Operand> operands = {
+        {  // operand 0: int32 data tensor to be sliced
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {3, 2, 3, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 1: TENSOR_INT32{4} -- presumably the SLICE "begin" indices
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 2: TENSOR_INT32{4} -- presumably the SLICE "size" vector
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 3: sliced output tensor
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2, 1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SLICE,
+            .inputs = {0, 1, 2},  // data, begin, size
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {};  // all operands fed at run time; no constant data
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_float16_6(int i) {  // empty ignore set: all outputs compared
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model (generated VTS test: SLICE, TENSOR_QUANT8_ASYMM {3,2,3,1} -> {2,1,3,1}; scale 2.0, zeroPoint 128 preserved input->output)
+Model createTestModel_7() {
+    const std::vector<Operand> operands = {
+        {  // operand 0: quantized data tensor to be sliced
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {3, 2, 3, 1},
+            .numberOfConsumers = 1,
+            .scale = 2.0f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 1: TENSOR_INT32{4} -- presumably the SLICE "begin" indices
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 2: TENSOR_INT32{4} -- presumably the SLICE "size" vector
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 3: quantized output with the same scale/zeroPoint as the input
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {2, 1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 2.0f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SLICE,
+            .inputs = {0, 1, 2},  // data, begin, size
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {};  // all operands fed at run time; no constant data
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_7(int i) {  // empty ignore set: all outputs compared
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model (generated VTS test: SLICE, TENSOR_QUANT8_ASYMM {3,2,3,1} -> {2,1,3,1}, relaxed-precision variant)
+Model createTestModel_relaxed_7() {
+    const std::vector<Operand> operands = {
+        {  // operand 0: quantized data tensor to be sliced
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {3, 2, 3, 1},
+            .numberOfConsumers = 1,
+            .scale = 2.0f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 1: TENSOR_INT32{4} -- presumably the SLICE "begin" indices
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 2: TENSOR_INT32{4} -- presumably the SLICE "size" vector
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 3: quantized output with the same scale/zeroPoint as the input
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {2, 1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 2.0f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SLICE,
+            .inputs = {0, 1, 2},  // data, begin, size
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {};  // all operands fed at run time; no constant data
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,  // driver may compute fp32 ops at fp16 precision
+    };
+}
+
+inline bool is_ignored_relaxed_7(int i) {  // empty ignore set: all outputs compared
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model ("float16" variant by name, but tensors here are TENSOR_QUANT8_ASYMM/TENSOR_INT32 -- presumably the generator emits every type variant uniformly; confirm against the test generator)
+Model createTestModel_float16_7() {
+    const std::vector<Operand> operands = {
+        {  // operand 0: quantized data tensor to be sliced
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {3, 2, 3, 1},
+            .numberOfConsumers = 1,
+            .scale = 2.0f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 1: TENSOR_INT32{4} -- presumably the SLICE "begin" indices
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 2: TENSOR_INT32{4} -- presumably the SLICE "size" vector
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 3: quantized output with the same scale/zeroPoint as the input
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {2, 1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 2.0f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SLICE,
+            .inputs = {0, 1, 2},  // data, begin, size
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {};  // all operands fed at run time; no constant data
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_float16_7(int i) {  // empty ignore set: all outputs compared
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model (generated VTS test: SLICE, TENSOR_INT32 {3,2,3,1} -> {2,1,3,1}; same topology as model 6 -- the example data differ in the companion examples file)
+Model createTestModel_8() {
+    const std::vector<Operand> operands = {
+        {  // operand 0: int32 data tensor to be sliced
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {3, 2, 3, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 1: TENSOR_INT32{4} -- presumably the SLICE "begin" indices
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 2: TENSOR_INT32{4} -- presumably the SLICE "size" vector
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 3: sliced output tensor
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2, 1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SLICE,
+            .inputs = {0, 1, 2},  // data, begin, size
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {};  // all operands fed at run time; no constant data
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_8(int i) {  // empty ignore set: all outputs compared
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model (generated VTS test: SLICE, TENSOR_INT32 {3,2,3,1} -> {2,1,3,1}, relaxed-precision variant)
+Model createTestModel_relaxed_8() {
+    const std::vector<Operand> operands = {
+        {  // operand 0: int32 data tensor to be sliced
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {3, 2, 3, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 1: TENSOR_INT32{4} -- presumably the SLICE "begin" indices
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 2: TENSOR_INT32{4} -- presumably the SLICE "size" vector
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 3: sliced output tensor
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2, 1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SLICE,
+            .inputs = {0, 1, 2},  // data, begin, size
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {};  // all operands fed at run time; no constant data
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,  // driver may compute fp32 ops at fp16 precision
+    };
+}
+
+inline bool is_ignored_relaxed_8(int i) {  // empty ignore set: all outputs compared
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model ("float16" variant by name, but all tensors here are TENSOR_INT32 -- presumably the generator emits every type variant uniformly; confirm against the test generator)
+Model createTestModel_float16_8() {
+    const std::vector<Operand> operands = {
+        {  // operand 0: int32 data tensor to be sliced
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {3, 2, 3, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 1: TENSOR_INT32{4} -- presumably the SLICE "begin" indices
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 2: TENSOR_INT32{4} -- presumably the SLICE "size" vector
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {  // operand 3: sliced output tensor
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2, 1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SLICE,
+            .inputs = {0, 1, 2},  // data, begin, size
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {};  // all operands fed at run time; no constant data
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_float16_8(int i) {  // empty ignore set: all outputs compared
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/vts_models/space_to_batch_v1_2.model.cpp b/nn/runtime/test/generated/vts_models/space_to_batch_v1_2.model.cpp
index ea6a078..3203582 100644
--- a/nn/runtime/test/generated/vts_models/space_to_batch_v1_2.model.cpp
+++ b/nn/runtime/test/generated/vts_models/space_to_batch_v1_2.model.cpp
@@ -162,6 +162,86 @@
 }
 
 // Create the model
+Model createTestModel_nhwc_float16() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 16},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 24, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {4, 1, 1, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SPACE_TO_BATCH_ND,
+            .inputs = {0, 1, 2, 3},
+            .outputs = {4},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {4};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nhwc_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_nhwc_quant8() {
     const std::vector<Operand> operands = {
         {
@@ -403,6 +483,86 @@
 }
 
 // Create the model
+Model createTestModel_nchw_float16() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 16},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 24, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {4, 2, 1, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SPACE_TO_BATCH_ND,
+            .inputs = {0, 1, 2, 3},
+            .outputs = {4},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {4};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nchw_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_nchw_quant8() {
     const std::vector<Operand> operands = {
         {
@@ -644,6 +804,86 @@
 }
 
 // Create the model
+Model createTestModel_nhwc_float16_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 4, 4, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 16},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 24, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {4, 2, 2, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SPACE_TO_BATCH_ND,
+            .inputs = {0, 1, 2, 3},
+            .outputs = {4},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {4};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nhwc_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_nhwc_quant8_2() {
     const std::vector<Operand> operands = {
         {
@@ -885,6 +1125,86 @@
 }
 
 // Create the model
+Model createTestModel_nchw_float16_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 1, 4, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 16},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 24, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {4, 1, 2, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SPACE_TO_BATCH_ND,
+            .inputs = {0, 1, 2, 3},
+            .outputs = {4},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {4};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nchw_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_nchw_quant8_2() {
     const std::vector<Operand> operands = {
         {
@@ -1126,6 +1446,86 @@
 }
 
 // Create the model
+Model createTestModel_nhwc_float16_3() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 5, 2, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 16},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 24, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {6, 2, 2, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SPACE_TO_BATCH_ND,
+            .inputs = {0, 1, 2, 3},
+            .outputs = {4},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {4};
+    std::vector<uint8_t> operandValues = {
+      3, 0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nhwc_float16_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_nhwc_quant8_3() {
     const std::vector<Operand> operands = {
         {
@@ -1367,6 +1767,86 @@
 }
 
 // Create the model
+Model createTestModel_nchw_float16_3() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 1, 5, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 16},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 24, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {6, 1, 2, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SPACE_TO_BATCH_ND,
+            .inputs = {0, 1, 2, 3},
+            .outputs = {4},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {4};
+    std::vector<uint8_t> operandValues = {
+      3, 0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nchw_float16_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_nchw_quant8_3() {
     const std::vector<Operand> operands = {
         {
@@ -1608,6 +2088,86 @@
 }
 
 // Create the model
+Model createTestModel_nhwc_float16_4() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 4, 2, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 16},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 24, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {6, 2, 4, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SPACE_TO_BATCH_ND,
+            .inputs = {0, 1, 2, 3},
+            .outputs = {4},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {4};
+    std::vector<uint8_t> operandValues = {
+      3, 0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 4, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nhwc_float16_4(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_nhwc_quant8_4() {
     const std::vector<Operand> operands = {
         {
@@ -1849,6 +2409,86 @@
 }
 
 // Create the model
+Model createTestModel_nchw_float16_4() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 1, 4, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 16},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 24, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {6, 1, 2, 4},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SPACE_TO_BATCH_ND,
+            .inputs = {0, 1, 2, 3},
+            .outputs = {4},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {4};
+    std::vector<uint8_t> operandValues = {
+      3, 0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 4, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nchw_float16_4(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_nchw_quant8_4() {
     const std::vector<Operand> operands = {
         {
diff --git a/nn/runtime/test/generated/vts_models/space_to_depth_v1_2.model.cpp b/nn/runtime/test/generated/vts_models/space_to_depth_v1_2.model.cpp
index ab66f1d..aca8bff 100644
--- a/nn/runtime/test/generated/vts_models/space_to_depth_v1_2.model.cpp
+++ b/nn/runtime/test/generated/vts_models/space_to_depth_v1_2.model.cpp
@@ -144,6 +144,77 @@
 }
 
 // Create the model
+Model createTestModel_nhwc_float16() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 1, 1, 8},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SPACE_TO_DEPTH,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nhwc_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_nhwc_quant8() {
     const std::vector<Operand> operands = {
         {
@@ -358,6 +429,77 @@
 }
 
 // Create the model
+Model createTestModel_nchw_float16() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 8, 1, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SPACE_TO_DEPTH,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nchw_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_nchw_quant8() {
     const std::vector<Operand> operands = {
         {
@@ -572,6 +714,77 @@
 }
 
 // Create the model
+Model createTestModel_nhwc_float16_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 4, 4, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 4},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SPACE_TO_DEPTH,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nhwc_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_nhwc_quant8_2() {
     const std::vector<Operand> operands = {
         {
@@ -786,6 +999,77 @@
 }
 
 // Create the model
+Model createTestModel_nchw_float16_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 1, 4, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 4, 2, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SPACE_TO_DEPTH,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nchw_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_nchw_quant8_2() {
     const std::vector<Operand> operands = {
         {
@@ -1000,6 +1284,77 @@
 }
 
 // Create the model
+Model createTestModel_nhwc_float16_3() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 4, 4, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 8},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SPACE_TO_DEPTH,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nhwc_float16_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_nhwc_quant8_3() {
     const std::vector<Operand> operands = {
         {
@@ -1214,6 +1569,77 @@
 }
 
 // Create the model
+Model createTestModel_nchw_float16_3() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 4, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 8, 2, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SPACE_TO_DEPTH,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nchw_float16_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
 Model createTestModel_nchw_quant8_3() {
     const std::vector<Operand> operands = {
         {
diff --git a/nn/runtime/test/generated/vts_models/argmin_1_int32.model.cpp b/nn/runtime/test/generated/vts_models/squeeze_float16.model.cpp
similarity index 83%
rename from nn/runtime/test/generated/vts_models/argmin_1_int32.model.cpp
rename to nn/runtime/test/generated/vts_models/squeeze_float16.model.cpp
index 6ac3ef4..f2d8e21 100644
--- a/nn/runtime/test/generated/vts_models/argmin_1_int32.model.cpp
+++ b/nn/runtime/test/generated/vts_models/squeeze_float16.model.cpp
@@ -1,11 +1,11 @@
 // clang-format off
-// Generated file (from: argmin_1_int32.mod.py). Do not edit
+// Generated file (from: squeeze_float16.mod.py). Do not edit
 // Create the model
 Model createTestModel() {
     const std::vector<Operand> operands = {
         {
-            .type = OperandType::TENSOR_INT32,
-            .dimensions = {2, 2},
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {4, 1, 1, 2},
             .numberOfConsumers = 1,
             .scale = 0.0f,
             .zeroPoint = 0,
@@ -13,17 +13,17 @@
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
         },
         {
-            .type = OperandType::INT32,
-            .dimensions = {},
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
             .numberOfConsumers = 1,
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
         },
         {
-            .type = OperandType::TENSOR_INT32,
-            .dimensions = {2},
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {4, 2},
             .numberOfConsumers = 0,
             .scale = 0.0f,
             .zeroPoint = 0,
@@ -34,7 +34,7 @@
 
     const std::vector<Operation> operations = {
         {
-            .type = OperationType::ARGMIN,
+            .type = OperationType::SQUEEZE,
             .inputs = {0, 1},
             .outputs = {2},
         }
@@ -43,7 +43,7 @@
     const std::vector<uint32_t> inputIndexes = {0};
     const std::vector<uint32_t> outputIndexes = {2};
     std::vector<uint8_t> operandValues = {
-      1, 0, 0, 0
+      1, 0, 0, 0, 2, 0, 0, 0
     };
     const std::vector<hidl_memory> pools = {};
 
diff --git a/nn/runtime/test/generated/vts_models/strided_slice_float16.model.cpp b/nn/runtime/test/generated/vts_models/strided_slice_float16.model.cpp
new file mode 100644
index 0000000..31fd792
--- /dev/null
+++ b/nn/runtime/test/generated/vts_models/strided_slice_float16.model.cpp
@@ -0,0 +1,109 @@
+// clang-format off
+// Generated file (from: strided_slice_float16.mod.py). Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {2, 3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 8},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 16, .length = 8},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 24, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 28, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 32, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::STRIDED_SLICE,
+            .inputs = {0, 1, 2, 3, 4, 5, 6},
+            .outputs = {7},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {7};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/vts_models/argmax_2_float_relaxed.model.cpp b/nn/runtime/test/generated/vts_models/transpose_float16.model.cpp
similarity index 80%
rename from nn/runtime/test/generated/vts_models/argmax_2_float_relaxed.model.cpp
rename to nn/runtime/test/generated/vts_models/transpose_float16.model.cpp
index ac9ff41..03122d3 100644
--- a/nn/runtime/test/generated/vts_models/argmax_2_float_relaxed.model.cpp
+++ b/nn/runtime/test/generated/vts_models/transpose_float16.model.cpp
@@ -1,11 +1,11 @@
 // clang-format off
-// Generated file (from: argmax_2_float_relaxed.mod.py). Do not edit
+// Generated file (from: transpose_float16.mod.py). Do not edit
 // Create the model
 Model createTestModel() {
     const std::vector<Operand> operands = {
         {
-            .type = OperandType::TENSOR_FLOAT32,
-            .dimensions = {2, 2},
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 1},
             .numberOfConsumers = 1,
             .scale = 0.0f,
             .zeroPoint = 0,
@@ -13,17 +13,17 @@
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
         },
         {
-            .type = OperandType::INT32,
-            .dimensions = {},
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
             .numberOfConsumers = 1,
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+            .location = {.poolIndex = 0, .offset = 0, .length = 16},
         },
         {
-            .type = OperandType::TENSOR_INT32,
-            .dimensions = {2},
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 1},
             .numberOfConsumers = 0,
             .scale = 0.0f,
             .zeroPoint = 0,
@@ -34,7 +34,7 @@
 
     const std::vector<Operation> operations = {
         {
-            .type = OperationType::ARGMAX,
+            .type = OperationType::TRANSPOSE,
             .inputs = {0, 1},
             .outputs = {2},
         }
@@ -43,7 +43,7 @@
     const std::vector<uint32_t> inputIndexes = {0};
     const std::vector<uint32_t> outputIndexes = {2};
     std::vector<uint8_t> operandValues = {
-      0, 0, 0, 0
+      0, 0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0
     };
     const std::vector<hidl_memory> pools = {};
 
@@ -54,7 +54,6 @@
         .outputIndexes = outputIndexes,
         .operandValues = operandValues,
         .pools = pools,
-        .relaxComputationFloat32toFloat16 = true,
     };
 }
 
diff --git a/nn/runtime/test/specs/V1_2/argmin_1_float_relaxed.mod.py b/nn/runtime/test/specs/V1_2/abs.mod.py
similarity index 62%
copy from nn/runtime/test/specs/V1_2/argmin_1_float_relaxed.mod.py
copy to nn/runtime/test/specs/V1_2/abs.mod.py
index f58f2b8..376769e 100644
--- a/nn/runtime/test/specs/V1_2/argmin_1_float_relaxed.mod.py
+++ b/nn/runtime/test/specs/V1_2/abs.mod.py
@@ -14,16 +14,14 @@
 # limitations under the License.
 #
 
-input0 = Input("input0", "TENSOR_FLOAT32", "{2, 2}")
-axis = Int32Scalar("axis", 1)
-output0 = Output("output", "TENSOR_INT32", "{2}")
+input0 = Input("input0", "TENSOR_FLOAT32", "{1, 2, 3, 4, 5}")
+output0 = Output("output0", "TENSOR_FLOAT32", "{1, 2, 3, 4, 5}")
+model = Model().Operation("ABS", input0).To(output0)
 
-model = Model().Operation("ARGMIN", input0, axis).To(output0)
-model.RelaxedExecution(True)
+input_data = [(i - 60) / 10 for i in range(120)]
+output_data = [abs(x) for x in input_data]
 
-Example(({
-    input0: [1.0, 2.0,
-             4.0, 3.0],
-}, {
-    output0: [0, 1],
-}))
+Example({
+    input0: input_data,
+    output0: output_data,
+}).AddVariations("relaxed", "float16")
diff --git a/nn/runtime/test/specs/V1_2/argmax_1_float.mod.py b/nn/runtime/test/specs/V1_2/argmax_1.mod.py
similarity index 84%
rename from nn/runtime/test/specs/V1_2/argmax_1_float.mod.py
rename to nn/runtime/test/specs/V1_2/argmax_1.mod.py
index a567adc..6dc7430 100644
--- a/nn/runtime/test/specs/V1_2/argmax_1_float.mod.py
+++ b/nn/runtime/test/specs/V1_2/argmax_1.mod.py
@@ -20,9 +20,12 @@
 
 model = Model().Operation("ARGMAX", input0, axis).To(output0)
 
-Example(({
+quant8 = DataTypeConverter().Identify({
+    input0: ["TENSOR_QUANT8_ASYMM", 1.0, 0],
+})
+
+Example({
     input0: [1.0, 2.0,
              4.0, 3.0],
-}, {
     output0: [1, 0],
-}))
+}).AddVariations("relaxed", "float16", "int32", quant8)
diff --git a/nn/runtime/test/specs/V1_2/argmax_1_float_relaxed.mod.py b/nn/runtime/test/specs/V1_2/argmax_1_float_relaxed.mod.py
deleted file mode 100644
index a54e5e6..0000000
--- a/nn/runtime/test/specs/V1_2/argmax_1_float_relaxed.mod.py
+++ /dev/null
@@ -1,29 +0,0 @@
-#
-# Copyright (C) 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-input0 = Input("input0", "TENSOR_FLOAT32", "{2, 2}")
-axis = Int32Scalar("axis", 1)
-output0 = Output("output", "TENSOR_INT32", "{2}")
-
-model = Model().Operation("ARGMAX", input0, axis).To(output0)
-model.RelaxedExecution(True)
-
-Example(({
-    input0: [1.0, 2.0,
-             4.0, 3.0],
-}, {
-    output0: [1, 0],
-}))
diff --git a/nn/runtime/test/specs/V1_2/argmax_1_int32.mod.py b/nn/runtime/test/specs/V1_2/argmax_1_int32.mod.py
deleted file mode 100644
index b69e1f5..0000000
--- a/nn/runtime/test/specs/V1_2/argmax_1_int32.mod.py
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-# Copyright (C) 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-input0 = Input("input0", "TENSOR_INT32", "{2, 2}")
-axis = Int32Scalar("axis", 1)
-output0 = Output("output", "TENSOR_INT32", "{2}")
-
-model = Model().Operation("ARGMAX", input0, axis).To(output0)
-
-Example(({
-    input0: [1, 2,
-             4, 3],
-}, {
-    output0: [1, 0],
-}))
diff --git a/nn/runtime/test/specs/V1_2/argmax_1_quant8.mod.py b/nn/runtime/test/specs/V1_2/argmax_1_quant8.mod.py
deleted file mode 100644
index 38c37ce..0000000
--- a/nn/runtime/test/specs/V1_2/argmax_1_quant8.mod.py
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-# Copyright (C) 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-input0 = Input("input0", "TENSOR_QUANT8_ASYMM", "{2, 2}, 1.0, 0")
-axis = Int32Scalar("axis", 1)
-output0 = Output("output", "TENSOR_INT32", "{2}")
-
-model = Model().Operation("ARGMAX", input0, axis).To(output0)
-
-Example(({
-    input0: [1, 2,
-             4, 3],
-}, {
-    output0: [1, 0],
-}))
diff --git a/nn/runtime/test/specs/V1_2/argmax_2_float_relaxed.mod.py b/nn/runtime/test/specs/V1_2/argmax_2.mod.py
similarity index 84%
rename from nn/runtime/test/specs/V1_2/argmax_2_float_relaxed.mod.py
rename to nn/runtime/test/specs/V1_2/argmax_2.mod.py
index 5531830..69be607 100644
--- a/nn/runtime/test/specs/V1_2/argmax_2_float_relaxed.mod.py
+++ b/nn/runtime/test/specs/V1_2/argmax_2.mod.py
@@ -19,11 +19,13 @@
 output0 = Output("output", "TENSOR_INT32", "{2}")
 
 model = Model().Operation("ARGMAX", input0, axis).To(output0)
-model.RelaxedExecution(True)
 
-Example(({
+quant8 = DataTypeConverter().Identify({
+    input0: ["TENSOR_QUANT8_ASYMM", 1.0, 0],
+})
+
+Example({
     input0: [1.0, 2.0,
              4.0, 3.0],
-}, {
     output0: [1, 1],
-}))
+}).AddVariations("relaxed", "float16", "int32", quant8)
diff --git a/nn/runtime/test/specs/V1_2/argmax_2_float.mod.py b/nn/runtime/test/specs/V1_2/argmax_2_float.mod.py
deleted file mode 100644
index f1fb189..0000000
--- a/nn/runtime/test/specs/V1_2/argmax_2_float.mod.py
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-# Copyright (C) 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-input0 = Input("input0", "TENSOR_FLOAT32", "{2, 2}")
-axis = Int32Scalar("axis", 0)
-output0 = Output("output", "TENSOR_INT32", "{2}")
-
-model = Model().Operation("ARGMAX", input0, axis).To(output0)
-
-Example(({
-    input0: [1.0, 2.0,
-             4.0, 3.0],
-}, {
-    output0: [1, 1],
-}))
diff --git a/nn/runtime/test/specs/V1_2/argmax_2_int32.mod.py b/nn/runtime/test/specs/V1_2/argmax_2_int32.mod.py
deleted file mode 100644
index ad0a1ce..0000000
--- a/nn/runtime/test/specs/V1_2/argmax_2_int32.mod.py
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-# Copyright (C) 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-input0 = Input("input0", "TENSOR_INT32", "{2, 2}")
-axis = Int32Scalar("axis", 0)
-output0 = Output("output", "TENSOR_INT32", "{2}")
-
-model = Model().Operation("ARGMAX", input0, axis).To(output0)
-
-Example(({
-    input0: [1, 2,
-             4, 3],
-}, {
-    output0: [1, 1],
-}))
diff --git a/nn/runtime/test/specs/V1_2/argmax_2_quant8.mod.py b/nn/runtime/test/specs/V1_2/argmax_2_quant8.mod.py
deleted file mode 100644
index abe5395..0000000
--- a/nn/runtime/test/specs/V1_2/argmax_2_quant8.mod.py
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-# Copyright (C) 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-input0 = Input("input0", "TENSOR_QUANT8_ASYMM", "{2, 2}, 1.0, 0")
-axis = Int32Scalar("axis", 0)
-output0 = Output("output", "TENSOR_INT32", "{2}")
-
-model = Model().Operation("ARGMAX", input0, axis).To(output0)
-
-Example(({
-    input0: [1, 2,
-             4, 3],
-}, {
-    output0: [1, 1],
-}))
diff --git a/nn/runtime/test/specs/V1_2/argmax_3_float.mod.py b/nn/runtime/test/specs/V1_2/argmax_3.mod.py
similarity index 85%
rename from nn/runtime/test/specs/V1_2/argmax_3_float.mod.py
rename to nn/runtime/test/specs/V1_2/argmax_3.mod.py
index 96a8348..ab7afc6 100644
--- a/nn/runtime/test/specs/V1_2/argmax_3_float.mod.py
+++ b/nn/runtime/test/specs/V1_2/argmax_3.mod.py
@@ -22,9 +22,12 @@
 
 model = Model().Operation("ARGMAX", input0, axis).To(output0)
 
-Example(({
+quant8 = DataTypeConverter().Identify({
+    input0: ["TENSOR_QUANT8_ASYMM", 1.0, 0],
+})
+
+Example({
     input0: [1.0, 2.0,
              4.0, 3.0],
-}, {
     output0: [1, 0],
-}))
+}).AddVariations("relaxed", "float16", "int32", quant8)
diff --git a/nn/runtime/test/specs/V1_2/argmin_1_float_relaxed.mod.py b/nn/runtime/test/specs/V1_2/argmin_1.mod.py
similarity index 84%
rename from nn/runtime/test/specs/V1_2/argmin_1_float_relaxed.mod.py
rename to nn/runtime/test/specs/V1_2/argmin_1.mod.py
index f58f2b8..e89ceea 100644
--- a/nn/runtime/test/specs/V1_2/argmin_1_float_relaxed.mod.py
+++ b/nn/runtime/test/specs/V1_2/argmin_1.mod.py
@@ -19,11 +19,13 @@
 output0 = Output("output", "TENSOR_INT32", "{2}")
 
 model = Model().Operation("ARGMIN", input0, axis).To(output0)
-model.RelaxedExecution(True)
 
-Example(({
+quant8 = DataTypeConverter().Identify({
+    input0: ["TENSOR_QUANT8_ASYMM", 1.0, 0],
+})
+
+Example({
     input0: [1.0, 2.0,
              4.0, 3.0],
-}, {
     output0: [0, 1],
-}))
+}).AddVariations("relaxed", "float16", "int32", quant8)
diff --git a/nn/runtime/test/specs/V1_2/argmin_1_float.mod.py b/nn/runtime/test/specs/V1_2/argmin_1_float.mod.py
deleted file mode 100644
index 365177c..0000000
--- a/nn/runtime/test/specs/V1_2/argmin_1_float.mod.py
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-# Copyright (C) 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-input0 = Input("input0", "TENSOR_FLOAT32", "{2, 2}")
-axis = Int32Scalar("axis", 1)
-output0 = Output("output", "TENSOR_INT32", "{2}")
-
-model = Model().Operation("ARGMIN", input0, axis).To(output0)
-
-Example(({
-    input0: [1.0, 2.0,
-             4.0, 3.0],
-}, {
-    output0: [0, 1],
-}))
diff --git a/nn/runtime/test/specs/V1_2/argmin_1_int32.mod.py b/nn/runtime/test/specs/V1_2/argmin_1_int32.mod.py
deleted file mode 100644
index e2eef13..0000000
--- a/nn/runtime/test/specs/V1_2/argmin_1_int32.mod.py
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-# Copyright (C) 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-input0 = Input("input0", "TENSOR_INT32", "{2, 2}")
-axis = Int32Scalar("axis", 1)
-output0 = Output("output", "TENSOR_INT32", "{2}")
-
-model = Model().Operation("ARGMIN", input0, axis).To(output0)
-
-Example(({
-    input0: [1, 2,
-             4, 3],
-}, {
-    output0: [0, 1],
-}))
diff --git a/nn/runtime/test/specs/V1_2/argmin_1_quant8.mod.py b/nn/runtime/test/specs/V1_2/argmin_1_quant8.mod.py
deleted file mode 100644
index 994af14..0000000
--- a/nn/runtime/test/specs/V1_2/argmin_1_quant8.mod.py
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-# Copyright (C) 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-input0 = Input("input0", "TENSOR_QUANT8_ASYMM", "{2, 2}, 1.0, 0")
-axis = Int32Scalar("axis", 1)
-output0 = Output("output", "TENSOR_INT32", "{2}")
-
-model = Model().Operation("ARGMIN", input0, axis).To(output0)
-
-Example(({
-    input0: [1, 2,
-             4, 3],
-}, {
-    output0: [0, 1],
-}))
diff --git a/nn/runtime/test/specs/V1_2/argmin_2_float.mod.py b/nn/runtime/test/specs/V1_2/argmin_2.mod.py
similarity index 84%
rename from nn/runtime/test/specs/V1_2/argmin_2_float.mod.py
rename to nn/runtime/test/specs/V1_2/argmin_2.mod.py
index 1640a80..e54cff7 100644
--- a/nn/runtime/test/specs/V1_2/argmin_2_float.mod.py
+++ b/nn/runtime/test/specs/V1_2/argmin_2.mod.py
@@ -20,9 +20,12 @@
 
 model = Model().Operation("ARGMIN", input0, axis).To(output0)
 
-Example(({
+quant8 = DataTypeConverter().Identify({
+    input0: ["TENSOR_QUANT8_ASYMM", 1.0, 0],
+})
+
+Example({
     input0: [1.0, 2.0,
              4.0, 3.0],
-}, {
     output0: [0, 0],
-}))
+}).AddVariations("relaxed", "float16", "int32", quant8)
diff --git a/nn/runtime/test/specs/V1_2/argmin_2_float_relaxed.mod.py b/nn/runtime/test/specs/V1_2/argmin_2_float_relaxed.mod.py
deleted file mode 100644
index 1b9147e..0000000
--- a/nn/runtime/test/specs/V1_2/argmin_2_float_relaxed.mod.py
+++ /dev/null
@@ -1,29 +0,0 @@
-#
-# Copyright (C) 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-input0 = Input("input0", "TENSOR_FLOAT32", "{2, 2}")
-axis = Int32Scalar("axis", 0)
-output0 = Output("output", "TENSOR_INT32", "{2}")
-
-model = Model().Operation("ARGMIN", input0, axis).To(output0)
-model.RelaxedExecution(True)
-
-Example(({
-    input0: [1.0, 2.0,
-             4.0, 3.0],
-}, {
-    output0: [0, 0],
-}))
diff --git a/nn/runtime/test/specs/V1_2/argmin_2_int32.mod.py b/nn/runtime/test/specs/V1_2/argmin_2_int32.mod.py
deleted file mode 100644
index 0dd11e3..0000000
--- a/nn/runtime/test/specs/V1_2/argmin_2_int32.mod.py
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-# Copyright (C) 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-input0 = Input("input0", "TENSOR_INT32", "{2, 2}")
-axis = Int32Scalar("axis", 0)
-output0 = Output("output", "TENSOR_INT32", "{2}")
-
-model = Model().Operation("ARGMIN", input0, axis).To(output0)
-
-Example(({
-    input0: [1, 2,
-             4, 3],
-}, {
-    output0: [0, 0],
-}))
diff --git a/nn/runtime/test/specs/V1_2/argmin_2_quant8.mod.py b/nn/runtime/test/specs/V1_2/argmin_2_quant8.mod.py
deleted file mode 100644
index 620836a..0000000
--- a/nn/runtime/test/specs/V1_2/argmin_2_quant8.mod.py
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-# Copyright (C) 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-input0 = Input("input0", "TENSOR_QUANT8_ASYMM", "{2, 2}, 1.0, 0")
-axis = Int32Scalar("axis", 0)
-output0 = Output("output", "TENSOR_INT32", "{2}")
-
-model = Model().Operation("ARGMIN", input0, axis).To(output0)
-
-Example(({
-    input0: [1, 2,
-             4, 3],
-}, {
-    output0: [0, 0],
-}))
diff --git a/nn/runtime/test/specs/V1_2/argmin_3_float.mod.py b/nn/runtime/test/specs/V1_2/argmin_3.mod.py
similarity index 85%
rename from nn/runtime/test/specs/V1_2/argmin_3_float.mod.py
rename to nn/runtime/test/specs/V1_2/argmin_3.mod.py
index 493dc1a..d3cbd76 100644
--- a/nn/runtime/test/specs/V1_2/argmin_3_float.mod.py
+++ b/nn/runtime/test/specs/V1_2/argmin_3.mod.py
@@ -22,9 +22,12 @@
 
 model = Model().Operation("ARGMIN", input0, axis).To(output0)
 
-Example(({
+quant8 = DataTypeConverter().Identify({
+    input0: ["TENSOR_QUANT8_ASYMM", 1.0, 0],
+})
+
+Example({
     input0: [1.0, 2.0,
              4.0, 3.0],
-}, {
     output0: [0, 1],
-}))
+}).AddVariations("relaxed", "float16", "int32", quant8)
diff --git a/nn/runtime/test/specs/V1_2/batch_to_space_v1_2.mod.py b/nn/runtime/test/specs/V1_2/batch_to_space_v1_2.mod.py
index 576f4be..e7247bf 100644
--- a/nn/runtime/test/specs/V1_2/batch_to_space_v1_2.mod.py
+++ b/nn/runtime/test/specs/V1_2/batch_to_space_v1_2.mod.py
@@ -31,7 +31,7 @@
 example = Example({
     i1: [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1],
     o1: [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1]
-}).AddNchw(i1, o1, layout).AddVariations("relaxed", quant8)
+}).AddNchw(i1, o1, layout).AddVariations("relaxed", "float16", quant8)
 
 
 # TEST 2: BATCH_TO_SPACE_NCHW_2, block_size = [2, 2]
@@ -49,4 +49,4 @@
 example = Example({
     i2: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
     o2: [1, 5, 2, 6, 9, 13, 10, 14, 3, 7, 4, 8, 11, 15, 12, 16]
-}).AddNchw(i2, o2, layout).AddVariations("relaxed", quant8)
+}).AddNchw(i2, o2, layout).AddVariations("relaxed", "float16", quant8)
diff --git a/nn/runtime/test/specs/V1_2/concat_mixed_quant.mod.py b/nn/runtime/test/specs/V1_2/concat_mixed_quant.mod.py
new file mode 100644
index 0000000..6610fea
--- /dev/null
+++ b/nn/runtime/test/specs/V1_2/concat_mixed_quant.mod.py
@@ -0,0 +1,56 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Adapted from tensorflow/lite/kernels/concatenation_test.cc
+
+input0 = Input("input0", "TENSOR_FLOAT32", "{2, 1, 2}")
+input1 = Input("input1", "TENSOR_FLOAT32", "{2, 1, 2}")
+input2 = Input("input2", "TENSOR_FLOAT32", "{2, 1, 2}")
+input3 = Input("input3", "TENSOR_FLOAT32", "{2, 1, 2}")
+axis = 2
+output0 = Output("output0", "TENSOR_FLOAT32", "{2, 1, 8}")
+
+model = Model().Operation("CONCATENATION", input0, input1, input2, input3, axis).To(output0)
+
+# FourInputsQuantizedMixedRange
+Example({
+    input0: [1.0, -3.0, -4.0, -7.0],
+    input1: [1.1, 3.1, 4.1, 7.1],
+    input2: [1.2, -3.2, -4.2, 7.2],
+    input3: [1.3, 3.3, 4.3, 7.3],
+    output0: [1.0, -3.0, 1.1, 3.1, 1.2, -3.2, 1.3, 3.3, -4.0, -7.0, 4.1, 7.1, -4.2, 7.2, 4.3, 7.3],
+}).AddVariations(DataTypeConverter().Identify({
+    input0: ["TENSOR_QUANT8_ASYMM", 0.084, 127],
+    input1: ["TENSOR_QUANT8_ASYMM", 0.05, 0],
+    input2: ["TENSOR_QUANT8_ASYMM", 0.089, 123],
+    input3: ["TENSOR_QUANT8_ASYMM", 0.029, 0],
+    output0: ["TENSOR_QUANT8_ASYMM", 0.1, 127],
+}), includeDefault=False)
+
+# FourInputsQuantizedMixedRangeClampingLogic
+Example({
+    input0: [1.0, -3.0, -4.0, -7.0],
+    input1: [1.1, 3.1, 4.1, 7.1],
+    input2: [1.2, -3.2, -4.2, 7.2],
+    input3: [1.3, 3.3, 4.3, 7.3],
+    output0: [1.0, -1.0, 1.0, 1.0, 1.0, -1.0, 1.0, 1.0, -1.0, -1.0, 1.0, 1.0, -1.0, 1.0, 1.0, 1.0]
+}).AddVariations(DataTypeConverter().Identify({
+    input0: ["TENSOR_QUANT8_ASYMM", 0.084, 127],
+    input1: ["TENSOR_QUANT8_ASYMM", 0.05, 0],
+    input2: ["TENSOR_QUANT8_ASYMM", 0.089, 123],
+    input3: ["TENSOR_QUANT8_ASYMM", 0.029, 0],
+    output0: ["TENSOR_QUANT8_ASYMM", 0.0078125, 127],
+}), includeDefault=False)
diff --git a/nn/runtime/test/specs/V1_2/depth_to_space_v1_2.mod.py b/nn/runtime/test/specs/V1_2/depth_to_space_v1_2.mod.py
index 10fa153..8d9de4b 100644
--- a/nn/runtime/test/specs/V1_2/depth_to_space_v1_2.mod.py
+++ b/nn/runtime/test/specs/V1_2/depth_to_space_v1_2.mod.py
@@ -31,7 +31,7 @@
 example = Example({
     i1: [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1],
     o1: [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1]
-}).AddNchw(i1, o1, layout).AddVariations("relaxed", quant8)
+}).AddNchw(i1, o1, layout).AddVariations("relaxed", "float16", quant8)
 
 
 # TEST 2: DEPTH_TO_SPACE_NCHW_2, block_size = 2
@@ -49,7 +49,7 @@
 example = Example({
     i2: [1., 2., 5., 6., 3., 4., 7., 8., 9., 10., 13., 14., 11., 12., 15., 16.],
     o2: [1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16.]
-}).AddNchw(i2, o2, layout).AddVariations("relaxed", quant8)
+}).AddNchw(i2, o2, layout).AddVariations("relaxed", "float16", quant8)
 
 
 # TEST 3: DEPTH_TO_SPACE_NCHW_3, block_size = 2
@@ -73,4 +73,4 @@
          14,   24,  15,  25,  16,  26, 17,   27,
          18,   28,  19,  29, 110, 210, 111, 211,
         112,  212, 113, 213, 114, 214, 115, 215]
-}).AddNchw(i3, o3, layout).AddVariations("relaxed", quant8)
+}).AddNchw(i3, o3, layout).AddVariations("relaxed", "float16", quant8)
diff --git a/nn/runtime/test/specs/V1_2/depthwise_conv2d_v1_2.mod.py b/nn/runtime/test/specs/V1_2/depthwise_conv2d_v1_2.mod.py
index b813749..02a20d8 100644
--- a/nn/runtime/test/specs/V1_2/depthwise_conv2d_v1_2.mod.py
+++ b/nn/runtime/test/specs/V1_2/depthwise_conv2d_v1_2.mod.py
@@ -40,7 +40,7 @@
          11, 3, 7.4, 10.9,
          11, 3, 7.8, 11.5,
          11, 3, 8.0, 11.8]
-}).AddNchw(i1, o1, layout).AddInput(f1, b1).AddVariations("relaxed", quant8)
+}).AddNchw(i1, o1, layout).AddInput(f1, b1).AddVariations("relaxed", "float16", quant8)
 
 
 # TEST 2: DEPTHWISE_CONV2D_NCHW_2, pad = valid, stride = 1, cm = 2, act = none
@@ -62,7 +62,7 @@
 example = Example({
     i2: [1, 2, 7, 8, 3, 4, 9, 10, 5, 6, 11, 12],
     o2: [71, -34, 99, -20, 91, -26, 127, -4]
-}).AddNchw(i2, o2, layout).AddInput(f2, b2).AddVariations("relaxed", quant8)
+}).AddNchw(i2, o2, layout).AddInput(f2, b2).AddVariations("relaxed", "float16", quant8)
 
 
 # TEST 3: DEPTHWISE_CONV2D_NCHW_LARGE, pad = 0, stride = 1, cm = 1, act = none
@@ -84,7 +84,7 @@
 example = Example({
     i3: [10, 21, 10, 22, 10, 23, 10, 24],
     o3: [110, 246]
-}).AddNchw(i3, o3, layout).AddInput(f3, b3).AddVariations("relaxed", quant8)
+}).AddNchw(i3, o3, layout).AddInput(f3, b3).AddVariations("relaxed", "float16", quant8)
 
 
 # TEST 4: DEPTHWISE_CONV2D_NCHW_LARGE, pad = 0, stride = 1, cm = 1, act = none
@@ -109,4 +109,4 @@
          10, 23, 30, 0,
          10, 24, 40, 0],
     o4: [6010, 7046, 11000, 9000]
-}).AddNchw(i4, o4, layout).AddInput(f4, b4).AddVariations("relaxed", quant8)
+}).AddNchw(i4, o4, layout).AddInput(f4, b4).AddVariations("relaxed", "float16", quant8)
diff --git a/nn/runtime/test/specs/V1_2/pad_float16.mod.py b/nn/runtime/test/specs/V1_2/pad_float16.mod.py
new file mode 100644
index 0000000..7a6b29c
--- /dev/null
+++ b/nn/runtime/test/specs/V1_2/pad_float16.mod.py
@@ -0,0 +1,20 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT16", "{1, 2, 2, 1}")
+i2 = Parameter("op2", "TENSOR_INT32", "{4, 2}", [0, 0, 1, 1, 1, 1, 0, 0])
+i3 = Output("op3", "TENSOR_FLOAT16", "{1, 4, 4, 1}")
+model = model.Operation("PAD", i1, i2).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1.0, 2.0,
+           3.0, 4.0,]}
+
+output0 = {i3: # output 0
+           [0.0, 0.0, 0.0, 0.0,
+            0.0, 1.0, 2.0, 0.0,
+            0.0, 3.0, 4.0, 0.0,
+            0.0, 0.0, 0.0, 0.0]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/nn/runtime/test/specs/V1_2/pad_v2_1_float.mod.py b/nn/runtime/test/specs/V1_2/pad_v2_1_float.mod.py
index 2833b14..f6caa0f 100644
--- a/nn/runtime/test/specs/V1_2/pad_v2_1_float.mod.py
+++ b/nn/runtime/test/specs/V1_2/pad_v2_1_float.mod.py
@@ -32,4 +32,4 @@
               9.3, 4.0, 5.0, 6.0, 9.3, 9.3, 9.3,
               9.3, 9.3, 9.3, 9.3, 9.3, 9.3, 9.3,
               9.3, 9.3, 9.3, 9.3, 9.3, 9.3, 9.3],
-}))
+})).AddVariations("float16")
diff --git a/nn/runtime/test/specs/V1_2/reshape_float16.mod.py b/nn/runtime/test/specs/V1_2/reshape_float16.mod.py
new file mode 100644
index 0000000..f54cba6
--- /dev/null
+++ b/nn/runtime/test/specs/V1_2/reshape_float16.mod.py
@@ -0,0 +1,18 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT16", "{1, 1, 3, 3}") # a line of 3 pixels, 3 components/pixel
+i2 = Parameter("op2", "TENSOR_INT32", "{1}", [-1]) # target shape: -1 means flatten to one dimension
+i3 = Output("op3", "TENSOR_FLOAT16", "{9}")
+model = model.Operation("RESHAPE", i1, i2).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2, 3,
+           4, 5, 6,
+           7, 8, 9]}
+
+output0 = {i3: # output 0
+           [1, 2, 3, 4, 5, 6, 7, 8, 9]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/nn/runtime/test/specs/V1_2/resize_bilinear_v1_2.mod.py b/nn/runtime/test/specs/V1_2/resize_bilinear_v1_2.mod.py
index c7760b0..1ff400d 100644
--- a/nn/runtime/test/specs/V1_2/resize_bilinear_v1_2.mod.py
+++ b/nn/runtime/test/specs/V1_2/resize_bilinear_v1_2.mod.py
@@ -27,7 +27,7 @@
     o1: [1.0, 1.0, 1.0,
          1.666666667, 1.666666667, 1.666666667,
          2.0, 2.0, 2.0]
-}).AddNchw(i1, o1, layout).AddRelaxed()
+}).AddNchw(i1, o1, layout).AddVariations("relaxed", "float16")
 
 
 # TEST 2: RESIZE_BILINEAR_NCHW_2, h = 3, w = 3
@@ -41,4 +41,4 @@
     o2: [3, 4, 5, 8, 6, 10,
          7, 8, 9, 12, 10, 14,
          9, 10, 11, 14, 12, 16,]
-}).AddNchw(i2, o2, layout).AddRelaxed()
+}).AddNchw(i2, o2, layout).AddVariations("relaxed", "float16")
diff --git a/nn/runtime/test/specs/V1_2/roi_pooling.mod.py b/nn/runtime/test/specs/V1_2/roi_pooling.mod.py
new file mode 100644
index 0000000..243bb76
--- /dev/null
+++ b/nn/runtime/test/specs/V1_2/roi_pooling.mod.py
@@ -0,0 +1,94 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+layout = BoolScalar("layout", False) # NHWC
+
+# TEST 1: ROI_POOLING_1, outputShape = [2, 2], spatialScale = 0.5
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 4, 4, 1}")
+roi1 = Input("roi", "TENSOR_FLOAT32", "{4, 4}")
+o1 = Output("out", "TENSOR_FLOAT32", "{4, 2, 2, 1}")
+Model().Operation("ROI_POOLING", i1, roi1, [2, 2], 0.5, layout).To(o1)
+
+quant8 = DataTypeConverter().Identify({
+    i1: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
+    o1: ("TENSOR_QUANT8_ASYMM", 0.25, 128)
+})
+
+# Instantiate an example
+Example({
+    i1: [
+        -10, -1,  4, -5,
+        -8, -2,  9,  1,
+         7, -2,  3, -7,
+        -2, 10, -3,  5
+    ],
+    roi1: [
+        2, 2, 4, 4,
+        0, 0, 6, 6,
+        2, 0, 4, 6,
+        0, 2, 6, 4
+    ],
+    o1: [
+        -2, 9, -2, 3,
+        -1, 9, 10, 5,
+        -1, 9, 10, 3,
+        -2, 9,  7, 3
+    ]
+}).AddNchw(i1, o1, layout).AddVariations("relaxed", quant8)
+
+
+# TEST 2: ROI_POOLING_2, outputShape = [2, 3], spatialScale = 0.25
+i2 = Input("in", "TENSOR_FLOAT32", "{2, 4, 8, 2}")
+roi2 = Input("roi", "TENSOR_FLOAT32", "{4, 5}")
+o2 = Output("out", "TENSOR_FLOAT32", "{4, 2, 3, 2}")
+Model().Operation("ROI_POOLING", i2, roi2, [2, 3], 0.25, layout).To(o2)
+
+quant8 = DataTypeConverter().Identify({
+    i2: ("TENSOR_QUANT8_ASYMM", 0.04, 0),
+    o2: ("TENSOR_QUANT8_ASYMM", 0.04, 0)
+})
+
+# Instantiate an example
+Example({
+    i2: [
+        8.84, 8.88, 7.41, 5.60, 9.95, 4.37, 0.10, 7.64, 6.50, 9.47,
+        7.55, 3.00, 0.89, 3.01, 6.30, 4.40, 1.64, 6.74, 6.16, 8.60,
+        5.85, 3.17, 7.12, 6.79, 5.77, 6.62, 5.13, 8.44, 5.08, 7.12,
+        2.84, 1.19, 8.37, 0.90, 7.86, 9.69, 1.97, 1.31, 4.42, 9.89,
+        0.18, 9.00, 9.30, 0.44, 5.05, 6.47, 1.09, 9.50, 1.30, 2.18,
+        2.05, 7.74, 7.66, 0.65, 4.18, 7.14, 5.35, 7.90, 1.04, 1.47,
+        9.01, 0.95, 4.07, 0.65,
+        5.47, 2.64, 0.86, 4.86, 2.38, 2.45, 8.77, 0.06, 3.60, 9.28,
+        5.84, 8.97, 6.89, 1.43, 3.90, 5.91, 7.40, 9.25, 3.12, 4.92,
+        1.87, 3.22, 9.50, 6.73, 2.07, 7.30, 3.07, 4.97, 0.24, 8.91,
+        1.09, 0.27, 7.29, 6.94, 2.31, 6.88, 4.33, 1.37, 0.86, 0.46,
+        6.07, 3.81, 0.86, 6.99, 4.36, 1.92, 8.19, 3.57, 7.90, 6.78,
+        4.64, 6.82, 6.18, 9.63, 2.63, 2.33, 1.36, 2.70, 9.99, 9.85,
+        8.06, 4.80, 7.80, 5.43
+    ],
+    roi2: [
+        0, 4, 4, 24, 8,
+        0, 4, 4, 28, 12,
+        1, 7, 1, 25, 11,   # test rounding
+        1, 1, 7,  5, 11    # test roi with shape smaller than output
+    ],
+    o2: [
+        6.16, 8.60, 7.12, 6.79, 5.13, 8.44, 7.86, 9.69, 4.42, 9.89, 9.30, 6.47,
+        7.86, 9.89, 9.30, 9.89, 9.30, 9.50, 7.86, 9.89, 9.30, 9.89, 9.30, 9.50,
+        9.50, 6.73, 9.50, 9.28, 6.89, 8.97, 6.18, 9.63, 9.99, 9.85, 9.99, 9.85,
+        7.29, 6.94, 7.29, 6.94, 2.31, 6.88, 7.90, 6.78, 7.90, 6.82, 4.64, 6.82
+    ]
+}).AddNchw(i2, o2, layout).AddVariations("relaxed", quant8)
diff --git a/nn/runtime/test/specs/V1_2/slice.mod.py b/nn/runtime/test/specs/V1_2/slice.mod.py
new file mode 100644
index 0000000..6326f2e
--- /dev/null
+++ b/nn/runtime/test/specs/V1_2/slice.mod.py
@@ -0,0 +1,107 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import collections
+
+TestCase = collections.namedtuple("TestCase", [
+    "inp", "inp_data", "begin", "begin_data", "size", "size_data", "output",
+    "output_data"
+])
+
+test_cases = [
+    TestCase(
+        inp=Input("input", "TENSOR_FLOAT32", "{4}"),
+        inp_data=[1, 2, 3, 4],
+        begin=Input("begin", "TENSOR_INT32", "{1}"),
+        begin_data=[1],
+        size=Input("size", "TENSOR_INT32", "{1}"),
+        size_data=[2],
+        output=Output("output", "TENSOR_FLOAT32", "{2}"),
+        output_data=[2, 3]),
+    TestCase(
+        inp=Input("input", "TENSOR_FLOAT32", "{2,3}"),
+        inp_data=[1, 2, 3, 4, 5, 6],
+        begin=Input("begin", "TENSOR_INT32", "{2}"),
+        begin_data=[1, 0],
+        size=Input("size", "TENSOR_INT32", "{2}"),
+        size_data=[1, 2],
+        output=Output("output", "TENSOR_FLOAT32", "{1, 2}"),
+        output_data=[4, 5]),
+    TestCase(
+        inp=Input("input", "TENSOR_FLOAT32", "{2,3,2}"),
+        inp_data=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
+        begin=Input("begin", "TENSOR_INT32", "{3}"),
+        begin_data=[0, 0, 0],
+        size=Input("size", "TENSOR_INT32", "{3}"),
+        size_data=[2, 3, 2],
+        output=Output("output", "TENSOR_FLOAT32", "{2, 3, 2}"),
+        output_data=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]),
+    TestCase(
+        inp=Input("input", "TENSOR_FLOAT32", "{4, 1, 1, 1}"),
+        inp_data=[1, 2, 3, 4],
+        begin=Input("begin", "TENSOR_INT32", "{4}"),
+        begin_data=[1, 0, 0, 0],
+        size=Input("size", "TENSOR_INT32", "{4}"),
+        size_data=[3, 1, 1, 1],
+        output=Output("output", "TENSOR_FLOAT32", "{3, 1, 1, 1}"),
+        output_data=[2, 3, 4]),
+    TestCase(
+        inp=Input("input", "TENSOR_INT32", "{3, 2, 3, 1}"),
+        inp_data=[1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6],
+        begin=Input("begin", "TENSOR_INT32", "{4}"),
+        begin_data=[1, 0, 0, 0],
+        size=Input("size", "TENSOR_INT32", "{4}"),
+        size_data=[1, 1, 3, 1],
+        output=Output("output", "TENSOR_INT32", "{1, 1, 3, 1}"),
+        output_data=[3, 3, 3]),
+    TestCase(
+        inp=Input("input", "TENSOR_INT32", "{3, 2, 3, 1}"),
+        inp_data=[1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6],
+        begin=Input("begin", "TENSOR_INT32", "{4}"),
+        begin_data=[1, 0, 0, 0],
+        size=Input("size", "TENSOR_INT32", "{4}"),
+        size_data=[2, 1, 3, 1],
+        output=Output("output", "TENSOR_INT32", "{2, 1, 3, 1}"),
+        output_data=[3, 3, 3, 5, 5, 5]),
+    TestCase(
+        inp=Input("input", "TENSOR_QUANT8_ASYMM", "{3, 2, 3, 1}, 2.0, 128"),
+        inp_data=[1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6],
+        begin=Input("begin", "TENSOR_INT32", "{4}"),
+        begin_data=[1, 0, 0, 0],
+        size=Input("size", "TENSOR_INT32", "{4}"),
+        size_data=[2, 1, 3, 1],
+        output=Output("output", "TENSOR_QUANT8_ASYMM", "{2, 1, 3, 1}, 2.0, 128"),
+        output_data=[3, 3, 3, 5, 5, 5]),
+    TestCase(
+        inp=Input("input", "TENSOR_INT32", "{3, 2, 3, 1}"),
+        inp_data=[1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6],
+        begin=Input("begin", "TENSOR_INT32", "{4}"),
+        begin_data=[1, 0, 0, 0],
+        size=Input("size", "TENSOR_INT32", "{4}"),
+        size_data=[2, 1, -1, 1],
+        output=Output("output", "TENSOR_INT32", "{2, 1, 3, 1}"),
+        output_data=[3, 3, 3, 5, 5, 5]),
+]
+
+for test_case in test_cases:
+  model = Model().Operation("SLICE", test_case.inp, test_case.begin,
+                            test_case.size).To(test_case.output)
+  Example({
+      test_case.inp: test_case.inp_data,
+      test_case.begin: test_case.begin_data,
+      test_case.size: test_case.size_data,
+      test_case.output: test_case.output_data,
+  },
+          model=model).AddVariations("relaxed", "float16")
diff --git a/nn/runtime/test/specs/V1_2/space_to_batch_v1_2.mod.py b/nn/runtime/test/specs/V1_2/space_to_batch_v1_2.mod.py
index 73ce1eb..dfe234f 100644
--- a/nn/runtime/test/specs/V1_2/space_to_batch_v1_2.mod.py
+++ b/nn/runtime/test/specs/V1_2/space_to_batch_v1_2.mod.py
@@ -32,7 +32,7 @@
 example = Example({
     i1: [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1],
     o1: [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1]
-}).AddNchw(i1, o1, layout).AddVariations("relaxed", quant8)
+}).AddNchw(i1, o1, layout).AddVariations("relaxed", "float16", quant8)
 
 
 # TEST 2: SPACE_TO_BATCH_NCHW_2, block_size = [2, 2]
@@ -50,7 +50,7 @@
 example = Example({
     i2: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
     o2: [1, 3, 9, 11, 2, 4, 10, 12, 5, 7, 13, 15, 6, 8, 14, 16]
-}).AddNchw(i2, o2, layout).AddVariations("relaxed", quant8)
+}).AddNchw(i2, o2, layout).AddVariations("relaxed", "float16", quant8)
 
 
 # TEST 3: SPACE_TO_BATCH_NCHW_3, block_size = [3, 2]
@@ -70,7 +70,7 @@
     i3: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
     o3: [0, 0, 0, 5, 0, 0, 0, 6, 0, 1, 0, 7,
          0, 2, 0, 8, 0, 3, 0, 9, 0, 4, 0, 10]
-}).AddNchw(i3, o3, layout).AddVariations("relaxed", quant8)
+}).AddNchw(i3, o3, layout).AddVariations("relaxed", "float16", quant8)
 
 
 # TEST 4: SPACE_TO_BATCH_NCHW_4, block_size = [3, 2]
@@ -91,4 +91,4 @@
     o4: [0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0,
          0, 1, 0, 0, 0, 7, 0, 0, 0, 2, 0, 0, 0, 8, 0, 0,
          0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0]
-}).AddNchw(i4, o4, layout).AddVariations("relaxed", quant8)
+}).AddNchw(i4, o4, layout).AddVariations("relaxed", "float16", quant8)
diff --git a/nn/runtime/test/specs/V1_2/space_to_depth_v1_2.mod.py b/nn/runtime/test/specs/V1_2/space_to_depth_v1_2.mod.py
index 017d2c9..9f1a799 100644
--- a/nn/runtime/test/specs/V1_2/space_to_depth_v1_2.mod.py
+++ b/nn/runtime/test/specs/V1_2/space_to_depth_v1_2.mod.py
@@ -31,7 +31,7 @@
 example = Example({
     i1: [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1],
     o1: [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1]
-}).AddNchw(i1, o1, layout).AddVariations("relaxed", quant8)
+}).AddNchw(i1, o1, layout).AddVariations("relaxed", "float16", quant8)
 
 
 # TEST 2: SPACE_TO_DEPTH_NCHW_2, block_size = 2
@@ -49,7 +49,7 @@
 example = Example({
     i2: [1., 2., 5., 6., 3., 4., 7., 8., 9., 10., 13., 14., 11., 12., 15., 16.],
     o2: [1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16.]
-}).AddNchw(i2, o2, layout).AddVariations("relaxed", quant8)
+}).AddNchw(i2, o2, layout).AddVariations("relaxed", "float16", quant8)
 
 
 # TEST 3: SPACE_TO_DEPTH_NCHW_3, block_size = 2
@@ -73,4 +73,4 @@
          12,   22,  13,  23, 16,   26,  17,  27,
          18,   28,  19,  29, 112, 212, 113, 213,
          110, 210, 111, 211, 114, 214, 115, 215]
-}).AddNchw(i3, o3, layout).AddVariations("relaxed", quant8)
+}).AddNchw(i3, o3, layout).AddVariations("relaxed", "float16", quant8)
diff --git a/nn/runtime/test/specs/V1_2/squeeze_float16.mod.py b/nn/runtime/test/specs/V1_2/squeeze_float16.mod.py
new file mode 100644
index 0000000..e5f18a5
--- /dev/null
+++ b/nn/runtime/test/specs/V1_2/squeeze_float16.mod.py
@@ -0,0 +1,16 @@
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT16", "{4, 1, 1, 2}")
+squeezeDims = Parameter("squeezeDims", "TENSOR_INT32", "{2}", [1, 2])
+output = Output("output", "TENSOR_FLOAT16", "{4, 2}")
+
+model = model.Operation("SQUEEZE", i1, squeezeDims).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1]}
+
+output0 = {output: # output 0
+           [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/nn/runtime/test/specs/V1_2/strided_slice_float16.mod.py b/nn/runtime/test/specs/V1_2/strided_slice_float16.mod.py
new file mode 100644
index 0000000..88735f5
--- /dev/null
+++ b/nn/runtime/test/specs/V1_2/strided_slice_float16.mod.py
@@ -0,0 +1,23 @@
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT16", "{2, 3}")
+begins = Parameter("begins", "TENSOR_INT32", "{2}", [0, 0])
+ends = Parameter("ends", "TENSOR_INT32", "{2}", [2, 3])
+strides = Parameter("strides", "TENSOR_INT32", "{2}", [2, 2])
+beginMask = Int32Scalar("beginMask", 0)
+endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
+
+output = Output("output", "TENSOR_FLOAT16", "{1, 2}")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1.0, 2.0, 3.0,
+           4.0, 5.0, 6.0]}
+
+output0 = {output: # output 0
+           [1.0, 3.0]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/nn/runtime/test/specs/V1_2/transpose_float16.mod.py b/nn/runtime/test/specs/V1_2/transpose_float16.mod.py
new file mode 100644
index 0000000..79b3796
--- /dev/null
+++ b/nn/runtime/test/specs/V1_2/transpose_float16.mod.py
@@ -0,0 +1,18 @@
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT16", "{1, 2, 2, 1}")
+perms = Parameter("perms", "TENSOR_INT32", "{4}", [0, 2, 1, 3])
+output = Output("output", "TENSOR_FLOAT16", "{1, 2, 2, 1}")
+
+model = model.Operation("TRANSPOSE", i1, perms).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1.0, 2.0,
+           3.0, 4.0]}
+
+output0 = {output: # output 0
+          [1.0, 3.0,
+           2.0, 4.0]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/nn/tools/test_generator/cts_generator.py b/nn/tools/test_generator/cts_generator.py
index 07d6d18..e818695 100755
--- a/nn/tools/test_generator/cts_generator.py
+++ b/nn/tools/test_generator/cts_generator.py
@@ -213,7 +213,8 @@
 
 # Dump Example file for Cts tests
 def DumpCtsExample(example, example_fd):
-    print("std::vector<MixedTypedExample> %s = {"%(example.examplesName), file=example_fd)
+    print("std::vector<MixedTypedExample>& get_%s() {" % (example.examplesName), file=example_fd)
+    print("static std::vector<MixedTypedExample> %s = {" % (example.examplesName), file=example_fd)
     for inputFeedDict, outputFeedDict in example.feedDicts:
         print ('// Begin of an example', file = example_fd)
         print ('{\n.operands = {', file = example_fd)
@@ -226,6 +227,8 @@
           print ('.expectedMultinomialDistributionTolerance = %f' %
                  example.expectedMultinomialDistributionTolerance, file = example_fd)
         print ('}, // End of an example', file = example_fd)
+    print("};", file=example_fd)
+    print("return %s;" % (example.examplesName), file=example_fd)
     print("};\n", file=example_fd)
 
 # Dump Test file for Cts tests
@@ -234,7 +237,7 @@
 TEST_F(GeneratedTests, {test_name}) {{
     execute({namespace}::{create_model_name},
             {namespace}::{is_ignored_name},
-            {namespace}::{examples_name}{log_file});\n}}\n"""
+            {namespace}::get_{examples_name}(){log_file});\n}}\n"""
     print(testTemplate.format(
         test_name=str(example.testName),
         namespace=tg.FileNames.specName,
diff --git a/nn/tools/test_generator/vts_generator.py b/nn/tools/test_generator/vts_generator.py
index 68403c8..ca1fe2f 100755
--- a/nn/tools/test_generator/vts_generator.py
+++ b/nn/tools/test_generator/vts_generator.py
@@ -27,6 +27,7 @@
 import argparse
 from functools import reduce
 import math
+import numpy as np
 import os
 import re
 import struct
@@ -121,6 +122,10 @@
             binit += w.value
         elif ty == "BOOL":
             binit += [1 if x else 0 for x in w.value]
+        elif ty == "TENSOR_FLOAT16":
+            for f in w.value:
+                # The pack format for float16 is not available until Python 3.6.
+                binit += [int(x) for x in np.float16(f).tostring()]
         elif ty in {"TENSOR_FLOAT32", "FLOAT32", "TENSOR_INT32", "INT32"}:
             fmt = "f" if (ty == "TENSOR_FLOAT32" or ty == "FLOAT32") else "i"
             for f in w.value:
@@ -242,11 +247,11 @@
   generated_tests::Execute(device,
                            {namespace}::{create_model_name},
                            {namespace}::{is_ignored_name},
-                           {namespace}::{examples_name});\n}}
+                           {namespace}::get_{examples_name}());\n}}
 
 TEST_F(ValidationTest, {test_name}) {{
   const Model model = {namespace}::{create_model_name}();
-  const std::vector<Request> requests = createRequests({namespace}::{examples_name});
+  const std::vector<Request> requests = createRequests({namespace}::get_{examples_name}());
   validateModel(model);
   validateRequests(model, requests);
 }}\n