IVGCVSW-5396 TfLiteDelegate: Implement the Resize operators

* Added resize bilinear and nearest neighbour operator
  support to the tflite delegate
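
* For reference, attaching the delegate to a TfLite interpreter follows
  the same pattern as the new test helper; a minimal sketch, assuming an
  existing interpreter and a vector of backend ids:

      armnnDelegate::DelegateOptions delegateOptions(backends);
      std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
          theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                           armnnDelegate::TfLiteArmnnDelegateDelete);
      // Resize nodes (among others) are now handed over to Arm NN
      interpreter->ModifyGraphWithDelegate(theArmnnDelegate.get());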

Signed-off-by: Jan Eilers <jan.eilers@arm.com>
Change-Id: Id0113d6b865ea282c6f4de55e8419a6244a35f0e
diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index 2ee00f3..0dc72c2 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -102,7 +102,10 @@
         src/test/Pooling2dTest.cpp
         src/test/Pooling2dTestHelper.hpp
         src/test/QuantizationTest.cpp
-        src/test/QuantizationTestHelper.hpp)
+        src/test/QuantizationTestHelper.hpp
+        src/test/ResizeTest.cpp
+        src/test/ResizeTestHelper.hpp
+        src/test/TestUtils.hpp)
 
     add_executable(DelegateUnitTests ${armnnDelegate_unittest_sources})
     target_include_directories(DelegateUnitTests PRIVATE third-party)
diff --git a/delegate/src/Resize.hpp b/delegate/src/Resize.hpp
index be40b64..f91cdb0 100644
--- a/delegate/src/Resize.hpp
+++ b/delegate/src/Resize.hpp
@@ -5,21 +5,194 @@
 
 #pragma once
 
+#include "DelegateUtils.hpp"
+
+#include <armnn/Descriptors.hpp>
+
 #include <tensorflow/lite/builtin_ops.h>
 #include <tensorflow/lite/c/builtin_op_data.h>
 #include <tensorflow/lite/c/common.h>
 #include <tensorflow/lite/minimal_logging.h>
+#include <tensorflow/lite/kernels/internal/tensor_ctypes.h>
 
 namespace armnnDelegate
 {
 
+
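+// Queries the configured backends whether a Resize layer with the given input and
+// output infos and descriptor is supported; nothing is added to the network here.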
+TfLiteStatus ValidateResizeOperator(DelegateData& delegateData,
+                                    TfLiteContext* tfLiteContext,
+                                    const armnn::TensorInfo& inputInfo,
+                                    const armnn::TensorInfo& outputInfo,
+                                    const armnn::ResizeDescriptor& descriptor)
+{
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               tfLiteContext,
+                               IsResizeSupported,
+                               delegateData.m_Backends,
+                               isSupported,
+                               inputInfo,
+                               outputInfo,
+                               descriptor);
+
+    return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
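+// Parses a RESIZE_BILINEAR or RESIZE_NEAREST_NEIGHBOR node. When delegateData holds
+// no network pointer only backend support is checked, otherwise a Resize layer is
+// added to the network and connected to its neighbouring layers.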
 TfLiteStatus VisitResizeOperator(DelegateData& delegateData,
                                  TfLiteContext* tfLiteContext,
                                  TfLiteNode* tfLiteNode,
                                  int nodeIndex,
                                  int32_t resizeOperatorCode)
 {
-    return kTfLiteError;
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+
+    // The first input contains the data of the image that should be resized [batch, height, width, channels]
+    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+    if (IsDynamicTensor(tfLiteInputTensor))
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+            resizeOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    // The second input contains a size tensor. The size tensor contains two integer values
+    // that describe the new height and width of the image [new_height, new_width]
+    const TfLiteTensor& tfLiteSizeTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
+    if (IsDynamicTensor(tfLiteSizeTensor))
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+            resizeOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    // The output tensor should have the shape [batch, new_height, new_width, channels]
+    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+    if (IsDynamicTensor(tfLiteOutputTensor))
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
+            resizeOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+    const armnn::TensorInfo& sizeTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteSizeTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+
+    std::string layerName("Resize");
+
+    // Fill descriptor
+    armnn::ResizeDescriptor desc;
+    switch (resizeOperatorCode)
+    {
+        case kTfLiteBuiltinResizeBilinear:
+        {
+            desc.m_Method = armnn::ResizeMethod::Bilinear;
+
+            layerName += "Bilinear:" + nodeIndex;
+
+            TfLiteResizeBilinearParams* biliniarOptions =
+                    reinterpret_cast<TfLiteResizeBilinearParams*>(tfLiteNode->builtin_data);
+
+            desc.m_AlignCorners = biliniarOptions->align_corners;
+            desc.m_HalfPixelCenters = biliniarOptions->half_pixel_centers;
+            break;
+        }
+        case kTfLiteBuiltinResizeNearestNeighbor:
+        {
+            desc.m_Method = armnn::ResizeMethod::NearestNeighbor;
+            layerName += "NearestNeighbor:" + std::to_string(nodeIndex);
+
+            TfLiteResizeNearestNeighborParams* nearestNeighborOptions =
+                    reinterpret_cast<TfLiteResizeNearestNeighborParams*>(tfLiteNode->builtin_data);
+
+            desc.m_AlignCorners = nearestNeighborOptions->align_corners;
+            desc.m_HalfPixelCenters = nearestNeighborOptions->half_pixel_centers;
+            break;
+        }
+        default:
+        {
+            TF_LITE_MAYBE_KERNEL_LOG(
+                    tfLiteContext,
+                    "TfLiteArmnnDelegate: Unknown TfLite built in operation for Resize. Given operator: #%d node #%d: ",
+                    resizeOperatorCode, nodeIndex);
+            return kTfLiteError;
+        }
+    }
+
+    // In Arm NN the values of the size input tensor [new_height, new_width] are stored in the
+    // operator descriptor. We have to read them from the input tensor and write them to the descriptor.
+
+    auto* sizeTensorDataPtr = tflite::GetTensorData<int32_t>(&tfLiteSizeTensor);
+    auto sizeTensorNumDimensions = tfLiteSizeTensor.dims->size;
+    // The size tensor is expected to be a 1D tensor -> [new_height, new_width]
+    if (sizeTensorNumDimensions != 1)
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnDelegate: The Size-Input-Tensor of the Resize operation is required to be a "
+                "1D tensor. Operator: #%d node #%d: ",
+                resizeOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    // Get number of values in the size tensor
+    auto sizeTensorNumValues = tfLiteSizeTensor.dims->data[0];
+    if (sizeTensorNumValues == 0)
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnDelegate: The Size-Input-Tensor of the Resize operation is not allowed to be a "
+                "dynamic tensor. Operator: #%d node #%d: ",
+                resizeOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+    else if (sizeTensorNumValues != 2)
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnDelegate: The Size-Input-Tensor of the Resize operation requires to "
+                "have a dimension of 2 [new_hight, new width] but a tensor with a dimension of #%d was given. "
+                "Operator: #%d node #%d: ",
+                sizeTensorNumValues, resizeOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+    // Get size tensor data
+    std::vector<int32_t> sizeTensorData(sizeTensorDataPtr, sizeTensorDataPtr + sizeTensorNumValues);
+
+    desc.m_TargetHeight = static_cast<uint32_t>(sizeTensorData[0]);
+    desc.m_TargetWidth  = static_cast<uint32_t>(sizeTensorData[1]);
+    desc.m_DataLayout   = armnn::DataLayout::NHWC;
+
+    // No network pointer indicates that only support for this operator should be checked
+    if (!delegateData.m_Network)
+    {
+        return ValidateResizeOperator(delegateData,
+                                      tfLiteContext,
+                                      inputTensorInfo,
+                                      outputTensorInfo,
+                                      desc);
+    }
+
+    armnn::IConnectableLayer* resizeLayer = delegateData.m_Network->AddResizeLayer(desc, layerName.c_str());
+    ARMNN_ASSERT(resizeLayer != nullptr);
+
+    armnn::IOutputSlot& outputSlot = resizeLayer->GetOutputSlot(0);
+    outputSlot.SetTensorInfo(outputTensorInfo);
+
+    return Connect(resizeLayer, tfLiteNode, delegateData);
 }
 
 } // namespace armnnDelegate
diff --git a/delegate/src/test/ResizeTest.cpp b/delegate/src/test/ResizeTest.cpp
new file mode 100644
index 0000000..394ad6c
--- /dev/null
+++ b/delegate/src/test/ResizeTest.cpp
@@ -0,0 +1,134 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ResizeTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+void ResizeBilinearFloat32Test(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<float> input1Values
+        {
+            0.0f, 1.0f, 2.0f,
+            3.0f, 4.0f, 5.0f,
+            6.0f, 7.0f, 8.0f
+        };
+    const std::vector<int32_t> input2NewShape { 5, 5 };
+
+    // Calculate output data
+    std::vector<float> expectedOutputValues
+        {
+            0.0f, 0.6f, 1.2f, 1.8f, 2.0f,
+            1.8f, 2.4f, 3.0f, 3.6f, 3.8f,
+            3.6f, 4.2f, 4.8f, 5.4f, 5.6f,
+            5.4f, 6.0f, 6.6f, 7.2f, 7.4f,
+            6.0f, 6.6f, 7.2f, 7.8f, 8.0f
+        };
+
+    const std::vector<int32_t> input1Shape { 1, 3, 3, 1 };
+    const std::vector<int32_t> input2Shape { 2 };
+    const std::vector<int32_t> expectedOutputShape = input2NewShape;
+
+    ResizeFP32TestImpl(tflite::BuiltinOperator_RESIZE_BILINEAR,
+                       backends,
+                       input1Values,
+                       input1Shape,
+                       input2NewShape,
+                       input2Shape,
+                       expectedOutputValues,
+                       expectedOutputShape);
+}
+
+void ResizeNearestNeighbourFloat32Test(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<float> input1Values { 1.0f, 2.0f, 3.0f, 4.0f };
+    const std::vector<int32_t> input2NewShape { 1, 1 };
+
+    // Calculate output data
+    std::vector<float> expectedOutputValues { 1.0f };
+
+    const std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
+    const std::vector<int32_t> input2Shape { 2 };
+    const std::vector<int32_t> expectedOutputShape = input2NewShape;
+
+    ResizeFP32TestImpl(tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
+                       backends,
+                       input1Values,
+                       input1Shape,
+                       input2NewShape,
+                       input2Shape,
+                       expectedOutputValues,
+                       expectedOutputShape);
+}
+
+TEST_SUITE("ResizeTests_GpuAccTests")
+{
+
+TEST_CASE ("Resize_Biliniar_Float32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ResizeBiliniarFloat32Test(backends);
+}
+
+TEST_CASE ("Resize_NearestNeighbour_Float32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ResizeNearestNeighbourFloat32Test(backends);
+}
+
+} // TEST_SUITE("ResizeTests_GpuAccTests")
+
+
+TEST_SUITE("ResizeTests_CpuAccTests")
+{
+
+TEST_CASE ("Resize_Biliniar_Float32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ResizeBiliniarFloat32Test(backends);
+}
+
+TEST_CASE ("Resize_NearestNeighbour_Float32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ResizeNearestNeighbourFloat32Test(backends);
+}
+
+} // TEST_SUITE("ResizeTests_CpuAccTests")
+
+
+TEST_SUITE("ResizeTests_CpuRefTests")
+{
+
+TEST_CASE ("Resize_Biliniar_Float32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ResizeBiliniarFloat32Test(backends);
+}
+
+TEST_CASE ("Resize_NearestNeighbour_Float32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ResizeNearestNeighbourFloat32Test(backends);
+}
+
+} // TEST_SUITE("ResizeTests_CpuRefTests")
+
+} // namespace armnnDelegate
diff --git a/delegate/src/test/ResizeTestHelper.hpp b/delegate/src/test/ResizeTestHelper.hpp
new file mode 100644
index 0000000..1e9d3bc
--- /dev/null
+++ b/delegate/src/test/ResizeTestHelper.hpp
@@ -0,0 +1,192 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
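+/// Builds a self-contained TfLite flatbuffer model containing a single resize
+/// operator with two inputs (the image tensor and a constant size tensor holding
+/// [new_height, new_width]) and one output tensor.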
+std::vector<char> CreateResizeTfLiteModel(tflite::BuiltinOperator operatorCode,
+                                          tflite::TensorType inputTensorType,
+                                          const std::vector <int32_t>& inputTensorShape,
+                                          const std::vector <int32_t>& sizeTensorData,
+                                          const std::vector <int32_t>& sizeTensorShape,
+                                          const std::vector <int32_t>& outputTensorShape)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    buffers.push_back(CreateBuffer(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector(
+                                           reinterpret_cast<const uint8_t*>(sizeTensorData.data()),
+                                           sizeof(int32_t) * sizeTensorData.size())));
+
+    std::array<flatbuffers::Offset<Tensor>, 3> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(), inputTensorShape.size()),
+                              inputTensorType,
+                              0,
+                              flatBufferBuilder.CreateString("input_tensor"));
+
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(sizeTensorShape.data(),
+                                                                      sizeTensorShape.size()),
+                              TensorType_INT32,
+                              1,
+                              flatBufferBuilder.CreateString("size_input_tensor"));
+
+    tensors[2] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                      outputTensorShape.size()),
+                              inputTensorType,
+                              0,
+                              flatBufferBuilder.CreateString("output_tensor"));
+
+    // Create Operator
+    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_NONE;
+    flatbuffers::Offset<void> operatorBuiltinOption = 0;
+    switch (operatorCode)
+    {
+        case BuiltinOperator_RESIZE_BILINEAR:
+        {
+            operatorBuiltinOption = CreateResizeBilinearOptions(flatBufferBuilder, false, false).Union();
+            operatorBuiltinOptionsType = tflite::BuiltinOptions_ResizeBilinearOptions;
+            break;
+        }
+        case BuiltinOperator_RESIZE_NEAREST_NEIGHBOR:
+        {
+            operatorBuiltinOption = CreateResizeNearestNeighborOptions(flatBufferBuilder, false, false).Union();
+            operatorBuiltinOptionsType = tflite::BuiltinOptions_ResizeNearestNeighborOptions;
+            break;
+        }
+        default:
+            break;
+    }
+
+    const std::vector<int> operatorInputs{{0, 1}};
+    const std::vector<int> operatorOutputs{{2}};
+    flatbuffers::Offset <Operator> resizeOperator =
+        CreateOperator(flatBufferBuilder,
+                       0,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                       operatorBuiltinOptionsType,
+                       operatorBuiltinOption);
+
+    const std::vector<int> subgraphInputs{{0, 1}};
+    const std::vector<int> subgraphOutputs{{2}};
+    flatbuffers::Offset <SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&resizeOperator, 1));
+
+    flatbuffers::Offset <flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: Resize Biliniar Operator Model");
+    flatbuffers::Offset <OperatorCode> opCode = CreateOperatorCode(flatBufferBuilder, operatorCode);
+
+    flatbuffers::Offset <Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&opCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
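+/// Builds the resize model, runs it once with the plain TfLite interpreter and once
+/// with the Arm NN delegate attached, and compares both outputs against the expected values.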
+void ResizeFP32TestImpl(tflite::BuiltinOperator operatorCode,
+                        std::vector<armnn::BackendId>& backends,
+                        std::vector<float>& input1Values,
+                        std::vector<int32_t> input1Shape,
+                        std::vector<int32_t> input2NewShape,
+                        std::vector<int32_t> input2Shape,
+                        std::vector<float>& expectedOutputValues,
+                        std::vector<int32_t> expectedOutputShape)
+{
+    using namespace tflite;
+
+    std::vector<char> modelBuffer = CreateResizeTfLiteModel(operatorCode,
+                                                            ::tflite::TensorType_FLOAT32,
+                                                            input1Shape,
+                                                            input2NewShape,
+                                                            input2Shape,
+                                                            expectedOutputShape);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+
+    // The model will be executed twice, once with the plain TfLite interpreter and once
+    // with the Arm NN delegate attached, so that the outputs can be compared.
+
+    // Create TfLite Interpreter with armnn delegate
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create TfLite Interpreter without armnn delegate
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+                        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data for the armnn interpreter
+    armnnDelegate::FillInput(armnnDelegateInterpreter, 0, input1Values);
+    armnnDelegate::FillInput(armnnDelegateInterpreter, 1, input2NewShape);
+
+    // Set input data for the tflite interpreter
+    armnnDelegate::FillInput(tfLiteInterpreter, 0, input1Values);
+    armnnDelegate::FillInput(tfLiteInterpreter, 1, input2NewShape);
+
+    // Run EnqueueWorkload
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
+    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
+    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
+    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
+    for (size_t i = 0; i < expectedOutputValues.size(); i++)
+    {
+        CHECK(expectedOutputValues[i] == doctest::Approx(armnnDelegateOutputData[i]));
+        CHECK(armnnDelegateOutputData[i] == doctest::Approx(tfLiteDelegateOutputData[i]));
+    }
+
+    armnnDelegateInterpreter.reset(nullptr);
+}
+
+} // anonymous namespace
diff --git a/delegate/src/test/TestUtils.hpp b/delegate/src/test/TestUtils.hpp
new file mode 100644
index 0000000..162d62f
--- /dev/null
+++ b/delegate/src/test/TestUtils.hpp
@@ -0,0 +1,26 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <tensorflow/lite/interpreter.h>
+
+namespace armnnDelegate
+{
+
+/// Can be used to assign input data from a vector to a model input.
+/// Example usage can be found in ResizeTestHelper.hpp
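+/// e.g. FillInput(interpreter, 0, inputValues);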
+template <typename T>
+void FillInput(std::unique_ptr<tflite::Interpreter>& interpreter, int inputIndex, std::vector<T>& inputValues)
+{
+    auto tfLiteDelegateInputId = interpreter->inputs()[inputIndex];
+    auto tfLiteDelegateInputData = interpreter->typed_tensor<T>(tfLiteDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        tfLiteDelegateInputData[i] = inputValues[i];
+    }
+}
+
+} // namespace armnnDelegate