IVGCVSW-5378 'TfLiteDelegate: Implement the ElementwiseUnary operators'

* Moved the ElementwiseUnary operator tests into a single file
* Implemented FP32 tests for the supported ElementwiseUnary operators (usage sketch below)
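
For reference, each new TEST_CASE builds its input and expected output data and
delegates to the shared ElementwiseUnaryFP32Test helper, which constructs a
3x1x2 FP32 model (so six values per test). A minimal usage sketch, with an
illustrative backend list and the Abs values from the tests:

    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    std::vector<float> inputValues          { -0.1f, -0.2f, -0.3f, 0.1f, 0.2f, 0.3f };
    std::vector<float> expectedOutputValues {  0.1f,  0.2f,  0.3f, 0.1f, 0.2f, 0.3f };
    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_ABS, backends,
                             inputValues, expectedOutputValues);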

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I4b7eab190c3c8edb50927b8e1e94dd353597efcb
diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index aba27df..aa48435 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -88,10 +88,9 @@
 
 set(armnnDelegate_unittest_sources)
 list(APPEND armnnDelegate_unittest_sources
-        src/test/AbsTest.cpp
         src/test/ArmnnDelegateTest.cpp
-        src/test/ElementwiseUnaryTestHelper.hpp
-        src/test/SqrtTest.cpp)
+        src/test/ElementwiseUnaryTest.cpp
+        src/test/ElementwiseUnaryTestHelper.hpp)
 
 add_executable(DelegateUnitTests ${armnnDelegate_unittest_sources})
 target_include_directories(DelegateUnitTests PRIVATE src)
diff --git a/delegate/src/armnn_delegate.cpp b/delegate/src/armnn_delegate.cpp
index 5cbdb6f..82cf573 100644
--- a/delegate/src/armnn_delegate.cpp
+++ b/delegate/src/armnn_delegate.cpp
@@ -130,7 +130,7 @@
         {
             if (std::find(supportedDevices.cbegin(), supportedDevices.cend(), backend) == supportedDevices.cend())
             {
-                TFLITE_LOG_PROD_ONCE(tflite::TFLITE_LOG_INFO,
+                TFLITE_LOG_PROD(tflite::TFLITE_LOG_INFO,
                     "TfLiteArmnnDelegate: Requested unknown backend %s", backend.Get().c_str());
             }
             else
diff --git a/delegate/src/test/AbsTest.cpp b/delegate/src/test/AbsTest.cpp
deleted file mode 100644
index f9c345e..0000000
--- a/delegate/src/test/AbsTest.cpp
+++ /dev/null
@@ -1,98 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ElementwiseUnaryTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace armnnDelegate
-{
-
-TEST_SUITE("AbsTest")
-{
-
-TEST_CASE ("AbsTestFloat32")
-{
-    using namespace tflite;
-
-    const std::vector<int32_t> inputShape  { { 3, 1, 2} };
-    std::vector<char> modelBuffer = CreateElementwiseUnaryTfLiteModel(BuiltinOperator_ABS,
-                                                                      ::tflite::TensorType_FLOAT32,
-                                                                      inputShape);
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
-
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
-    // Create the ArmNN Delegate
-    auto delegateOptions = TfLiteArmnnDelegateOptionsDefault();
-    auto armnnDelegate = TfLiteArmnnDelegateCreate(delegateOptions);
-    CHECK(armnnDelegate != nullptr);
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(armnnDelegate) == kTfLiteOk);
-
-    // Set input data
-    std::vector<float> inputValues
-    {
-        -0.1f, -0.2f, -0.3f,
-        0.1f,  0.2f,  0.3f
-    };
-    auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
-    auto tfLiteDelageInputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateInputId);
-    for (unsigned int i = 0; i < inputValues.size(); ++i)
-    {
-        tfLiteDelageInputData[i] = inputValues[i];
-    }
-
-    auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
-    auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInputId);
-    for (unsigned int i = 0; i < inputValues.size(); ++i)
-    {
-        armnnDelegateInputData[i] = inputValues[i];
-    }
-
-    // Run EnqueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
-    // Compare output data
-    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
-    auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
-
-    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
-    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
-
-    for (size_t i = 0; i < inputValues.size(); i++)
-    {
-        CHECK(std::abs(inputValues[i]) == armnnDelegateOutputData[i]);
-        CHECK(tfLiteDelageOutputData[i] == armnnDelegateOutputData[i]);
-    }
-}
-
-}
-
-} // namespace armnnDelegate
-
-
-
diff --git a/delegate/src/test/ElementwiseUnaryTest.cpp b/delegate/src/test/ElementwiseUnaryTest.cpp
new file mode 100644
index 0000000..c504707
--- /dev/null
+++ b/delegate/src/test/ElementwiseUnaryTest.cpp
@@ -0,0 +1,239 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ElementwiseUnaryTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+TEST_SUITE("ElementwiseUnaryTest")
+{
+
+TEST_CASE ("Abs_Float32_GpuAcc_Test")
+{
+    // Set the backends used by the ArmNN delegate
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    // Set input data
+    std::vector<float> inputValues
+    {
+        -0.1f, -0.2f, -0.3f,
+        0.1f,  0.2f,  0.3f
+    };
+    // Calculate output data
+    std::vector<float> expectedOutputValues(inputValues.size());
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        expectedOutputValues[i] = std::abs(inputValues[i]);
+    }
+    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_ABS, backends, inputValues, expectedOutputValues);
+}
+
+TEST_CASE ("Abs_Float32_CpuAcc_Test")
+{
+    // Set the backends used by the ArmNN delegate
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    // Set input data
+    std::vector<float> inputValues
+    {
+        -0.1f, -0.2f, -0.3f,
+        0.1f,  0.2f,  0.3f
+    };
+    // Calculate output data
+    std::vector<float> expectedOutputValues(inputValues.size());
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        expectedOutputValues[i] = std::abs(inputValues[i]);
+    }
+
+    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_ABS, backends, inputValues, expectedOutputValues);
+}
+
+TEST_CASE ("Exp_Float32_GpuAcc_Test")
+{
+    // Set the backends used by the ArmNN delegate
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    // Set input data
+    std::vector<float> inputValues
+    {
+        5.0f, 4.0f,
+        3.0f, 2.0f,
+        1.0f, 1.1f
+    };
+    // Set output data
+    std::vector<float> expectedOutputValues
+    {
+        148.413159102577f, 54.598150033144f,
+        20.085536923188f,  7.389056098931f,
+        2.718281828459f,  3.004166023946f
+    };
+
+    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_EXP, backends, inputValues, expectedOutputValues);
+}
+
+TEST_CASE ("Exp_Float32_CpuAcc_Test")
+{
+    // Set the backends used by the ArmNN delegate
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    // Set input data
+    std::vector<float> inputValues
+    {
+        5.0f, 4.0f,
+        3.0f, 2.0f,
+        1.0f, 1.1f
+    };
+    // Set output data
+    std::vector<float> expectedOutputValues
+    {
+        148.413159102577f, 54.598150033144f,
+        20.085536923188f,  7.389056098931f,
+        2.718281828459f,  3.004166023946f
+    };
+
+    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_EXP, backends, inputValues, expectedOutputValues);
+}
+
+TEST_CASE ("Neg_Float32_GpuAcc_Test")
+{
+    // Set the backends used by the ArmNN delegate
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    // Set input data
+    std::vector<float> inputValues
+    {
+        1.f, 0.f, 3.f,
+        25.f, 64.f, 100.f
+    };
+    // Set output data
+    std::vector<float> expectedOutputValues
+    {
+        -1.f, 0.f, -3.f,
+        -25.f, -64.f, -100.f
+    };
+
+    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_NEG, backends, inputValues, expectedOutputValues);
+}
+
+TEST_CASE ("Neg_Float32_CpuAcc_Test")
+{
+    // Set the backends used by the ArmNN delegate
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    // Set input data
+    std::vector<float> inputValues
+    {
+        1.f, 0.f, 3.f,
+        25.f, 64.f, 100.f
+    };
+    // Set output data
+    std::vector<float> expectedOutputValues
+    {
+        -1.f, 0.f, -3.f,
+        -25.f, -64.f, -100.f
+    };
+
+    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_NEG, backends, inputValues, expectedOutputValues);
+}
+
+TEST_CASE ("Rsqrt_Float32_GpuAcc_Test")
+{
+    // Set the backends used by the ArmNN delegate
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    // Set input data
+    std::vector<float> inputValues
+    {
+        1.f, 4.f, 16.f,
+        25.f, 64.f, 100.f
+    };
+    // Set output data
+    std::vector<float> expectedOutputValues
+    {
+        1.f, 0.5f, 0.25f,
+        0.2f, 0.125f, 0.1f
+    };
+
+    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_RSQRT, backends, inputValues, expectedOutputValues);
+}
+
+TEST_CASE ("Rsqrt_Float32_CpuAcc_Test")
+{
+    // Set the backends used by the ArmNN delegate
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    // Set input data
+    std::vector<float> inputValues
+    {
+        1.f, 4.f, 16.f,
+        25.f, 64.f, 100.f
+    };
+    // Set output data
+    std::vector<float> expectedOutputValues
+    {
+        1.f, 0.5f, 0.25f,
+        0.2f, 0.125f, 0.1f
+    };
+
+    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_RSQRT, backends, inputValues, expectedOutputValues);
+}
+
+TEST_CASE ("Sqrt_Float32_GpuAcc_Test")
+{
+    // Set the backends used by the ArmNN delegate
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    // Set input data
+    std::vector<float> inputValues
+    {
+        9.0f, 4.25f, 81.9f,
+        0.1f,  0.9f,  169.0f
+    };
+    // Calculate output data
+    std::vector<float> expectedOutputValues(inputValues.size());
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        expectedOutputValues[i] = std::sqrt(inputValues[i]);
+    }
+
+    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_SQRT, backends, inputValues, expectedOutputValues);
+}
+
+TEST_CASE ("Sqrt_Float32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    // Set input data
+    std::vector<float> inputValues
+    {
+        9.0f, 4.25f, 81.9f,
+        0.1f,  0.9f,  169.0f
+    };
+    // Calculate output data
+    std::vector<float> expectedOutputValues(inputValues.size());
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        expectedOutputValues[i] = std::sqrt(inputValues[i]);
+    }
+
+    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_SQRT, backends, inputValues, expectedOutputValues);
+}
+
+}
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/src/test/ElementwiseUnaryTestHelper.hpp b/delegate/src/test/ElementwiseUnaryTestHelper.hpp
new file mode 100644
index 0000000..4d45f4e
--- /dev/null
+++ b/delegate/src/test/ElementwiseUnaryTestHelper.hpp
@@ -0,0 +1,141 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+std::vector<char> CreateElementwiseUnaryTfLiteModel(tflite::BuiltinOperator unaryOperatorCode,
+                                                    tflite::TensorType tensorType,
+                                                    const std::vector <int32_t>& tensorShape)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::array<flatbuffers::Offset<tflite::Buffer>, 1> buffers;
+    buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
+
+    std::array<flatbuffers::Offset<Tensor>, 2> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(), tensorShape.size()),
+                              tensorType);
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(), tensorShape.size()),
+                              tensorType);
+
+    // create operator
+    const std::vector<int> operatorInputs{{0}};
+    const std::vector<int> operatorOutputs{{1}};
+    flatbuffers::Offset <Operator> unaryOperator =
+        CreateOperator(flatBufferBuilder,
+                       0,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()));
+
+    const std::vector<int> subgraphInputs{{0}};
+    const std::vector<int> subgraphOutputs{{1}};
+    flatbuffers::Offset <SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&unaryOperator, 1));
+
+    flatbuffers::Offset <flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: Elementwise Unary Operator Model");
+    flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, unaryOperatorCode);
+
+    flatbuffers::Offset <Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+void ElementwiseUnaryFP32Test(tflite::BuiltinOperator unaryOperatorCode,
+                              std::vector<armnn::BackendId>& backends,
+                              std::vector<float>& inputValues,
+                              std::vector<float>& expectedOutputValues)
+{
+    using namespace tflite;
+    const std::vector<int32_t> inputShape  { { 3, 1, 2} };
+    std::vector<char> modelBuffer = CreateElementwiseUnaryTfLiteModel(unaryOperatorCode,
+                                                                      ::tflite::TensorType_FLOAT32,
+                                                                      inputShape);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    auto armnnDelegate = TfLiteArmnnDelegateCreate(delegateOptions);
+    CHECK(armnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(armnnDelegate) == kTfLiteOk);
+
+    // Set input data
+    auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
+    auto tfLiteDelegateInputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        tfLiteDelegateInputData[i] = inputValues[i];
+    }
+
+    auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
+    auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        armnnDelegateInputData[i] = inputValues[i];
+    }
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
+    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
+    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
+    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
+    for (size_t i = 0; i < inputValues.size(); i++)
+    {
+        CHECK(expectedOutputValues[i] == armnnDelegateOutputData[i]);
+        CHECK(tfLiteDelegateOutputData[i] == armnnDelegateOutputData[i]);
+    }
+}
+
+} // anonymous namespace
+
+
+
+
diff --git a/delegate/src/test/SqrtTest.cpp b/delegate/src/test/SqrtTest.cpp
deleted file mode 100644
index df3534d..0000000
--- a/delegate/src/test/SqrtTest.cpp
+++ /dev/null
@@ -1,97 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ElementwiseUnaryTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace armnnDelegate
-{
-
-TEST_SUITE("SqrtTest")
-{
-
-TEST_CASE ("SqrtTestFloat32")
-{
-    using namespace tflite;
-    const std::vector<int32_t> inputShape  { { 3, 1, 2} };
-    std::vector<char> modelBuffer = CreateElementwiseUnaryTfLiteModel(BuiltinOperator_SQRT,
-                                                                      ::tflite::TensorType_FLOAT32,
-                                                                      inputShape);
-
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                 (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
-    // Create the ArmNN Delegate
-    auto delegateOptions = TfLiteArmnnDelegateOptionsDefault();
-    auto armnnDelegate = TfLiteArmnnDelegateCreate(delegateOptions);
-    CHECK(armnnDelegate != nullptr);
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(armnnDelegate) == kTfLiteOk);
-
-    // Set input data
-    std::vector<float> inputValues
-    {
-        9.0f, 4.25f, 81.9f,
-        0.1f,  0.9f,  169.0f
-    };
-
-    auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
-    auto tfLiteDelageInputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateInputId);
-    for (unsigned int i = 0; i < inputValues.size(); ++i)
-    {
-        tfLiteDelageInputData[i] = inputValues[i];
-    }
-
-    auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
-    auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInputId);
-    for (unsigned int i = 0; i < inputValues.size(); ++i)
-    {
-        armnnDelegateInputData[i] = inputValues[i];
-    }
-
-    // Run EnqueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
-    // Compare output data
-    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
-    auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
-    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
-    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
-    for (size_t i = 0; i < inputValues.size(); i++)
-    {
-        CHECK(std::sqrt(inputValues[i]) == armnnDelegateOutputData[i]);
-        CHECK(tfLiteDelageOutputData[i] == armnnDelegateOutputData[i]);
-    }
-
-}
-
-}
-
-} // namespace armnnDelegate
-
-
-