IVGCVSW-3148 Add end to end test for Dequantize layer to Ref, Cl, Neon

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: Iaf9c290c093b7d84949993439568e55433938b4e
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index 8fbd74d..7512c89 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -16,6 +16,7 @@
     ConvertFp32ToFp16TestImpl.hpp
     DebugTestImpl.hpp
+    DequantizeEndToEndTestImpl.hpp
     DequantizeTestImpl.hpp
     DetectionPostProcessLayerTestImpl.hpp
     DetectionPostProcessTestImpl.hpp
     EndToEndTestImpl.hpp
diff --git a/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp
new file mode 100644
index 0000000..e624159
--- /dev/null
+++ b/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp
@@ -0,0 +1,108 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "CommonTestUtils.hpp"
+
+#include <armnn/INetwork.hpp>
+#include <ResolveType.hpp>
+
+#include <map>
+#include <vector>
+
+namespace
+{
+
+// Builds a minimal network: Input -> Dequantize -> Output.
+template<typename T>
+armnn::INetworkPtr CreateDequantizeNetwork(const armnn::TensorInfo& inputInfo,
+                                           const armnn::TensorInfo& outputInfo)
+{
+    armnn::INetworkPtr net(armnn::INetwork::Create());
+
+    armnn::IConnectableLayer* inputLayer = net->AddInputLayer(0);
+    armnn::IConnectableLayer* dequantizeLayer = net->AddDequantizeLayer("Dequantize");
+    armnn::IConnectableLayer* outputLayer = net->AddOutputLayer(0, "output");
+    Connect(inputLayer, dequantizeLayer, inputInfo, 0, 0);
+    Connect(dequantizeLayer, outputLayer, outputInfo, 0, 0);
+
+    return net;
+}
+
+// Runs the Dequantize network end to end on the given backends and
+// compares the Float32 output against expectedOutput.
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void DequantizeEndToEndLayerTestImpl(const std::vector<armnn::BackendId>& backends,
+                                     const armnn::TensorShape& tensorShape,
+                                     const std::vector<T>& input,
+                                     const std::vector<float>& expectedOutput,
+                                     float scale,
+                                     int32_t offset)
+{
+    armnn::TensorInfo inputInfo(tensorShape, ArmnnType);
+    armnn::TensorInfo outputInfo(tensorShape, armnn::DataType::Float32);
+
+    inputInfo.SetQuantizationScale(scale);
+    inputInfo.SetQuantizationOffset(offset);
+
+    // Builds up the structure of the network
+    armnn::INetworkPtr net = CreateDequantizeNetwork<T>(inputInfo, outputInfo);
+
+    BOOST_TEST_CHECKPOINT("create a network");
+
+    std::map<int, std::vector<T>> inputTensorData = { { 0, input } };
+    std::map<int, std::vector<float>> expectedOutputData = { { 0, expectedOutput } };
+
+    EndToEndLayerTestImpl<ArmnnType, armnn::DataType::Float32>(
+            std::move(net), inputTensorData, expectedOutputData, backends);
+}
+
+// Dequantize with zero offset: output = scale * input.
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void DequantizeEndToEndSimple(const std::vector<armnn::BackendId>& backends)
+{
+    const armnn::TensorShape tensorShape({ 1, 2, 2, 4 });
+    std::vector<T> inputData = std::vector<T>(
+    {
+        2, 4, 6, 8,
+        10, 12, 14, 16,
+        18, 20, 22, 24,
+        26, 28, 30, 32
+    });
+
+    std::vector<float> expectedOutputData = std::vector<float>(
+    {
+        1.0f, 2.0f, 3.0f, 4.0f,
+        5.0f, 6.0f, 7.0f, 8.0f,
+        9.0f, 10.0f, 11.0f, 12.0f,
+        13.0f, 14.0f, 15.0f, 16.0f
+    });
+    DequantizeEndToEndLayerTestImpl<ArmnnType>(backends, tensorShape, inputData, expectedOutputData, 0.5f, 0);
+}
+
+// Dequantize with non-zero offset: output = scale * (input - offset).
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void DequantizeEndToEndOffset(const std::vector<armnn::BackendId>& backends)
+{
+    const armnn::TensorShape tensorShape({ 1, 2, 2, 4 });
+    std::vector<T> inputData = std::vector<T>(
+    {
+        3, 5, 7, 9,
+        11, 13, 15, 17,
+        19, 21, 23, 25,
+        27, 29, 31, 33
+    });
+
+    std::vector<float> expectedOutputData = std::vector<float>(
+    {
+        1.0f, 2.0f, 3.0f, 4.0f,
+        5.0f, 6.0f, 7.0f, 8.0f,
+        9.0f, 10.0f, 11.0f, 12.0f,
+        13.0f, 14.0f, 15.0f, 16.0f
+    });
+    DequantizeEndToEndLayerTestImpl<ArmnnType>(backends, tensorShape, inputData, expectedOutputData, 0.5f, 1);
+}
+
+} // anonymous namespace
diff --git a/src/backends/cl/test/ClEndToEndTests.cpp b/src/backends/cl/test/ClEndToEndTests.cpp
index 990a156..174f392 100644
--- a/src/backends/cl/test/ClEndToEndTests.cpp
+++ b/src/backends/cl/test/ClEndToEndTests.cpp
@@ -4,8 +4,10 @@
 //
 
 #include <backendsCommon/test/EndToEndTestImpl.hpp>
-#include <backendsCommon/test/ConcatTestImpl.hpp>
+
 #include <backendsCommon/test/ArithmeticTestImpl.hpp>
+#include <backendsCommon/test/ConcatTestImpl.hpp>
+#include <backendsCommon/test/DequantizeEndToEndTestImpl.hpp>
 #include <backendsCommon/test/SplitterEndToEndTestImpl.hpp>
 
 #include <boost/test/unit_test.hpp>
@@ -49,6 +51,16 @@
     ConcatDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
 }
 
+BOOST_AUTO_TEST_CASE(ClDequantizeEndToEndSimpleTest)
+{
+    DequantizeEndToEndSimple<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(ClDequantizeEndToEndOffsetTest)
+{
+    DequantizeEndToEndOffset<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
 BOOST_AUTO_TEST_CASE(ClGreaterSimpleEndToEndTest)
 {
     const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0,  1, 1, 1, 1,
diff --git a/src/backends/neon/test/NeonEndToEndTests.cpp b/src/backends/neon/test/NeonEndToEndTests.cpp
index d0aa233..7a22b45 100644
--- a/src/backends/neon/test/NeonEndToEndTests.cpp
+++ b/src/backends/neon/test/NeonEndToEndTests.cpp
@@ -4,8 +4,10 @@
 //
 
 #include <backendsCommon/test/EndToEndTestImpl.hpp>
-#include <backendsCommon/test/ConcatTestImpl.hpp>
+
 #include <backendsCommon/test/ArithmeticTestImpl.hpp>
+#include <backendsCommon/test/ConcatTestImpl.hpp>
+#include <backendsCommon/test/DequantizeEndToEndTestImpl.hpp>
 #include <backendsCommon/test/SplitterEndToEndTestImpl.hpp>
 
 #include <boost/test/unit_test.hpp>
@@ -123,6 +125,16 @@
     ConcatDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
 }
 
+BOOST_AUTO_TEST_CASE(NeonDequantizeEndToEndSimpleTest)
+{
+    DequantizeEndToEndSimple<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(NeonDequantizeEndToEndOffsetTest)
+{
+    DequantizeEndToEndOffset<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
 BOOST_AUTO_TEST_CASE(NeonSplitter1dEndToEndTest)
 {
     Splitter1dEndToEnd<armnn::DataType::Float32>(defaultBackends);
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index a0f1028..8e75eba 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -5,6 +5,7 @@
 
 #include <backendsCommon/test/EndToEndTestImpl.hpp>
 
+#include <backendsCommon/test/DequantizeEndToEndTestImpl.hpp>
 #include <backendsCommon/test/DetectionPostProcessTestImpl.hpp>
 #include <backendsCommon/test/GatherEndToEndTestImpl.hpp>
 #include <backendsCommon/test/ConcatTestImpl.hpp>
@@ -456,6 +457,16 @@
     GatherMultiDimEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
 }
 
+BOOST_AUTO_TEST_CASE(RefDequantizeEndToEndSimpleTest)
+{
+    DequantizeEndToEndSimple<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefDequantizeEndToEndOffsetTest)
+{
+    DequantizeEndToEndOffset<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
 BOOST_AUTO_TEST_CASE(RefDetectionPostProcessRegularNmsTest)
 {
     std::vector<float> boxEncodings({