IVGCVSW-2926 Add test for the quantized ResNetV2-50 TfLite model

Change-Id: I9389cf32d0efb303ea0930ecb4a87af884943b06
Signed-off-by: nikraj01 <nikhil.raj@arm.com>
diff --git a/tests/TfLiteResNetV2-50-Quantized-Armnn/TfLiteResNetV2-50-Quantized-Armnn.cpp b/tests/TfLiteResNetV2-50-Quantized-Armnn/TfLiteResNetV2-50-Quantized-Armnn.cpp
new file mode 100644
index 0000000..a4d0a01
--- /dev/null
+++ b/tests/TfLiteResNetV2-50-Quantized-Armnn/TfLiteResNetV2-50-Quantized-Armnn.cpp
@@ -0,0 +1,62 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "../InferenceTest.hpp"
+#include "../ImagePreprocessor.hpp"
+#include "armnnTfLiteParser/ITfLiteParser.hpp"
+
+using namespace armnnTfLiteParser;
+
+int main(int argc, char* argv[])
+{
+    int retVal = EXIT_FAILURE;
+    try
+    {
+        std::vector<ImageSet> imageSet =
+        {
+            {"Dog.jpg", 209},
+            {"Cat.jpg", 283},
+            {"shark.jpg", 3},
+
+        };
+
+        armnn::TensorShape inputTensorShape({ 1, 299, 299, 3 });
+
+        using DataType = uint8_t;
+        using DatabaseType = ImagePreprocessor<DataType>;
+        using ParserType = armnnTfLiteParser::ITfLiteParser;
+        using ModelType = InferenceModel<ParserType, DataType>;
+
+        // Coverity fix: ClassifierInferenceTestMain() may throw uncaught exceptions.
+        retVal = armnn::test::ClassifierInferenceTestMain<DatabaseType,
+                                                          ParserType>(
+                     argc, argv,
+                     "quant_resnet_v2_50_model.tflite",          // model name
+                     true,                                       // model is binary
+                     "input",                                    // input tensor name
+                     "output",                                   // output tensor name
+                     { 0, 1, 2 },                                // indices of the test images listed above
+                     [&imageSet](const char* dataDir, const ModelType& model) {
+                         // Get the input quantization parameters from the
+                         // parsed model.
+                         auto inputBinding = model.GetInputBindingInfo();
+                         return DatabaseType(
+                             dataDir,
+                             299,
+                             299,
+                             imageSet,
+                             inputBinding.second.GetQuantizationScale(),
+                             inputBinding.second.GetQuantizationOffset());
+                     },
+                     &inputTensorShape);
+    }
+    catch (const std::exception& e)
+    {
+        // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
+        // exception of type std::length_error.
+        // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
+        std::cerr << "WARNING: " << *argv << ": An error has occurred when running "
+                     "the classifier inference tests: " << e.what() << std::endl;
+    }
+    return retVal;
+}
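
The lambda passed to ClassifierInferenceTestMain above reads the input quantization scale and offset from the parsed model and hands them to the ImagePreprocessor so that the preprocessed float pixels can be quantized before inference. As a minimal sketch of the affine quantization such parameters conventionally describe (the helper QuantizePixel below is a hypothetical illustration, not ArmNN or ImagePreprocessor API):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Hypothetical helper: affine quantization q = round(x / scale) + offset,
    // clamped to the uint8 range used by the quantized model's input tensor.
    uint8_t QuantizePixel(float value, float scale, int32_t offset)
    {
        int q = static_cast<int>(std::round(value / scale)) + offset;
        return static_cast<uint8_t>(std::max(0, std::min(255, q)));
    }

For example, assuming a scale of roughly 1/127.5 and an offset of 128 (a common choice when the preprocessor normalises inputs to [-1, 1]), a pixel value of 0.0f maps to the quantized value 128 and 1.0f saturates to 255.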