IVGCVSW-3935 Add Quantizer support for INSTANCE_NORMALIZATION

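Added QuantizerVisitor::VisitInstanceNormalizationLayer, which clones the
layer into the quantized network, records the original-to-quantized layer
mapping, and rewires the quantized input connections. Also added a unit
test that builds a minimal Input -> InstanceNormalization -> Output
network and checks the quantization parameters produced for both QAsymm8
and QSymm16.
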
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: I175008c81da028fb5bdc71e0abff06bc6e58734c
diff --git a/src/armnn/QuantizerVisitor.cpp b/src/armnn/QuantizerVisitor.cpp
index 4989e4a..9819d71 100644
--- a/src/armnn/QuantizerVisitor.cpp
+++ b/src/armnn/QuantizerVisitor.cpp
@@ -310,6 +310,15 @@
     }
 }
 
+void QuantizerVisitor::VisitInstanceNormalizationLayer(const IConnectableLayer* layer,
+                                                       const InstanceNormalizationDescriptor& descriptor,
+                                                       const char* name)
+{
+    IConnectableLayer* newLayer = m_QuantizedNetwork->AddInstanceNormalizationLayer(descriptor, name);
+    RecordLayer(layer, newLayer);
+    SetQuantizedInputConnections(layer, newLayer);
+}
+
 void QuantizerVisitor::VisitMeanLayer(const IConnectableLayer* layer,
                                       const MeanDescriptor& meanDescriptor,
                                       const char* name)
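
Note: every QuantizerVisitor::Visit*Layer override follows the same
three-step pattern seen in the new method above. An annotated reading of
those three calls (the comments are descriptive only, based on how the
surrounding visitor methods use these helpers):

    // 1. Clone the layer into the network the quantizer is building.
    IConnectableLayer* newLayer =
        m_QuantizedNetwork->AddInstanceNormalizationLayer(descriptor, name);

    // 2. Record the original-to-quantized layer mapping so that layers
    //    visited later can look up their quantized inputs.
    RecordLayer(layer, newLayer);

    // 3. Reconnect the new layer's input slots to the already-quantized
    //    outputs of its predecessor layers.
    SetQuantizedInputConnections(layer, newLayer);
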
diff --git a/src/armnn/QuantizerVisitor.hpp b/src/armnn/QuantizerVisitor.hpp
index b2acc7c..d1c4375 100644
--- a/src/armnn/QuantizerVisitor.hpp
+++ b/src/armnn/QuantizerVisitor.hpp
@@ -89,6 +89,10 @@
 
     void VisitInputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name = nullptr) override;
 
+    void VisitInstanceNormalizationLayer(const IConnectableLayer* layer,
+                                         const InstanceNormalizationDescriptor& instanceNormalizationDescriptor,
+                                         const char* name = nullptr) override;
+
     void VisitMeanLayer(const IConnectableLayer* layer,
                         const MeanDescriptor& meanDescriptor,
                         const char* name = nullptr) override;
@@ -96,8 +100,8 @@
     void VisitMultiplicationLayer(const IConnectableLayer* layer,
                                   const char* name = nullptr) override;
 
-    void VisitNormalizationLayer(const armnn::IConnectableLayer* layer,
-                                 const armnn::NormalizationDescriptor& normalizationDescriptor,
+    void VisitNormalizationLayer(const IConnectableLayer* layer,
+                                 const NormalizationDescriptor& normalizationDescriptor,
                                  const char* name = nullptr) override;
 
 void VisitOutputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name = nullptr) override;
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index 6a217f3..6f7c115 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -1009,6 +1009,59 @@
     TestQuantizeDepthwiseConvolution2d(true);
 }
 
+BOOST_AUTO_TEST_CASE(QuantizeInstanceNormalization)
+{
+    class TestInstanceNormalizationQuantization : public TestQuantization
+    {
+    public:
+        TestInstanceNormalizationQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
+            : TestQuantization(inputShape, outputShape) {}
+
+        TestInstanceNormalizationQuantization(const QuantizerOptions& options,
+                                              const TensorShape& inputShape,
+                                              const TensorShape& outputShape)
+            : TestQuantization(options, inputShape, outputShape) {}
+
+        void VisitInstanceNormalizationLayer(const IConnectableLayer* layer,
+                                             const InstanceNormalizationDescriptor& descriptor,
+                                             const char* name = nullptr) override
+        {
+            const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
+
+            const OffsetScalePair qAsymm8Params{ 30.0f / g_Asymm8QuantizationBase, 128 };
+            const OffsetScalePair qSymm16Params{ 15.0f / g_Symm16QuantizationBase, 0 };
+
+            TestQuantizationParams(info, qAsymm8Params, qSymm16Params);
+        }
+    };
+
+    const TensorShape tensorShape{ 1, 4, 4, 1 };
+    const TensorInfo tensorInfo(tensorShape, DataType::Float32);
+
+    INetworkPtr network = INetwork::Create();
+
+    IConnectableLayer* inputLayer        = network->AddInputLayer(0);
+    IConnectableLayer* instanceNormLayer = network->AddInstanceNormalizationLayer(InstanceNormalizationDescriptor());
+    IConnectableLayer* outputLayer       = network->AddOutputLayer(0);
+
+    inputLayer->GetOutputSlot(0).Connect(instanceNormLayer->GetInputSlot(0));
+    instanceNormLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+
+    inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+    instanceNormLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+    // test QAsymm8 quantization
+    INetworkPtr quantizedNetworkQAsymm8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
+    TestInstanceNormalizationQuantization validatorQAsymm8(tensorShape, tensorShape);
+    VisitLayersTopologically(quantizedNetworkQAsymm8.get(), validatorQAsymm8);
+
+    // test QSymm16 quantization
+    const QuantizerOptions options(DataType::QuantisedSymm16);
+    INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), options)->ExportNetwork();
+    TestInstanceNormalizationQuantization validatorQSymm16(options, tensorShape, tensorShape);
+    VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
+}
+
 INetworkPtr CreateNetworkWithSoftmaxLayer(const SoftmaxDescriptor& descriptor, const TensorShape& shape)
 {
     INetworkPtr network = INetwork::Create();
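
Note on the expected values: the pairs checked above, namely
{ 30.0f / g_Asymm8QuantizationBase, 128 } and
{ 15.0f / g_Symm16QuantizationBase, 0 }, with g_Asymm8QuantizationBase =
255.0f and g_Symm16QuantizationBase = 32767.0f (constants defined at the
top of QuantizerTest.cpp), are consistent with the quantizer assigning
the instance normalization output a static range of [-15.0f, 15.0f]. A
minimal sketch of that arithmetic, assuming that range:

    // Assumed output range; inferred from the test constants, not
    // stated explicitly in this patch.
    constexpr float rangeMin = -15.0f;
    constexpr float rangeMax =  15.0f;

    // QAsymm8: 8-bit asymmetric quantization with 255 steps.
    const float asymm8Scale  = (rangeMax - rangeMin) / 255.0f;                   // 30 / 255
    const int   asymm8Offset = static_cast<int>(-rangeMin / asymm8Scale + 0.5f); // 128

    // QSymm16: 16-bit symmetric quantization; the zero point is fixed at 0.
    const float symm16Scale  = rangeMax / 32767.0f;                              // 15 / 32767
    const int   symm16Offset = 0;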