MLCE-325 ArmnnQuantizer incorrectly quantizes all DataTypes

 * ArmnnQuantizer incorrectly converts boolean and integer DataTypes (Boolean,
   Signed32, Signed64) to quantized DataTypes. This breaks layers such as
   ArgMinMax, whose output holds the index of an element along an axis and
   must therefore remain Signed32.

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: I272c3d0f48bf884a2480bfa43eb14ec265fcda6b
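
To make the failure concrete, here is a minimal sketch in plain C++ (no ArmNN
dependency; the scale and offset values are chosen purely for illustration) of
why an index-producing output must not be run through quantization parameters:

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main()
    {
        std::vector<float> activations{0.1f, 0.9f, 0.4f};

        // ArgMax produces the index of the largest element: 1.
        const auto maxIt = std::max_element(activations.begin(), activations.end());
        const int32_t index =
            static_cast<int32_t>(std::distance(activations.begin(), maxIt));

        // Reinterpreting that index as a quantized value "dequantizes" it
        // to -1.0, which is no longer a valid index into the input tensor.
        const float   scale  = 0.5f;
        const int32_t offset = 3;
        const float   bogus  = scale * static_cast<float>(index - offset);

        std::cout << "index = " << index << ", misread as " << bogus << '\n';
        return 0;
    }
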
diff --git a/src/armnn/DynamicQuantizationVisitor.cpp b/src/armnn/DynamicQuantizationVisitor.cpp
index 2202910..02e7699 100644
--- a/src/armnn/DynamicQuantizationVisitor.cpp
+++ b/src/armnn/DynamicQuantizationVisitor.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -106,8 +106,7 @@
 {
     IgnoreUnused(name);
     IgnoreUnused(desc);
-    SetRange(layer, 0, -20.f, 20.f);
-    AddToCalibratedLayers(layer);
+    AddToNonCalibratedLayers(layer);
 }
 
 void DynamicQuantizationVisitor::VisitBatchNormalizationLayer(const IConnectableLayer* layer,
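
For context on the removed lines above: the visitor previously pinned this
layer to a fixed [-20, 20] range and marked it calibrated. A standalone sketch
using the standard affine uint8 formulas (assumed here, not copied from
ArmNN's QuantizationScheme) shows what that fixed range does to an index value
such as 1:

    #include <cmath>
    #include <cstdint>
    #include <iostream>

    int main()
    {
        const float rangeMin = -20.0f;
        const float rangeMax =  20.0f;

        // Affine (asymmetric) uint8 parameters for the fixed range.
        const float scale  = (rangeMax - rangeMin) / 255.0f;                  // ~0.157
        const int   offset = static_cast<int>(std::round(-rangeMin / scale)); // 128

        // Round-tripping the ArgMax index 1 through those parameters:
        const int32_t index = 1;
        const uint8_t q    = static_cast<uint8_t>(std::round(index / scale) + offset); // 134
        const float   back = scale * (static_cast<int>(q) - offset);                   // ~0.94

        std::cout << "stored " << static_cast<int>(q)
                  << ", dequantizes to " << back << " instead of 1\n";
        return 0;
    }

The stored byte is neither the index nor anything that dequantizes back to it,
which is exactly why the layer is now left out of calibration.
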
diff --git a/src/armnn/QuantizerVisitor.cpp b/src/armnn/QuantizerVisitor.cpp
index 7889f03..0e9d224 100644
--- a/src/armnn/QuantizerVisitor.cpp
+++ b/src/armnn/QuantizerVisitor.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -52,16 +52,20 @@
         IInputSlot& newInputSlot = quantizedLayer->GetInputSlot(i);
         IOutputSlot& newOutputSlot = prevQuantizedLayer->GetOutputSlot(slotIdx);
         newOutputSlot.Connect(newInputSlot);
-
-        // Fetch the min/max ranges that were computed earlier
-        auto range = m_Ranges.GetRange(layerToFind.GetGuid(), slotIdx);
-        OffsetScalePair qParams = m_QuantizationScheme->ComputeScheme(range.first, range.second);
-
-        // Set the quantization params
         TensorInfo info(outputSlot->GetTensorInfo());
-        info.SetDataType(m_QuantizationScheme->GetDataType());
-        info.SetQuantizationOffset(qParams.second);
-        info.SetQuantizationScale(qParams.first);
+
+        // Only try to set quantization params on tensors that can be quantized
+        if (inputSlot->GetConnectedOutputSlot()->GetTensorInfo().GetDataType() != DataType::Boolean &&
+            inputSlot->GetConnectedOutputSlot()->GetTensorInfo().GetDataType() != DataType::Signed32 &&
+            inputSlot->GetConnectedOutputSlot()->GetTensorInfo().GetDataType() != DataType::Signed64)
+        {
+            // Fetch the min/max ranges that were computed earlier
+            auto range = m_Ranges.GetRange(layerToFind.GetGuid(), slotIdx);
+            OffsetScalePair qParams = m_QuantizationScheme->ComputeScheme(range.first, range.second);
+            info.SetDataType(m_QuantizationScheme->GetDataType());
+            info.SetQuantizationOffset(qParams.second);
+            info.SetQuantizationScale(qParams.first);
+        }
         newOutputSlot.SetTensorInfo(info);
     }
 }
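
The new guard skips tensors whose DataType is Boolean, Signed32 or Signed64.
The same predicate, pulled out as a hypothetical helper (not part of the ArmNN
API) to make the intent explicit:

    #include <armnn/Types.hpp>

    // Hypothetical helper: quantization parameters only make sense for
    // tensors holding real values, so index and flag types must pass
    // through with their original DataType.
    bool IsQuantizable(armnn::DataType type)
    {
        switch (type)
        {
            case armnn::DataType::Boolean:
            case armnn::DataType::Signed32:
            case armnn::DataType::Signed64:
                return false; // indices/flags keep their integer representation
            default:
                return true;  // value tensors receive a scale and offset
        }
    }
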
diff --git a/src/armnn/StaticRangeVisitor.cpp b/src/armnn/StaticRangeVisitor.cpp
index 0e820c3..210c666 100644
--- a/src/armnn/StaticRangeVisitor.cpp
+++ b/src/armnn/StaticRangeVisitor.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -107,11 +107,20 @@
     }
 }
 
-void StaticRangeVisitor::VisitFullyConnectedLayer(const IConnectableLayer *layer,
+void StaticRangeVisitor::VisitArgMinMaxLayer(const armnn::IConnectableLayer* layer,
+                                             const armnn::ArgMinMaxDescriptor& argMinMaxDescriptor,
+                                             const char* name)
+{
+    IgnoreUnused(argMinMaxDescriptor);
+    IgnoreUnused(name);
+    ForwardParentParameters(layer);
+}
+
+void StaticRangeVisitor::VisitFullyConnectedLayer(const IConnectableLayer* layer,
                                                   const FullyConnectedDescriptor& desc,
                                                   const ConstTensor& weights,
                                                   const Optional<ConstTensor>& biases,
-                                                  const char *name)
+                                                  const char* name)
 {
     IgnoreUnused(desc);
     IgnoreUnused(weights);
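
ForwardParentParameters lets the ArgMinMax layer reuse the range already
recorded for its parent rather than being calibrated on its own (integer)
output. A standalone sketch of that bookkeeping, assuming a simplified
guid-to-range map in place of ArmNN's actual RangeTracker:

    #include <cstdint>
    #include <map>
    #include <utility>

    using LayerGuid   = uint64_t;
    using MinMaxRange = std::pair<float, float>;

    // One recorded range per layer, keyed by the producing layer's guid.
    std::map<LayerGuid, MinMaxRange> ranges;

    // The child cannot be calibrated on its own output (it emits indices),
    // so it simply inherits the range already recorded for its parent.
    void ForwardParentParameters(LayerGuid parent, LayerGuid child)
    {
        const auto it = ranges.find(parent);
        if (it != ranges.end())
        {
            ranges[child] = it->second;
        }
    }

    int main()
    {
        ranges[1] = {-6.0f, 6.0f};     // range calibrated for the parent
        ForwardParentParameters(1, 2); // ArgMinMax-style child inherits it
        return ranges.count(2) == 1 ? 0 : 1;
    }
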
diff --git a/src/armnn/StaticRangeVisitor.hpp b/src/armnn/StaticRangeVisitor.hpp
index 37ebec8..20e3cb0 100644
--- a/src/armnn/StaticRangeVisitor.hpp
+++ b/src/armnn/StaticRangeVisitor.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -25,6 +25,10 @@
     /// Functions to set the Range on a per-layer-type basis
     void VisitAdditionLayer(const IConnectableLayer* layer, const char* name = nullptr) override;
 
+    void VisitArgMinMaxLayer(const IConnectableLayer* layer,
+                             const ArgMinMaxDescriptor& desc,
+                             const char* name = nullptr) override;
+
     void VisitBatchNormalizationLayer(const IConnectableLayer* layer,
                                       const BatchNormalizationDescriptor& desc,
                                       const ConstTensor& mean,
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index a3c4581..da85029 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -8,7 +8,6 @@
 #include "../NetworkQuantizerUtils.hpp"
 #include "../OverrideInputRangeVisitor.hpp"
 #include "../RangeTracker.hpp"
-#include "../../armnnQuantizer/CommandLineProcessor.hpp"
 
 #include <armnn/INetwork.hpp>
 #include <armnn/LayerVisitorBase.hpp>
@@ -314,6 +313,50 @@
     return network;
 }
 
+class TestArgMinMaxQuantization : public TestQuantization
+{
+public:
+    TestArgMinMaxQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
+            : TestQuantization(inputShape, outputShape) {}
+
+    TestArgMinMaxQuantization(const QuantizerOptions& options,
+                              const TensorShape& inputShape,
+                              const TensorShape& outputShape)
+            : TestQuantization(options, inputShape, outputShape) {}
+
+    void VisitArgMinMaxLayer(const IConnectableLayer* layer,
+                             const ArgMinMaxDescriptor&,
+                             const char* name = nullptr) override
+    {
+        IgnoreUnused(name);
+        TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
+
+        BOOST_CHECK(info.GetDataType() == DataType::Signed32);
+    }
+};
+
+INetworkPtr CreateNetworkWithArgMinMaxLayer(const ArgMinMaxDescriptor& descriptor, const TensorShape& shape)
+{
+    INetworkPtr network = INetwork::Create();
+
+    // Add the layers
+    IConnectableLayer* input0 = network->AddInputLayer(0);
+    IConnectableLayer* activation = network->AddArgMinMaxLayer(descriptor);
+    IConnectableLayer* output = network->AddOutputLayer(2);
+
+    // Establish connections
+    input0->GetOutputSlot(0).Connect(activation->GetInputSlot(0));
+    activation->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+    // Set TensorInfo
+    TensorInfo inInfo(shape, DataType::Float32);
+    input0->GetOutputSlot(0).SetTensorInfo(inInfo);
+    TensorInfo outInfo({1}, DataType::Signed32);
+    activation->GetOutputSlot(0).SetTensorInfo(outInfo);
+
+    return network;
+}
+
 INetworkPtr CreateNetworkWithInputOutputLayers()
 {
     INetworkPtr network = INetwork::Create();
@@ -435,6 +478,35 @@
     VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
 }
 
+BOOST_AUTO_TEST_CASE(QuantizeArgMax)
+{
+    ArgMinMaxDescriptor descriptor;
+    descriptor.m_Function = ArgMinMaxFunction::Max;
+
+    const TensorShape shape{1U};
+    INetworkPtr network = CreateNetworkWithArgMinMaxLayer(descriptor, shape);
+
+    const QuantizerOptions qAsymmU8Options(DataType::QAsymmU8);
+    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get(), qAsymmU8Options)->ExportNetwork();
+    TestArgMinMaxQuantization validatorQAsymmU8(shape, shape);
+    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
+
+    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
+    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
+    TestArgMinMaxQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
+    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
+
+    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
+    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
+    TestArgMinMaxQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
+    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
+
+    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
+    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
+    TestArgMinMaxQuantization validatorQSymmS16(qSymmS16options, shape, shape);
+    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+}
+
 BOOST_AUTO_TEST_CASE(QuantizeLinearActivation)
 {
     ActivationDescriptor descriptor;