Rename quantized data types to remove signed/unsigned ambiguity

QuantisedAsymm8 -> QAsymmU8
QuantisedSymm16 -> QSymmS16

!armnn:2571

Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
Change-Id: I06977553a097479a2a996e76a106249673d31ed7
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index b53432c..a0ab9e5 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -2961,7 +2961,7 @@
    // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the
    // pad value must be "logical zero", so we set it equal to the QuantizationOffset; effectively it ends up as
    // (QuantizationOffset - QuantizationOffset) * scale = 0.
-    if (inputInfo.GetDataType() == armnn::DataType::QuantisedAsymm8)
+    if (inputInfo.GetDataType() == armnn::DataType::QAsymmU8)
     {
         descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
     }
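
For context (not part of the patch): the comment in the hunk above relies on the
standard asymmetric dequantization formula, real = (q - offset) * scale. A minimal
standalone sketch of why a pad value equal to the quantization offset is "logical
zero"; the scale and offset values here are illustrative, not taken from the driver:

#include <cstdint>
#include <iostream>

// Asymmetric 8-bit dequantization: real = (q - offset) * scale.
float Dequantize(uint8_t q, float scale, int32_t offset)
{
    return static_cast<float>(static_cast<int32_t>(q) - offset) * scale;
}

int main()
{
    const float   scale  = 0.05f; // illustrative quantization parameters
    const int32_t offset = 128;

    // Padding with the offset itself gives (offset - offset) * scale = 0.
    const uint8_t padValue = static_cast<uint8_t>(offset);
    std::cout << Dequantize(padValue, scale, offset) << std::endl; // prints 0
    return 0;
}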
diff --git a/Utils.cpp b/Utils.cpp
index 0211e92..cdebfae 100644
--- a/Utils.cpp
+++ b/Utils.cpp
@@ -43,7 +43,7 @@
     {
     case armnn::DataType::Float16:
     case armnn::DataType::Float32:
-    case armnn::DataType::QuantisedAsymm8:
+    case armnn::DataType::QAsymmU8:
     case armnn::DataType::QuantizedSymm8PerAxis:
         SwizzleAndroidNn4dTensorToArmNn(tensor.GetShape(), input, output, armnn::GetDataTypeSize(dataType), mappings);
         break;
@@ -83,7 +83,7 @@
             type = armnn::DataType::Float32;
             break;
         case V1_0::OperandType::TENSOR_QUANT8_ASYMM:
-            type = armnn::DataType::QuantisedAsymm8;
+            type = armnn::DataType::QAsymmU8;
             break;
         case V1_0::OperandType::TENSOR_INT32:
             type = armnn::DataType::Signed32;
@@ -119,13 +119,13 @@
             type = armnn::DataType::QuantizedSymm8PerAxis;
             break;
         case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
-            type = armnn::DataType::QuantisedAsymm8;
+            type = armnn::DataType::QAsymmU8;
             break;
         case V1_2::OperandType::TENSOR_QUANT8_SYMM:
             type = armnn::DataType::QSymmS8;
             break;
         case V1_2::OperandType::TENSOR_QUANT16_SYMM:
-            type = armnn::DataType::QuantisedSymm16;
+            type = armnn::DataType::QSymmS16;
             break;
         case V1_2::OperandType::TENSOR_INT32:
             type = armnn::DataType::Signed32;
@@ -228,7 +228,7 @@
             dumpElementFunction = &DumpTensorElement<float>;
             break;
         }
-        case armnn::DataType::QuantisedAsymm8:
+        case armnn::DataType::QAsymmU8:
         {
             dumpElementFunction = &DumpTensorElement<uint8_t, uint32_t>;
             break;
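
For reference (not part of the patch): a minimal usage sketch of the renamed
enumerators as they read after this change, assuming the standard armnn/Tensor.hpp
and armnn/Types.hpp headers; the shapes and quantization parameters are made up
for illustration:

#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

int main()
{
    // Unsigned asymmetric 8-bit tensor, using the new QAsymmU8 name
    // (formerly QuantisedAsymm8).
    armnn::TensorInfo qAsymmInfo(armnn::TensorShape({ 1, 16, 16, 3 }),
                                 armnn::DataType::QAsymmU8,
                                 0.05f, // quantization scale
                                 128);  // quantization offset

    // Signed symmetric 16-bit tensor, using the new QSymmS16 name
    // (formerly QuantisedSymm16); symmetric types need no offset.
    armnn::TensorInfo qSymmInfo(armnn::TensorShape({ 1, 16 }),
                                armnn::DataType::QSymmS16,
                                0.001f);

    return qAsymmInfo.GetDataType() == armnn::DataType::QAsymmU8 ? 0 : 1;
}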