IVGCVSW-2970 Support QSymm16 for FullyConnected workloads

 * Add QSymm16 support for the FullyConnected workload
 * Template the existing Uint8 layer test so it can also run for QSymm16 (see the sketch below)
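
For reference: the test is templated on armnn::DataType and resolves its element
type with armnn::ResolveType (uint8_t for QuantisedAsymm8, int16_t for
QuantisedSymm16), so a single implementation covers both quantized types and is
registered per type in the backend test suites, e.g.

    template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
    LayerTestResult<T, 2> FullyConnectedTest(armnn::IWorkloadFactory& workloadFactory,
                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                             bool biasEnabled);

    ARMNN_AUTO_TEST_CASE(FullyConnectedQSymm16, FullyConnectedTest<armnn::DataType::QuantisedSymm16>, false)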

Change-Id: Ie6e989daf2ca966d6c6805b8017126eb77ebfec4
Signed-off-by: Francis Murtagh <francis.murtagh@arm.com>
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index a1c74df..d9779e4 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -511,6 +511,23 @@
 
     ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
         workloadInfo.m_OutputTensorInfos[0], "FullyConnectedQueueDescriptor", "input", "weights", "output");
+
+    // Check the supported data types
+    std::vector<DataType> supportedTypes =
+    {
+            DataType::Float32,
+            DataType::Float16,
+            DataType::QuantisedAsymm8,
+            DataType::QuantisedSymm16
+    };
+
+    ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
+                      supportedTypes,
+                      "FullyConnectedQueueDescriptor");
+
+    ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
+                      {workloadInfo.m_InputTensorInfos[0].GetDataType()},
+                      "FullyConnectedQueueDescriptor");
 }
 
 //---------------------------------------------------------------
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 0490a94..7631071 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -17,6 +17,7 @@
 #include <backendsCommon/BackendRegistry.hpp>
 #include <backendsCommon/WorkloadFactory.hpp>
 #include <backendsCommon/IBackendInternal.hpp>
+#include <backendsCommon/test/WorkloadTestUtils.hpp>
 
 #include <boost/cast.hpp>
 #include <boost/iterator/transform_iterator.hpp>
@@ -40,26 +41,6 @@
     return TensorInfo(info.GetShape(), type.value(), info.GetQuantizationScale(), info.GetQuantizationOffset());
 }
 
-Optional<DataType> GetBiasTypeFromWeightsType(Optional<DataType> weightsType)
-{
-    if (!weightsType)
-    {
-        return weightsType;
-    }
-
-    switch(weightsType.value())
-    {
-        case DataType::Float16:
-        case DataType::Float32:
-            return weightsType;
-        case DataType::QuantisedAsymm8:
-            return DataType::Signed32;
-        default:
-            BOOST_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
-    }
-    return EmptyOptional();
-}
-
 } // anonymous namespace
 
 bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
diff --git a/src/backends/backendsCommon/test/FullyConnectedTestImpl.hpp b/src/backends/backendsCommon/test/FullyConnectedTestImpl.hpp
index 3e6223a..402a3e6 100644
--- a/src/backends/backendsCommon/test/FullyConnectedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/FullyConnectedTestImpl.hpp
@@ -5,61 +5,8 @@
 
 #include <ResolveType.hpp>
 #include "WorkloadTestUtils.hpp"
-
 #include <backendsCommon/IBackendInternal.hpp>
 
-template<typename T, typename B>
-LayerTestResult<T, 2> SimpleFullyConnectedTestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::TensorInfo inputTensorInfo,
-    armnn::TensorInfo outputTensorInfo,
-    armnn::TensorInfo weightsDesc,
-    armnn::TensorInfo biasesDesc,
-    boost::multi_array<T, 2>& weights,
-    boost::multi_array<B, 1>& bias,
-    boost::multi_array<T, 4>& input,
-    bool biasEnabled,
-    bool transposeWeights)
-{
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::FullyConnectedQueueDescriptor data;
-    armnn::WorkloadInfo info;
-    armnn::ScopedCpuTensorHandle weightsTensor(weightsDesc);
-    armnn::ScopedCpuTensorHandle biasTensor(biasesDesc);
-
-    AllocateAndCopyDataToITensorHandle(&weightsTensor, &weights[0][0]);
-    AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
-
-    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
-    data.m_Weight = &weightsTensor;
-    data.m_Bias = &biasTensor;
-    data.m_Parameters.m_BiasEnabled = biasEnabled;
-    data.m_Parameters.m_TransposeWeightMatrix = transposeWeights;
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFullyConnected(data, info);
-    LayerTestResult<T, 2> result(outputTensorInfo);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
-    ExecuteWorkload(*workload, memoryManager);
-
-    if (workloadFactory.GetBackendId() == armnn::Compute::CpuRef)
-    {
-        workload->PostAllocationConfigure();
-        workload->Execute();
-    }
-
-    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
-
-    return result;
-}
-
 LayerTestResult<float, 2> FullyConnectedFloat32Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -155,72 +102,6 @@
     return result;
 }
 
-LayerTestResult<uint8_t, 2> FullyConnectedUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    bool biasEnabled)
-{
-    constexpr static unsigned int inputWidth = 3u;
-    constexpr static unsigned int inputHeight = 2u;
-    constexpr static unsigned int inputChannels = 1u;
-
-    constexpr static unsigned int inputSize = inputWidth * inputHeight * inputChannels;
-
-    constexpr static unsigned int outputChannels = 2u;
-
-    armnn::TensorInfo inputTensorInfo({ 1, inputChannels, inputHeight, inputWidth }, armnn::DataType::QuantisedAsymm8);
-    inputTensorInfo.SetQuantizationScale(0.1f);
-    inputTensorInfo.SetQuantizationOffset(63);
-
-    armnn::TensorInfo outputTensorInfo({ 1, outputChannels }, armnn::DataType::QuantisedAsymm8);
-    outputTensorInfo.SetQuantizationScale(5.f);
-    outputTensorInfo.SetQuantizationOffset(biasEnabled ? -50 : 10);
-
-    armnn::TensorInfo weightsDesc({ outputChannels, inputSize }, armnn::DataType::QuantisedAsymm8);
-    weightsDesc.SetQuantizationScale(0.2f);
-    weightsDesc.SetQuantizationOffset(93);
-
-    armnn::TensorInfo biasesDesc({ outputChannels }, armnn::DataType::Signed32);
-    biasesDesc.SetQuantizationScale(inputTensorInfo.GetQuantizationScale() * weightsDesc.GetQuantizationScale());
-    biasesDesc.SetQuantizationOffset(0);
-
-    LayerTestResult<uint8_t, 2> result(outputTensorInfo);
-
-    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>{51, 124, 28,
-        251, 8, 92});
-
-    auto weights = MakeTensor<uint8_t, 2>(weightsDesc, std::vector<uint8_t>{51, 193, 42, 53, 175, 34,
-        210, 145, 23, 74, 34, 150});
-
-        // scale = 0.02
-        // offset = 0
-    auto bias = MakeTensor<int32_t, 1>(biasesDesc, std::vector<int32_t>{9250, 67500});
-
-    result = SimpleFullyConnectedTestImpl<uint8_t>(
-        workloadFactory,
-        memoryManager,
-        inputTensorInfo, outputTensorInfo,
-        weightsDesc, biasesDesc,
-        weights, bias, input,
-        biasEnabled, true
-    );
-
-    // Manually calculated.
-    // Note one of these values has been clamped to 0.
-    if (biasEnabled)
-    {
-        result.outputExpected = MakeTensor<uint8_t, 2>(outputTensorInfo, std::vector<uint8_t>{0, 242});
-    }
-    else
-    {
-        result.outputExpected = MakeTensor<uint8_t, 2>(outputTensorInfo, std::vector<uint8_t>{0, 32});
-    }
-
-    return result;
-}
-
-
-
 //
 // ArmNN variant of the AndroidNN fully_connected_float_large test.
 //
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index 9e57ec5..f8f5036 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -8,9 +8,15 @@
 #include <armnn/Tensor.hpp>
 
 #include <Half.hpp>
+#include "TensorCopyUtils.hpp"
+#include "WorkloadTestUtils.hpp"
 
+#include <backendsCommon/CpuTensorHandle.hpp>
 #include <backendsCommon/IBackendInternal.hpp>
 #include <backendsCommon/IMemoryManager.hpp>
+#include <reference/workloads/Decoders.hpp>
+#include <reference/workloads/Encoders.hpp>
+#include <test/TensorHelpers.hpp>
 
 #include <boost/multi_array.hpp>
 #include <boost/assert.hpp>
@@ -793,7 +799,8 @@
     float upperBound,
     float lowerBound);
 
-LayerTestResult<uint8_t, 2> FullyConnectedUint8Test(
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> FullyConnectedTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool biasEnabled);
@@ -1638,3 +1645,139 @@
 LayerTestResult<int16_t, 4> QuantizeClampInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
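+// Builds and executes a FullyConnected workload for the given input, weights and
+// bias tensors, then reads back and returns the resulting output tensor.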
+template<typename T, typename B>
+LayerTestResult<T, 2> SimpleFullyConnectedTestImpl(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        armnn::TensorInfo inputTensorInfo,
+        armnn::TensorInfo outputTensorInfo,
+        armnn::TensorInfo weightsDesc,
+        armnn::TensorInfo biasesDesc,
+        boost::multi_array<T, 2>& weights,
+        boost::multi_array<B, 1>& bias,
+        boost::multi_array<T, 4>& input,
+        bool biasEnabled,
+        bool transposeWeights)
+{
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::FullyConnectedQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    armnn::ScopedCpuTensorHandle weightsTensor(weightsDesc);
+    armnn::ScopedCpuTensorHandle biasTensor(biasesDesc);
+
+    AllocateAndCopyDataToITensorHandle(&weightsTensor, &weights[0][0]);
+    AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
+
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+    data.m_Weight = &weightsTensor;
+    data.m_Bias = &biasTensor;
+    data.m_Parameters.m_BiasEnabled = biasEnabled;
+    data.m_Parameters.m_TransposeWeightMatrix = transposeWeights;
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFullyConnected(data, info);
+    LayerTestResult<T, 2> result(outputTensorInfo);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    ExecuteWorkload(*workload, memoryManager);
+
+    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
+
+    return result;
+}
+
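+// Quantizes a vector of floats into the given target data type, using the
+// quantization parameters of the supplied tensor info and the reference Encoder.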
+template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+std::vector<T> ConvertToDataType(const std::vector<float>& input,
+                                 const armnn::TensorInfo& inputTensorInfo)
+{
+    std::vector<T> output(input.size());
+    auto outputTensorInfo = inputTensorInfo;
+    outputTensorInfo.SetDataType(ArmnnType);
+
+    std::unique_ptr<armnn::Encoder<float>> pOutputEncoder = armnn::MakeEncoder<float>(outputTensorInfo, output.data());
+    armnn::Encoder<float>& rOutputEncoder = *pOutputEncoder;
+
+    for (auto it = input.begin(); it != input.end(); ++it)
+    {
+        rOutputEncoder.Set(*it);
+        ++rOutputEncoder;
+    }
+    return output;
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> FullyConnectedTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        bool biasEnabled)
+{
+    constexpr static unsigned int inputWidth = 3u;
+    constexpr static unsigned int inputHeight = 2u;
+    constexpr static unsigned int inputChannels = 1u;
+
+    constexpr static unsigned int inputSize = inputWidth * inputHeight * inputChannels;
+
+    constexpr static unsigned int outputChannels = 2u;
+
+    armnn::TensorInfo inputTensorInfo({ 1, inputChannels, inputHeight, inputWidth }, ArmnnType);
+    inputTensorInfo.SetQuantizationScale(0.1f);
+    inputTensorInfo.SetQuantizationOffset(63);
+
+    armnn::TensorInfo outputTensorInfo({ 1, outputChannels }, ArmnnType);
+    outputTensorInfo.SetQuantizationScale(5.f);
+    outputTensorInfo.SetQuantizationOffset(biasEnabled ? -50 : 10);
+
+    armnn::TensorInfo weightsDesc({ outputChannels, inputSize }, ArmnnType);
+    weightsDesc.SetQuantizationScale(0.2f);
+    weightsDesc.SetQuantizationOffset(93);
+
+    armnn::TensorInfo biasesDesc({ outputChannels }, GetBiasTypeFromWeightsType(weightsDesc.GetDataType()).value());
+    biasesDesc.SetQuantizationScale(inputTensorInfo.GetQuantizationScale() * weightsDesc.GetQuantizationScale());
+    biasesDesc.SetQuantizationOffset(0);
+
+    LayerTestResult<T, 2> result(outputTensorInfo);
+
+    auto input = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(
+        {
+            -1.2f, 6.1f, -3.5f,
+            18.8f, -5.5f, 2.9f
+        },
+        inputTensorInfo));
+
+    auto weights = MakeTensor<T, 2>(weightsDesc, ConvertToDataType<ArmnnType>(
+        {
+            -8.4f, 20.0f, -10.4f, -8.0f, 16.4f, -11.8f,
+            23.4f, 10.4f, -14.0f, -3.8f, -11.8f, 11.4f
+        },
+        weightsDesc));
+
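+    // Bias values are supplied pre-quantized: scale = inputScale * weightsScale (0.02), offset = 0.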
+    auto bias = MakeTensor<int32_t, 1>(biasesDesc, std::vector<int32_t>{9250, 67500});
+
+    result = SimpleFullyConnectedTestImpl<T>(
+            workloadFactory,
+            memoryManager,
+            inputTensorInfo, outputTensorInfo,
+            weightsDesc, biasesDesc,
+            weights, bias, input,
+            biasEnabled, true
+    );
+
+    if (biasEnabled)
+    {
+        result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
+                                                 ConvertToDataType<ArmnnType>({80.f, 1460.f}, outputTensorInfo));
+    }
+    else
+    {
+        result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
+                                                 ConvertToDataType<ArmnnType>({-107.04f, 110.f}, outputTensorInfo));
+    }
+
+    return result;
+}
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/WorkloadTestUtils.hpp b/src/backends/backendsCommon/test/WorkloadTestUtils.hpp
index 212fea3..2f3e786 100644
--- a/src/backends/backendsCommon/test/WorkloadTestUtils.hpp
+++ b/src/backends/backendsCommon/test/WorkloadTestUtils.hpp
@@ -86,4 +86,26 @@
     }
 }
 
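+// Returns the data type a bias tensor is expected to have for the given weights type:
+// Signed32 for quantized weights, the same type for float weights.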
+inline armnn::Optional<armnn::DataType> GetBiasTypeFromWeightsType(armnn::Optional<armnn::DataType> weightsType)
+{
+    if (!weightsType)
+    {
+        return weightsType;
+    }
+
+    switch(weightsType.value())
+    {
+        case armnn::DataType::Float16:
+        case armnn::DataType::Float32:
+            return weightsType;
+        case armnn::DataType::QuantisedAsymm8:
+            return armnn::DataType::Signed32;
+        case armnn::DataType::QuantisedSymm16:
+            return armnn::DataType::Signed32;
+        default:
+            BOOST_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
+    }
+    return armnn::EmptyOptional();
+}
+
 } // anonymous namespace
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index fe88b39..17be230 100644
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -61,8 +61,8 @@
 ARMNN_AUTO_TEST_CASE(SimpleFullyConnected, FullyConnectedFloat32Test, false, false)
 ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithBias, FullyConnectedFloat32Test, true, false)
 ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithTranspose, FullyConnectedFloat32Test, false, true)
-ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedUint8Test, false)
-ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedUint8Test, true)
+ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedTest<armnn::DataType::QuantisedAsymm8>, false)
+ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedTest<armnn::DataType::QuantisedAsymm8>, true)
 
 ARMNN_AUTO_TEST_CASE(FullyConnectedLarge, FullyConnectedLargeTest, false)
 ARMNN_AUTO_TEST_CASE(FullyConnectedLargeTransposed, FullyConnectedLargeTest, true)
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 48e411d..1ea227a 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -314,8 +314,8 @@
 ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithTranspose, FullyConnectedFloat32Test, false, true)
 ARMNN_AUTO_TEST_CASE(FullyConnectedLarge, FullyConnectedLargeTest, false)
 ARMNN_AUTO_TEST_CASE(FullyConnectedLargeTransposed, FullyConnectedLargeTest, true)
-ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedUint8Test, false)
-ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedUint8Test, true)
+ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedTest<armnn::DataType::QuantisedAsymm8>, false)
+ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedTest<armnn::DataType::QuantisedAsymm8>, true)
 
 // Add
 ARMNN_AUTO_TEST_CASE(SimpleAdd, AdditionTest)
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index b6da628..9be1ed6 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -12,6 +12,7 @@
 #include <armnn/Descriptors.hpp>
 
 #include <backendsCommon/BackendRegistry.hpp>
+#include <backendsCommon/test/WorkloadTestUtils.hpp>
 
 #include <boost/core/ignore_unused.hpp>
 
@@ -108,9 +109,29 @@
     TypeAnyOf(const TensorInfo& info, const Container& c)
     {
         m_Res = std::any_of(c.begin(), c.end(), [&info](DataType dt)
-            {
-                return dt == info.GetDataType();
-            });
+        {
+            return dt == info.GetDataType();
+        });
+    }
+};
+
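+// Rule: the bias data type matches the bias type inferred from the weights data type.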
+struct BiasAndWeightsTypesMatch : public Rule
+{
+    BiasAndWeightsTypesMatch(const TensorInfo& biases, const TensorInfo& weights)
+    {
+        m_Res = biases.GetDataType() == GetBiasTypeFromWeightsType(weights.GetDataType()).value();
+    }
+};
+
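+// Rule: the bias type inferred from the weights data type is one of the given supported bias types.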
+struct BiasAndWeightsTypesCompatible : public Rule
+{
+    template<typename Container>
+    BiasAndWeightsTypesCompatible(const TensorInfo& info, const Container& c)
+    {
+        m_Res = std::any_of(c.begin(), c.end(), [&info](DataType dt)
+        {
+            return dt == GetBiasTypeFromWeightsType(info.GetDataType()).value();
+        });
     }
 };
 
@@ -569,14 +590,53 @@
                                                 const FullyConnectedDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(output);
-    ignore_unused(weights);
-    ignore_unused(biases);
-    ignore_unused(descriptor);
-    return IsSupportedForDataTypeRef(reasonIfUnsupported,
-                                     input.GetDataType(),
-                                     &TrueFunc<>,
-                                     &TrueFunc<>);
+    bool supported = true;
+
+    // Define supported types.
+    std::array<DataType, 3> supportedTypes =
+    {
+            DataType::Float32,
+            DataType::QuantisedAsymm8,
+            DataType::QuantisedSymm16
+    };
+
+    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
+                                  "Reference Fully Connected: input type not supported.");
+
+    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
+                                  "Reference Fully Connected: output type not supported.");
+
+    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
+                                  "Reference Fully Connected: input and output types mismatched.");
+
+    supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
+                                  "Reference Fully Connected: weights type not supported.");
+
+    supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
+                                  "Reference Fully Connected: input and weight types mismatched.");
+
+    if (descriptor.m_BiasEnabled)
+    {
+        // Define supported types for bias
+        std::array<DataType, 2> supportedBiasTypes =
+        {
+            DataType::Float32,
+            DataType::Signed32
+        };
+
+        supported &= CheckSupportRule(TypeAnyOf(biases, supportedBiasTypes), reasonIfUnsupported,
+                                      "Reference Fully Connected: bias type not supported.");
+
+        supported &= CheckSupportRule(BiasAndWeightsTypesMatch(biases, weights), reasonIfUnsupported,
+                                      "Reference Fully Connected: bias and weights types mismatched.");
+
+        supported &= CheckSupportRule(BiasAndWeightsTypesCompatible(weights, supportedBiasTypes), reasonIfUnsupported,
+                                      "Reference Fully Connected: bias type inferred from weights is incompatible.");
+    }
+
+    return supported;
 }
 
 bool RefLayerSupport::IsGatherSupported(const armnn::TensorInfo& input0,
diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp
index 48b85cb..95da7ab 100644
--- a/src/backends/reference/test/RefCreateWorkloadTests.cpp
+++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp
@@ -327,6 +327,11 @@
     RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::QuantisedAsymm8>();
 }
 
+BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadQuantisedSymm16)
+{
+    RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::QuantisedSymm16>();
+}
+
 template <typename NormalizationWorkloadType, armnn::DataType DataType>
 static void RefCreateNormalizationWorkloadTest(DataLayout dataLayout)
 {
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index ed8f02f..1207c1d 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -234,11 +234,13 @@
 ARMNN_AUTO_TEST_CASE(TanhInt16, TanhInt16Test)
 
 
-// Fully Conected
+// Fully Connected
 ARMNN_AUTO_TEST_CASE(SimpleFullyConnected, FullyConnectedFloat32Test, false, false)
-ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedUint8Test, false)
+ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedTest<armnn::DataType::QuantisedAsymm8>, false)
+ARMNN_AUTO_TEST_CASE(FullyConnectedQSymm16, FullyConnectedTest<armnn::DataType::QuantisedSymm16>, false)
 ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithBias, FullyConnectedFloat32Test, true, false)
-ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedUint8Test, true)
+ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedTest<armnn::DataType::QuantisedAsymm8>, true)
+ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedQSymm16, FullyConnectedTest<armnn::DataType::QuantisedSymm16>, true)
 ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithTranspose, FullyConnectedFloat32Test, false, true)
 
 ARMNN_AUTO_TEST_CASE(FullyConnectedLarge, FullyConnectedLargeTest, false)