IVGCVSW-4246 Clean build of LayerTests with -Wextra

Change-Id: If3d98e45a6e2fa8e1afd19a4052334335feacf63
Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
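
Note (not part of the upstream change itself): the pattern applied throughout this patch is to pass otherwise-unused parameters to boost::ignore_unused, which compiles to a no-op but marks them as used, so -Wextra's unused-parameter warning is silenced without changing the shared test signatures. A minimal, self-contained sketch of the idiom follows, using a hypothetical stand-in type rather than the real armnn::IBackendInternal::IMemoryManagerSharedPtr:

    // Sketch only; compile with e.g. g++ -Wall -Wextra -Werror -c sketch.cpp
    #include <boost/core/ignore_unused.hpp>
    #include <memory>

    struct IMemoryManager {};  // hypothetical stand-in for the backend memory manager type

    void ExampleTestImpl(const std::shared_ptr<IMemoryManager>& memoryManager)
    {
        // Without this call, -Wextra reports "unused parameter 'memoryManager'".
        // boost::ignore_unused expands to nothing at runtime.
        boost::ignore_unused(memoryManager);

        // ... test body that never touches the memory manager ...
    }

A few hunks (e.g. in Conv2dTestImpl.cpp) apply the same call to other parameters such as qScale, qOffset and biasEnabled; the mechanism is identical.
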
diff --git a/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp
index 31d102c..ff76a38 100644
--- a/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp
@@ -25,6 +25,7 @@
     const std::vector<float>& inputValues,
     const std::vector<float>& expectedOutputValues)
 {
+    boost::ignore_unused(memoryManager);
     auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputValues,inputTensorInfo));
 
     LayerTestResult<T, 2> result(outputTensorInfo);
@@ -108,6 +109,8 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
+    boost::ignore_unused(memoryManager);
+
     const armnn::TensorShape inputShape{ 3, 1, 2 };
     const armnn::TensorShape outputShape{ 3, 1, 2 };
 
diff --git a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
index a45c6d5..2f2d8db 100644
--- a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
@@ -37,6 +37,7 @@
     unsigned int inputChannels,
     unsigned int inputBatchSize)
 {
+    boost::ignore_unused(memoryManager);
     unsigned int outputWidth = inputWidth;
     unsigned int outputHeight = inputHeight;
     unsigned int outputChannels = inputChannels;
@@ -245,6 +246,7 @@
     float upperBound,
     const armnn::ActivationDescriptor& activationDescriptor)
 {
+    boost::ignore_unused(memoryManager);
     const armnn::TensorInfo inputTensorInfo = BoundedReLuRandomInputTestTraits::GetInputTensorInfo();
     const armnn::TensorInfo outputTensorInfo = BoundedReLuRandomInputTestTraits::GetOutputTensorInfo();
 
@@ -309,6 +311,7 @@
     float qScale = 0.0f,
     int32_t qOffset = 0)
 {
+    boost::ignore_unused(memoryManager);
     unsigned int inputHeight    = 20;
     unsigned int inputWidth     = 17;
     unsigned int inputChannels  = 3;
@@ -400,6 +403,7 @@
     int32_t outOffset,
     const std::vector<float>& outputExpectedData)
 {
+    boost::ignore_unused(memoryManager);
     constexpr static unsigned int inputWidth = 16u;
     constexpr static unsigned int inputHeight = 1u;
     constexpr static unsigned int inputChannels = 1u;
@@ -790,6 +794,7 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
+    boost::ignore_unused(memoryManager);
     const int inputDataSize = 120;
     std::vector<float> inputData(inputDataSize);
 
@@ -1023,6 +1028,7 @@
     float qScale = 0.0f,
     int32_t qOffset = 0)
 {
+    boost::ignore_unused(memoryManager);
     unsigned int width     = 17;
     unsigned int height    = 29;
     unsigned int channels  = 2;
diff --git a/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp
index 247821b..a3a21ab 100644
--- a/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp
@@ -165,6 +165,7 @@
     float qScale,
     int32_t qOffset)
 {
+    boost::ignore_unused(memoryManager);
     armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
     armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
     armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
@@ -247,6 +248,7 @@
     float qScale,
     int32_t qOffset)
 {
+    boost::ignore_unused(memoryManager);
     armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
     armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
     armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
@@ -453,6 +455,8 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
+    boost::ignore_unused(memoryManager);
+
     // Create Initial Tensor
     // 1, 2, 3
     // 4, 5, 6
@@ -559,6 +563,7 @@
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     armnn::IWorkloadFactory& refWorkloadFactory)
 {
+    boost::ignore_unused(memoryManager);
     unsigned int batchSize = 4;
     unsigned int channels  = 1;
     unsigned int height    = 2;
diff --git a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
index 18c9e54..2733100 100644
--- a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
@@ -27,6 +27,7 @@
         const std::vector<int32_t>& outputData,
         int axis = 3)
 {
+    boost::ignore_unused(memoryManager);
     auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputData, inputTensorInfo));
 
     LayerTestResult<int32_t, 3> result(outputTensorInfo);
diff --git a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
index 8471456..7857b35 100644
--- a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
@@ -37,6 +37,7 @@
     int32_t qOffset,
     armnn::DataLayout dataLayout)
 {
+    boost::ignore_unused(memoryManager);
     armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType);
     armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType);
 
@@ -115,6 +116,8 @@
     float qScale,
     int32_t qOffset)
 {
+    boost::ignore_unused(memoryManager);
+
     const unsigned int width    = 2;
     const unsigned int height   = 3;
     const unsigned int channels = 2;
@@ -587,6 +590,7 @@
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     armnn::IWorkloadFactory& refWorkloadFactory)
 {
+    boost::ignore_unused(memoryManager);
     const unsigned int width     = 2;
     const unsigned int height    = 3;
     const unsigned int channels  = 5;
diff --git a/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp
index 79462d1..a5016ff 100644
--- a/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp
@@ -40,6 +40,8 @@
         float scale = 1.0f,
         int32_t offset = 0)
 {
+    boost::ignore_unused(memoryManager);
+
     armnn::TensorInfo inputTensorInfo(InputDim, inputShape, ArmnnType);
     armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, ArmnnType);
 
diff --git a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
index 1c54b85..c8272f4 100644
--- a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
@@ -44,6 +44,7 @@
     float outQuantScale,
     int outQuantOffset)
 {
+    boost::ignore_unused(memoryManager);
     BOOST_ASSERT(shape0.GetNumDimensions() == NumDims);
     armnn::TensorInfo inputTensorInfo0(shape0, ArmnnInType, quantScale0, quantOffset0);
 
diff --git a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
index 53bfc20..57ed754 100644
--- a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
@@ -127,6 +127,7 @@
     const T * inputData,
     std::vector<T>& outputData)
 {
+    boost::ignore_unused(memoryManager);
     BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
     if (inputData == nullptr)
     {
@@ -178,6 +179,7 @@
     unsigned int & concatDim,
     TensorInfo & outputTensorInfo)
 {
+    boost::ignore_unused(memoryManager);
     BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
         "Expecting more than one tensor to be concatenated here");
 
@@ -1917,6 +1919,8 @@
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool useSubtensor)
 {
+    boost::ignore_unused(memoryManager);
+
     // Defines the tensor descriptors.
     TensorInfo outputTensorInfo({ 3, 6, 3 }, ArmnnType);
     TensorInfo inputTensorInfo1({ 3, 6, 2 }, ArmnnType);
@@ -2070,6 +2074,8 @@
     IWorkloadFactory& workloadFactory,
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
+    boost::ignore_unused(memoryManager);
+
     unsigned int outputWidth = 3;
     unsigned int outputHeight = 6;
     unsigned int outputChannels = 3;
@@ -2341,6 +2347,8 @@
     IWorkloadFactory& workloadFactory,
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
+    boost::ignore_unused(memoryManager);
+
     unsigned int outputWidth = 3;
     unsigned int outputHeight = 6;
     unsigned int outputChannels = 3;
@@ -2484,6 +2492,8 @@
     IWorkloadFactory& workloadFactory,
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
+    boost::ignore_unused(memoryManager);
+
     unsigned int outputWidth = 3;
     unsigned int outputHeight = 6;
     unsigned int outputChannels = 3;
@@ -2620,6 +2630,8 @@
         IWorkloadFactory& workloadFactory,
         const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
+    boost::ignore_unused(memoryManager);
+
     unsigned int outputWidth = 3;
     unsigned int outputHeight = 6;
     unsigned int outputChannels = 3;
diff --git a/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
index 1790819..cfb6263 100644
--- a/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
@@ -29,6 +29,7 @@
     float qScale,
     int32_t qOffset)
 {
+    boost::ignore_unused(memoryManager);
     constexpr unsigned int inputWidth = 3;
     constexpr unsigned int inputHeight = 4;
     constexpr unsigned int inputChannels = 3;
diff --git a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
index e825fff..a00fda7 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
@@ -218,6 +218,7 @@
     uint32_t dilationX = 1,
     uint32_t dilationY = 1)
 {
+    boost::ignore_unused(memoryManager);
     unsigned int inputHeight   = boost::numeric_cast<unsigned int>(originalInput.shape()[2]);
     unsigned int inputWidth    = boost::numeric_cast<unsigned int>(originalInput.shape()[3]);
     unsigned int inputChannels = boost::numeric_cast<unsigned int>(originalInput.shape()[1]);
@@ -381,6 +382,7 @@
     uint32_t strideX  = 1,
     uint32_t strideY  = 1)
 {
+    boost::ignore_unused(qScale, qOffset);
     unsigned int inputNum       = boost::numeric_cast<unsigned int>(input.shape()[0]);
     unsigned int inputChannels  = boost::numeric_cast<unsigned int>(input.shape()[3]);
     unsigned int inputHeight    = boost::numeric_cast<unsigned int>(input.shape()[1]);
@@ -586,6 +588,7 @@
     bool biasEnabled,
     armnn::DataLayout dataLayout)
 {
+    boost::ignore_unused(biasEnabled);
     // Use common single-batch 5x5 image.
 
     armnn::TensorInfo inputDesc({1, 3, 4, 1}, ArmnnType);
@@ -638,6 +641,8 @@
         bool biasEnabled,
         const armnn::DataLayout& dataLayout)
 {
+    boost::ignore_unused(biasEnabled);
+
     // Input is a single-batch, 1 channel, 5x5 image.
     armnn::TensorInfo inputDesc({1, 5, 5, 1}, ArmnnType);
     boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp
index 61eb03a..5fe644e 100644
--- a/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp
@@ -18,6 +18,7 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
+    boost::ignore_unused(memoryManager);
     using namespace half_float::literal;
 
     const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float16);
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp
index e5184e0..f758f4b 100644
--- a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp
@@ -16,6 +16,7 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
+    boost::ignore_unused(memoryManager);
     using namespace half_float::literal;
 
     const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
diff --git a/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp
index 023bbae..a4db568 100644
--- a/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp
@@ -31,6 +31,7 @@
     const float qScale = 1.0f,
     const int32_t qOffset = 0)
 {
+    boost::ignore_unused(memoryManager);
     if(armnn::IsQuantizedType<T>())
     {
         inputTensorInfo.SetQuantizationScale(qScale);
diff --git a/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp
index 4e8c938..5e5cba3 100644
--- a/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp
@@ -30,6 +30,7 @@
     const float qScale = 1.0f,
     const int32_t qOffset = 0)
 {
+    boost::ignore_unused(memoryManager);
     if (descriptor.m_Parameters.m_DataLayout == armnn::DataLayout::NCHW)
     {
         PermuteTensorNhwcToNchw<float>(inputInfo, inputData);
diff --git a/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp
index 844b109..fb225ae 100644
--- a/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp
@@ -27,6 +27,7 @@
         const std::vector<T1>& expectedOutputData,
         armnn::DequantizeQueueDescriptor descriptor)
 {
+    boost::ignore_unused(memoryManager);
     boost::multi_array<T, Dim> input = MakeTensor<T, Dim>(inputTensorInfo, inputData);
 
     LayerTestResult<T1, Dim> ret(outputTensorInfo);
diff --git a/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp
index 9a110a3..b908f96 100644
--- a/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp
@@ -20,6 +20,7 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
+    boost::ignore_unused(memoryManager);
     const unsigned int width        = 2u;
     const unsigned int height       = 2u;
     const unsigned int channelCount = 2u;
diff --git a/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
index 1ce9d2d..d25673b 100644
--- a/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
@@ -18,6 +18,7 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
+    boost::ignore_unused(memoryManager);
     constexpr unsigned int width = 2;
     constexpr unsigned int height = 3;
 
diff --git a/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp
index 40ed8a2..a3d29da 100644
--- a/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp
@@ -16,6 +16,7 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
+    boost::ignore_unused(memoryManager);
     armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, ArmnnType);
     inputTensorInfo.SetQuantizationScale(0.1f);
 
diff --git a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
index cf101ee..a3fe858 100644
--- a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
@@ -35,6 +35,7 @@
         bool biasEnabled,
         bool transposeWeights)
 {
+    boost::ignore_unused(memoryManager);
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
diff --git a/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp
index 5e38e48..1ccf51c 100644
--- a/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp
@@ -32,6 +32,7 @@
     const std::vector<int32_t>& indicesData,
     const std::vector<T>& outputData)
 {
+    boost::ignore_unused(memoryManager);
     auto params  = MakeTensor<T, ParamsDim>(paramsInfo, paramsData);
     auto indices = MakeTensor<int32_t, IndicesDim>(indicesInfo, indicesData);
 
diff --git a/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
index c734a2d..468a34d 100644
--- a/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
@@ -35,6 +35,7 @@
     float qScale = 0.0f,
     int32_t qOffset = 0)
 {
+    boost::ignore_unused(memoryManager);
     auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
                                         armnnUtils::QuantizedVector<T>(inputValues, qScale, qOffset));
 
diff --git a/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp
index d67f7b6..4b16921 100644
--- a/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp
@@ -33,6 +33,7 @@
     const armnn::DataLayout layout,
     float epsilon = 1e-12f)
 {
+    boost::ignore_unused(memoryManager);
     const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType, scale, offset);
     const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType, outScale, outOffset);
 
diff --git a/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
index 3a2f39a..dab8e49 100644
--- a/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
@@ -37,6 +37,7 @@
     float qScale = 1.0f,
     int32_t qOffset = 0)
 {
+    boost::ignore_unused(memoryManager);
     LayerTestResult<T, NumDims> result(outputInfo);
     result.outputExpected =
         MakeTensor<T, NumDims>(outputInfo, armnnUtils::QuantizedVector<T>(expectedOutputValues, qScale, qOffset));
diff --git a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
index 6cea777..e755aa5 100644
--- a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
@@ -143,6 +143,7 @@
         int32_t qOffset = 0,
         armnn::DataType constantDataType = armnn::DataType::Float32)
 {
+    boost::ignore_unused(memoryManager);
     unsigned int batchSize = boost::numeric_cast<unsigned int>(input.shape()[0]);
     unsigned int inputSize = boost::numeric_cast<unsigned int>(input.shape()[1]);
     unsigned int outputSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
@@ -345,6 +346,7 @@
                                                   int32_t qOffset = 0,
                                                   armnn::DataType constantDataType = armnn::DataType::Float32)
 {
+    boost::ignore_unused(memoryManager);
     unsigned int batchSize = 2;
     unsigned int outputSize = 16;
     unsigned int inputSize = 5;
@@ -1059,6 +1061,7 @@
         int32_t qOffset = 0,
         armnn::DataType constantDataType = armnn::DataType::Float32)
 {
+    boost::ignore_unused(memoryManager);
     bool cifgEnabled = true;
     bool peepholeEnabled = true;
     bool projectionEnabled = false;
@@ -1283,6 +1286,7 @@
                                                   int32_t qOffset = 0,
                                                   armnn::DataType constantDataType = armnn::DataType::Float32)
 {
+    boost::ignore_unused(memoryManager);
     unsigned int batchSize = 2;
     unsigned int outputSize = 3;
     unsigned int inputSize = 5;
@@ -1549,6 +1553,7 @@
     const boost::multi_array<uint8_t, 2>& input,
     const boost::multi_array<uint8_t, 2>& outputExpected)
 {
+    boost::ignore_unused(memoryManager);
     auto numBatches = boost::numeric_cast<unsigned int>(input.shape()[0]);
     auto inputSize = boost::numeric_cast<unsigned int>(input.shape()[1]);
     auto outputSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
diff --git a/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp
index 07e2bef..0218697 100644
--- a/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp
@@ -19,6 +19,7 @@
 LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
                                            const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
+    boost::ignore_unused(memoryManager);
     const unsigned int width        = 2u;
     const unsigned int height       = 2u;
     const unsigned int channelCount = 2u;
diff --git a/src/backends/backendsCommon/test/layerTests/MeanTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/MeanTestImpl.hpp
index bab7337..b8eae1c 100644
--- a/src/backends/backendsCommon/test/layerTests/MeanTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/MeanTestImpl.hpp
@@ -28,6 +28,8 @@
         float scale = 1.0f,
         int32_t offset = 0)
 {
+    boost::ignore_unused(memoryManager);
+
     armnn::TensorInfo inputTensorInfo(InputDim, inputShape, ArmnnType);
     armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, ArmnnType);
 
diff --git a/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp
index bf66950..ed12c7f 100644
--- a/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp
@@ -20,6 +20,7 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
+    boost::ignore_unused(memoryManager);
     unsigned int shape0[] = { 1, 2, 2, 2 };
     unsigned int shape1[] = { 1, 1, 1, 1 };
 
diff --git a/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp
index 99b1b18..a39e6bd 100644
--- a/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp
@@ -401,6 +401,7 @@
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     armnn::IWorkloadFactory& refWorkloadFactory)
 {
+    boost::ignore_unused(memoryManager);
     const unsigned int width = 16;
     const unsigned int height = 32;
     const unsigned int channelCount = 2;
diff --git a/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
index f65960f..ef82855 100644
--- a/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
@@ -24,6 +24,7 @@
     armnn::NormalizationAlgorithmChannel normChannel,
     armnn::NormalizationAlgorithmMethod normMethod)
 {
+    boost::ignore_unused(memoryManager);
     const unsigned int inputHeight = 2;
     const unsigned int inputWidth = 2;
     const unsigned int inputChannels = 1;
diff --git a/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
index 0f9a30e..3a8d2b7 100644
--- a/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
@@ -24,6 +24,7 @@
     int32_t qOffset,
     const float customPaddingValue)
 {
+    boost::ignore_unused(memoryManager);
     const armnn::TensorShape inputShape{ 3, 3 };
     const armnn::TensorShape outputShape{ 7, 7 };
 
@@ -95,6 +96,7 @@
     float qScale,
     int32_t qOffset)
 {
+    boost::ignore_unused(memoryManager);
     const armnn::TensorShape inputShape{ 2, 2, 2 };
     const armnn::TensorShape outputShape{ 3, 5, 6 };
 
@@ -178,6 +180,7 @@
     float qScale,
     int32_t qOffset)
 {
+    boost::ignore_unused(memoryManager);
     const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
     const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
 
diff --git a/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp
index c5039a1..83746f3 100644
--- a/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp
@@ -26,6 +26,7 @@
         const std::vector<T>& inputData,
         const std::vector<T>& outputExpectedData)
 {
+    boost::ignore_unused(memoryManager);
     auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
 
     LayerTestResult<T, 4> ret(outputTensorInfo);
diff --git a/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp
index 2012dfd..160e658 100644
--- a/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp
@@ -38,6 +38,7 @@
     const boost::multi_array<T, 4>& input,
     const boost::multi_array<T, 4>& outputExpected)
 {
+    boost::ignore_unused(memoryManager);
     const armnn::DataLayout dataLayout = descriptor.m_DataLayout;
     const armnnUtils::DataLayoutIndexed dimensionIndices = dataLayout;
     auto heightIndex = dimensionIndices.GetHeightIndex();
@@ -739,6 +740,7 @@
     float qScale = 1.0f,
     int32_t qOffset = 0)
 {
+    boost::ignore_unused(memoryManager);
     const unsigned int inputWidth = 16;
     const unsigned int inputHeight = 32;
     const unsigned int channelCount = 2;
diff --git a/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp
index 99f3cd1..2021349 100644
--- a/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp
@@ -25,6 +25,8 @@
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
+    boost::ignore_unused(memoryManager);
+
     armnn::TensorInfo inputTensorInfo ({ 1, 2, 2, 3 }, ArmnnType);
     armnn::TensorInfo alphaTensorInfo ({ 1, 1, 1, 3 }, ArmnnType);
     armnn::TensorInfo outputTensorInfo({ 1, 2, 2, 3 }, ArmnnType);
diff --git a/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp
index 481f681..e23f92a 100644
--- a/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp
@@ -30,6 +30,7 @@
     const std::vector<T>& expectedOutputData,
     armnn::QuantizeQueueDescriptor descriptor)
 {
+    boost::ignore_unused(memoryManager);
     boost::multi_array<float, Dim> input = MakeTensor<float, Dim>(inputTensorInfo, inputData);
 
     LayerTestResult<T, Dim> ret(outputTensorInfo);
diff --git a/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp
index bce24f0..485e7ea 100644
--- a/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp
@@ -23,6 +23,7 @@
     const std::vector<T>& inputData,
     const std::vector<T>& outputExpectedData)
 {
+    boost::ignore_unused(memoryManager);
     auto input = MakeTensor<T, NumDims>(inputTensorInfo, inputData);
 
     LayerTestResult<T, NumDims> ret(outputTensorInfo);
diff --git a/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp
index 198c60a..080155e 100644
--- a/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp
@@ -74,6 +74,7 @@
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const ResizeTestParams& params)
 {
+    boost::ignore_unused(memoryManager);
     armnn::TensorInfo inputInfo(params.m_InputShape, ArmnnType);
     armnn::TensorInfo outputInfo(params.m_OutputShape, ArmnnType);
 
diff --git a/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp
index 3adb797..24a3b21 100644
--- a/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp
@@ -25,6 +25,7 @@
     const std::vector<float>& inputValues,
     const std::vector<float>& expectedOutputValues)
 {
+    boost::ignore_unused(memoryManager);
     auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputValues,inputTensorInfo));
 
     LayerTestResult<T, 2> result(outputTensorInfo);
@@ -97,6 +98,7 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
+    boost::ignore_unused(memoryManager);
     const armnn::TensorShape inputShape{ 3, 1, 2 };
     const armnn::TensorShape outputShape{ 3, 1, 2 };
 
diff --git a/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp
index a60b189..65b1716 100644
--- a/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp
@@ -30,6 +30,7 @@
     const float qScale = 1.0f,
     const int qOffset = 0)
 {
+    boost::ignore_unused(memoryManager);
     if(armnn::IsQuantizedType<T>())
     {
         inputInfo.SetQuantizationScale(qScale);
diff --git a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
index a5f6477..2a1aa76 100644
--- a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
@@ -66,6 +66,7 @@
     const std::vector<float>& inputData,
     int axis = 1)
 {
+    boost::ignore_unused(memoryManager);
     using std::exp;
 
     const float qScale = 1.f / 256.f;
diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp
index ea2130d..2793875 100644
--- a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp
@@ -32,6 +32,7 @@
     const float qScale = 1.0f,
     const int32_t qOffset = 0)
 {
+    boost::ignore_unused(memoryManager);
     const armnn::PermutationVector NCHWToNHWC = {0, 3, 1, 2};
     if (descriptor.m_Parameters.m_DataLayout == armnn::DataLayout::NHWC)
     {
diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp
index c4b11a7..b6bf530 100644
--- a/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp
@@ -32,6 +32,7 @@
     const float qScale = 1.0f,
     const int32_t qOffset = 0)
 {
+    boost::ignore_unused(memoryManager);
     const armnn::PermutationVector NHWCToNCHW = {0, 2, 3, 1};
 
     if (descriptor.m_Parameters.m_DataLayout == armnn::DataLayout::NCHW)
diff --git a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp
index 7aebdd0..c8c2f9c 100644
--- a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp
@@ -25,6 +25,7 @@
     float qScale = 0.0f,
     int32_t qOffset = 0)
 {
+    boost::ignore_unused(memoryManager);
     unsigned int inputWidth = 5;
     unsigned int inputHeight = 6;
     unsigned int inputChannels = 3;
@@ -257,6 +258,7 @@
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale, int32_t qOffset)
 {
+    boost::ignore_unused(memoryManager);
     const armnn::TensorInfo tensorInfo({ 3, 6, 5 }, ArmnnType, qScale, qOffset);
     auto input = MakeTensor<T, 3>(
         tensorInfo,
diff --git a/src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp
index db9038e..1bf4967 100644
--- a/src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp
@@ -31,6 +31,7 @@
         const std::vector<std::vector<T>>& inputData,
         const std::vector<T>& outputExpectedData)
 {
+    boost::ignore_unused(memoryManager);
     unsigned int numInputs = static_cast<unsigned int>(inputData.size());
     std::vector<boost::multi_array<T, outputDimLength-1>> inputs;
     for (unsigned int i = 0; i < numInputs; ++i)
diff --git a/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp
index 8082be4..23f5df0 100644
--- a/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp
@@ -30,6 +30,7 @@
     const float qScale = 1.0f,
     const int32_t qOffset = 0)
 {
+    boost::ignore_unused(memoryManager);
     if(armnn::IsQuantizedType<T>())
     {
         inputTensorInfo.SetQuantizationScale(qScale);
diff --git a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
index 71b08aa..4b4894f 100644
--- a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
@@ -52,6 +52,7 @@
                                     const TensorData<T>& weights,
                                     const armnn::Optional<TensorData<BT>>& biases)
 {
+    boost::ignore_unused(memoryManager);
     using namespace armnn;
 
     VerifyInputTensorData(input, "input");