IVGCVSW-4979 'Remove CreateTensorHandle using WorkloadFactory in workload tests'

* Small refactor of the unit tests that use TensorHandleFactory: the factory is now passed by const reference instead of by pointer (see the sketch below)

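  A minimal sketch of the new calling convention (the test name, tensor shape and
  body below are illustrative only and not part of this patch; the parameter types
  and the '.' call pattern are the ones the patch introduces):

      LayerTestResult<float, 2> ExampleLayerTest(
          armnn::IWorkloadFactory& workloadFactory,
          const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
          const armnn::ITensorHandleFactory& tensorHandleFactory)  // was: armnn::ITensorHandleFactory*
      {
          IgnoreUnused(memoryManager);

          armnn::TensorInfo tensorInfo({ 1, 4 }, armnn::DataType::Float32);
          LayerTestResult<float, 2> result(tensorInfo);

          // CreateTensorHandle is now called through the reference ('.')
          // instead of through a pointer ('->'), so no null check is needed.
          std::unique_ptr<armnn::ITensorHandle> inputHandle  = tensorHandleFactory.CreateTensorHandle(tensorInfo);
          std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(tensorInfo);

          return result;
      }
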
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I1a702941890034a45029c014c8b11e185f45a807
diff --git a/src/armnn/test/UnitTests.hpp b/src/armnn/test/UnitTests.hpp
index c049f57..a77e255 100644
--- a/src/armnn/test/UnitTests.hpp
+++ b/src/armnn/test/UnitTests.hpp
@@ -88,7 +88,7 @@
 
     auto tensorHandleFactory = WorkloadFactoryHelper<FactoryType>::GetTensorHandleFactory(memoryManager);
 
-    auto testResult = (*testFunction)(workloadFactory, memoryManager, &tensorHandleFactory, args...);
+    auto testResult = (*testFunction)(workloadFactory, memoryManager, tensorHandleFactory, args...);
     CompareTestResultIfSupported(testName, testResult);
 
     armnn::ProfilerManager::GetInstance().RegisterProfiler(nullptr);
@@ -136,7 +136,7 @@
         RefWorkloadFactoryHelper::GetTensorHandleFactory(memoryManager);
 
     auto testResult = (*testFunction)(
-        workloadFactory, memoryManager, refWorkloadFactory, &tensorHandleFactory, &refTensorHandleFactory, args...);
+        workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, args...);
     CompareTestResultIfSupported(testName, testResult);
 }
 
diff --git a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
index 153d726..d63cc04 100644
--- a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
@@ -19,7 +19,7 @@
 LayerTestResult<int32_t, 3> ArgMinMaxTestCommon(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
-        armnn::ITensorHandleFactory* tensorHandleFactory,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
         armnn::ArgMinMaxFunction argMinMaxFunction,
         const armnn::TensorInfo inputTensorInfo,
         const armnn::TensorInfo outputTensorInfo,
@@ -32,8 +32,8 @@
     LayerTestResult<int32_t, 3> result(outputTensorInfo);
     result.outputExpected = MakeTensor<int32_t, 3>(outputTensorInfo, outputData);
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle  = tensorHandleFactory->CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory->CreateTensorHandle(outputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle  = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
 
     armnn::ArgMinMaxQueueDescriptor descriptor;
     descriptor.m_Parameters.m_Function = argMinMaxFunction;
@@ -64,7 +64,7 @@
 LayerTestResult<int32_t, 3> ArgMaxSimpleTest(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory)
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
     const armnn::TensorShape outputShape{ 1, 1, 1 };
@@ -92,7 +92,7 @@
 LayerTestResult<int32_t, 3> ArgMinSimpleTest(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory)
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
     const armnn::TensorShape outputShape{ 1, 1, 1 };
@@ -120,7 +120,7 @@
 LayerTestResult<int32_t, 3> ArgMinChannelTest(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory)
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     const armnn::TensorShape inputShape{ 1, 3, 2, 4};
     const armnn::TensorShape outputShape{ 1, 2, 4 };
@@ -156,7 +156,7 @@
 LayerTestResult<int32_t, 3> ArgMaxChannelTest(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory)
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     const armnn::TensorShape inputShape{ 1, 3, 2, 4};
     const armnn::TensorShape outputShape{ 1, 2, 4 };
@@ -192,7 +192,7 @@
 LayerTestResult<int32_t, 3> ArgMaxHeightTest(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory)
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     const armnn::TensorShape inputShape{ 1, 3, 2, 4};
     const armnn::TensorShape outputShape{ 1, 3, 4 };
@@ -228,7 +228,7 @@
 LayerTestResult<int32_t, 3> ArgMinWidthTest(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory)
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     const armnn::TensorShape inputShape{ 1, 3, 2, 4};
     const armnn::TensorShape outputShape{ 1, 3, 2 };
@@ -267,202 +267,202 @@
 ArgMaxSimpleTest<armnn::DataType::Float32>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<int32_t, 3>
 ArgMaxSimpleTest<armnn::DataType::Float16>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<int32_t, 3>
 ArgMaxSimpleTest<armnn::DataType::QAsymmS8>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<int32_t, 3>
 ArgMaxSimpleTest<armnn::DataType::QAsymmU8>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<int32_t, 3>
 ArgMaxSimpleTest<armnn::DataType::QSymmS16>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<int32_t, 3>
 ArgMaxSimpleTest<armnn::DataType::Signed32>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<int32_t, 3>
 ArgMinSimpleTest<armnn::DataType::Float32>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<int32_t, 3>
 ArgMinSimpleTest<armnn::DataType::Float16>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<int32_t, 3>
 ArgMinSimpleTest<armnn::DataType::QAsymmS8>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<int32_t, 3>
 ArgMinSimpleTest<armnn::DataType::QAsymmU8>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<int32_t, 3>
 ArgMinSimpleTest<armnn::DataType::QSymmS16>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<int32_t, 3>
 ArgMinSimpleTest<armnn::DataType::Signed32>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<int32_t, 3>
 ArgMinChannelTest<armnn::DataType::Float32>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<int32_t, 3>
 ArgMinChannelTest<armnn::DataType::Float16>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<int32_t, 3>
 ArgMinChannelTest<armnn::DataType::QAsymmS8>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<int32_t, 3>
 ArgMinChannelTest<armnn::DataType::QAsymmU8>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<int32_t, 3>
 ArgMinChannelTest<armnn::DataType::QSymmS16>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<int32_t, 3>
 ArgMinChannelTest<armnn::DataType::Signed32>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<int32_t, 3>
 ArgMaxChannelTest<armnn::DataType::Float32>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<int32_t, 3>
 ArgMaxChannelTest<armnn::DataType::Float16>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<int32_t, 3>
 ArgMaxChannelTest<armnn::DataType::QAsymmS8>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<int32_t, 3>
 ArgMaxChannelTest<armnn::DataType::QAsymmU8>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<int32_t, 3>
 ArgMaxChannelTest<armnn::DataType::QSymmS16>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<int32_t, 3>
 ArgMaxChannelTest<armnn::DataType::Signed32>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<int32_t, 3>
 ArgMaxHeightTest<armnn::DataType::Float32>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<int32_t, 3>
 ArgMaxHeightTest<armnn::DataType::Float16>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<int32_t, 3>
 ArgMaxHeightTest<armnn::DataType::Signed32>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<int32_t, 3>
 ArgMaxHeightTest<armnn::DataType::QAsymmS8>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<int32_t, 3>
 ArgMaxHeightTest<armnn::DataType::QAsymmU8>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<int32_t, 3>
 ArgMinWidthTest<armnn::DataType::Float32>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<int32_t, 3>
 ArgMinWidthTest<armnn::DataType::Float16>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<int32_t, 3>
 ArgMinWidthTest<armnn::DataType::Signed32>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<int32_t, 3>
 ArgMinWidthTest<armnn::DataType::QAsymmS8>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<int32_t, 3>
 ArgMinWidthTest<armnn::DataType::QAsymmU8>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
diff --git a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.hpp
index 2f3483e..47c34a7 100644
--- a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.hpp
@@ -16,29 +16,29 @@
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
 LayerTestResult<int32_t, 3> ArgMaxSimpleTest(armnn::IWorkloadFactory& workloadFactory,
                                              const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-                                             armnn::ITensorHandleFactory* tensorHandleFactory);
+                                             const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
 LayerTestResult<int32_t, 3> ArgMinSimpleTest(armnn::IWorkloadFactory& workloadFactory,
                                              const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-                                             armnn::ITensorHandleFactory* tensorHandleFactory);
+                                             const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
 LayerTestResult<int32_t, 3> ArgMinChannelTest(armnn::IWorkloadFactory& workloadFactory,
                                               const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-                                              armnn::ITensorHandleFactory* tensorHandleFactory);
+                                              const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
 LayerTestResult<int32_t, 3> ArgMaxChannelTest(armnn::IWorkloadFactory& workloadFactory,
                                               const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-                                              armnn::ITensorHandleFactory* tensorHandleFactory);
+                                              const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
 LayerTestResult<int32_t, 3> ArgMaxHeightTest(armnn::IWorkloadFactory& workloadFactory,
                                              const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-                                             armnn::ITensorHandleFactory* tensorHandleFactory);
+                                             const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
 LayerTestResult<int32_t, 3> ArgMinWidthTest(armnn::IWorkloadFactory& workloadFactory,
                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-                                            armnn::ITensorHandleFactory* tensorHandleFactory);
\ No newline at end of file
+                                            const armnn::ITensorHandleFactory& tensorHandleFactory);
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
index 8c535a6..f864400 100644
--- a/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
@@ -16,7 +16,7 @@
 LayerTestResult<float, 2> FakeQuantizationTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory)
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     IgnoreUnused(memoryManager);
     constexpr unsigned int width = 2;
@@ -33,8 +33,8 @@
 
     LayerTestResult<float, 2> ret(tensorInfo);
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle   = tensorHandleFactory->CreateTensorHandle(tensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle  = tensorHandleFactory->CreateTensorHandle(tensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle   = tensorHandleFactory.CreateTensorHandle(tensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle  = tensorHandleFactory.CreateTensorHandle(tensorInfo);
 
     armnn::FakeQuantizationQueueDescriptor data;
     armnn::WorkloadInfo info;
diff --git a/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.hpp
index 38f642e..47d8859 100644
--- a/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.hpp
@@ -14,4 +14,4 @@
 LayerTestResult<float, 2> FakeQuantizationTest(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
diff --git a/src/backends/backendsCommon/test/layerTests/FillTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FillTestImpl.cpp
index f848cfd..f96d337 100644
--- a/src/backends/backendsCommon/test/layerTests/FillTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FillTestImpl.cpp
@@ -15,7 +15,7 @@
 LayerTestResult<T, 4> SimpleFillTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory)
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     IgnoreUnused(memoryManager);
     armnn::TensorInfo inputTensorInfo({4}, armnn::DataType::Signed32);
@@ -31,8 +31,8 @@
           1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f },
         outputTensorInfo));
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory->CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory->CreateTensorHandle(outputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
 
     armnn::FillQueueDescriptor data;
     data.m_Parameters.m_Value = 1.0f;
@@ -61,16 +61,16 @@
 SimpleFillTest<armnn::DataType::Float32>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory);
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
 SimpleFillTest<armnn::DataType::Float16>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory);
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<armnn::ResolveType<armnn::DataType::Signed32>, 4>
 SimpleFillTest<armnn::DataType::Signed32>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory);
\ No newline at end of file
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/layerTests/FillTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/FillTestImpl.hpp
index dab77a7..0eaffd1 100644
--- a/src/backends/backendsCommon/test/layerTests/FillTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/FillTestImpl.hpp
@@ -16,4 +16,4 @@
 LayerTestResult<T, 4> SimpleFillTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory);
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
diff --git a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
index d26b3db..c4cc914 100644
--- a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
@@ -59,7 +59,7 @@
 LayerTestResult<T, n> SimpleSoftmaxBaseTestImpl(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     float beta,
     const armnn::TensorShape& inputShape,
     const std::vector<float>& outputData,
@@ -87,8 +87,8 @@
     // Each row is independently softmax'd.
     auto input = MakeTensor<T, n>(inputTensorInfo, armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset));
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle  = tensorHandleFactory->CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory->CreateTensorHandle(outputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle  = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
 
     armnn::SoftmaxQueueDescriptor data;
     data.m_Parameters.m_Beta = beta;
@@ -120,7 +120,7 @@
 LayerTestResult<T, 2> SimpleSoftmaxTestImpl(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     float beta)
 {
     using std::exp;
@@ -150,7 +150,7 @@
 LayerTestResult<T, 2> SimpleSoftmaxTestImpl(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
         float beta,
         int axis)
 {
@@ -206,7 +206,7 @@
 LayerTestResult<T, 3> Simple3dSoftmaxTestImpl(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     float beta,
     const armnn::TensorShape& inputShape,
     const std::vector<float>& outputData,
@@ -221,7 +221,7 @@
 LayerTestResult<T, 4> Simple4dSoftmaxTestImpl(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     float beta,
     const armnn::TensorShape& inputShape,
     const std::vector<float>& outputData,
@@ -238,8 +238,8 @@
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         armnn::IWorkloadFactory& refWorkloadFactory,
-        const armnn::ITensorHandleFactory* tensorHandleFactory,
-        const armnn::ITensorHandleFactory* refTensorHandleFactory,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
+        const armnn::ITensorHandleFactory& refTensorHandleFactory,
         float beta)
 {
     const int batchSize = 20;
@@ -263,8 +263,8 @@
     LayerTestResult<T, 2> ret(outputTensorInfo);
     auto input = MakeRandomTensor<T, 2>(inputTensorInfo, 0xF00D, 0.0f, 1.0f);
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle  = tensorHandleFactory->CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory->CreateTensorHandle(outputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle  = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
 
     armnn::SoftmaxQueueDescriptor data;
     data.m_Parameters.m_Beta = beta;
@@ -274,9 +274,9 @@
     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
 
     std::unique_ptr<armnn::ITensorHandle> outputHandleRef =
-        refTensorHandleFactory->CreateTensorHandle(outputTensorInfo);
+        refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> inputHandleRef  =
-        refTensorHandleFactory->CreateTensorHandle(inputTensorInfo);
+        refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);
 
     armnn::SoftmaxQueueDescriptor refData = data;
     armnn::WorkloadInfo refInfo = info;
@@ -310,7 +310,7 @@
 LayerTestResult<float,2> SimpleSoftmaxTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     float beta)
 {
     return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, beta);
@@ -319,7 +319,7 @@
 LayerTestResult<float,2> SimpleAxisSoftmaxTest(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
         float beta,
         int axis)
 {
@@ -330,7 +330,7 @@
 LayerTestResult<float,3> Simple3dSoftmaxTest(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
         float beta)
 {
     Simple3dSoftmaxOutputData data;
@@ -341,7 +341,7 @@
 LayerTestResult<float,3> Simple3dAxisSoftmaxTest(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
         float beta,
         int axis)
 {
@@ -436,7 +436,7 @@
 LayerTestResult<float,4> Simple4dSoftmaxTest(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
         float beta)
 {
     Simple4dSoftmaxData data;
@@ -447,7 +447,7 @@
 LayerTestResult<float,4> Simple4dAxisSoftmaxTest(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
         float beta,
         int axis)
 {
@@ -613,7 +613,7 @@
 LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     float beta)
 {
     return SimpleSoftmaxTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, beta);
@@ -622,7 +622,7 @@
 LayerTestResult<uint8_t,3> Simple3dSoftmaxUint8Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
         float beta)
 {
     Simple3dSoftmaxOutputData data;
@@ -639,7 +639,7 @@
 LayerTestResult<uint8_t,4> Simple4dSoftmaxUint8Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
         float beta)
 {
     Simple4dSoftmaxData data;
@@ -651,7 +651,7 @@
 LayerTestResult<armnn::Half,2> SimpleSoftmaxFloat16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
         float beta)
 {
     return SimpleSoftmaxTestImpl<armnn::DataType::Float16>(workloadFactory, memoryManager, tensorHandleFactory, beta);
@@ -660,7 +660,7 @@
 LayerTestResult<armnn::Half,3> Simple3dSoftmaxFloat16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
         float beta)
 {
     Simple3dSoftmaxOutputData data;
@@ -671,7 +671,7 @@
 LayerTestResult<armnn::Half,4> Simple4dSoftmaxFloat16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
         float beta)
 {
     Simple4dSoftmaxData data;
@@ -682,7 +682,7 @@
 LayerTestResult<int16_t,2> SimpleSoftmaxUint16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
         float beta)
 {
     return SimpleSoftmaxTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, beta);
@@ -691,7 +691,7 @@
 LayerTestResult<int16_t,3> Simple3dSoftmaxUint16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
         float beta)
 {
     Simple3dSoftmaxOutputData data;
@@ -702,7 +702,7 @@
 LayerTestResult<int16_t,4> Simple4dSoftmaxUint16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
         float beta)
 {
     Simple4dSoftmaxData data;
@@ -715,8 +715,8 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     armnn::IWorkloadFactory& refWorkloadFactory,
-    const armnn::ITensorHandleFactory* tensorHandleFactory,
-    const armnn::ITensorHandleFactory* refTensorHandleFactory,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
+    const armnn::ITensorHandleFactory& refTensorHandleFactory,
     float beta)
 {
     return CompareSoftmaxTestImpl<armnn::DataType::Float32>(
@@ -727,8 +727,8 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     armnn::IWorkloadFactory& refWorkloadFactory,
-    const armnn::ITensorHandleFactory* tensorHandleFactory,
-    const armnn::ITensorHandleFactory* refTensorHandleFactory,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
+    const armnn::ITensorHandleFactory& refTensorHandleFactory,
     float beta)
 {
     return CompareSoftmaxTestImpl<armnn::DataType::QAsymmU8>(
diff --git a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.hpp
index 478d380..e95cd5e 100644
--- a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.hpp
@@ -16,108 +16,108 @@
 LayerTestResult<float, 2> SimpleSoftmaxTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     float beta);
 
 LayerTestResult<float, 2> SimpleAxisSoftmaxTest(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
         float beta,
         int axis);
 
 LayerTestResult<float, 3> Simple3dSoftmaxTest(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
         float beta);
 
 LayerTestResult<float, 3> Simple3dAxisSoftmaxTest(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
         float beta,
         int axis);
 
 LayerTestResult<float, 4> Simple4dSoftmaxTest(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
         float beta);
 
 LayerTestResult<float, 4> Simple4dAxisSoftmaxTest(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
         float beta,
         int axis);
 
 LayerTestResult<uint8_t, 2> SimpleSoftmaxUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     float beta);
 
 LayerTestResult<uint8_t,3> Simple3dSoftmaxUint8Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
         float beta);
 
 LayerTestResult<uint8_t,4> Simple4dSoftmaxUint8Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
         float beta);
 
 LayerTestResult<armnn::Half,2> SimpleSoftmaxFloat16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
         float beta);
 
 LayerTestResult<armnn::Half,3> Simple3dSoftmaxFloat16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
         float beta);
 
 LayerTestResult<armnn::Half,4> Simple4dSoftmaxFloat16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
         float beta);
 
 LayerTestResult<int16_t,2> SimpleSoftmaxUint16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
         float beta);
 
 LayerTestResult<int16_t,3> Simple3dSoftmaxUint16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
         float beta);
 
 LayerTestResult<int16_t,4> Simple4dSoftmaxUint16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
         float beta);
 
 LayerTestResult<float, 2> CompareSoftmaxTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     armnn::IWorkloadFactory& refWorkloadFactory,
-    const armnn::ITensorHandleFactory* tensorHandleFactory,
-    const armnn::ITensorHandleFactory* refTensorHandleFactory,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
+    const armnn::ITensorHandleFactory& refTensorHandleFactory,
     float beta);
 
 LayerTestResult<uint8_t, 2> CompareSoftmaxUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     armnn::IWorkloadFactory& refWorkloadFactory,
-    const armnn::ITensorHandleFactory* tensorHandleFactory,
-    const armnn::ITensorHandleFactory* refTensorHandleFactory,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
+    const armnn::ITensorHandleFactory& refTensorHandleFactory,
     float beta);
diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp
index 3682e9d..6dbf820 100644
--- a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp
@@ -23,7 +23,7 @@
 LayerTestResult<T, 4> SpaceToBatchNdTestImpl(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     armnn::TensorInfo& inputTensorInfo,
     armnn::TensorInfo& outputTensorInfo,
     std::vector<float>& inputData,
@@ -65,8 +65,8 @@
     ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
                                           armnnUtils::QuantizedVector<T>(outputExpectedData, qScale, qOffset));
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle  = tensorHandleFactory->CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory->CreateTensorHandle(outputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle  = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
 
     armnn::WorkloadInfo info;
     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
@@ -90,7 +90,7 @@
 LayerTestResult<T, 4> SpaceToBatchNdSimpleTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     armnn::DataLayout dataLayout = armnn::DataLayout::NCHW)
 {
     armnn::TensorInfo inputTensorInfo;
@@ -126,7 +126,7 @@
 LayerTestResult<T, 4> SpaceToBatchNdMultiChannelsTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     armnn::DataLayout dataLayout = armnn::DataLayout::NCHW)
 {
     armnn::TensorInfo inputTensorInfo;
@@ -167,7 +167,7 @@
 LayerTestResult<T, 4> SpaceToBatchNdMultiBlockTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     armnn::DataLayout dataLayout = armnn::DataLayout::NCHW)
 {
     armnn::TensorInfo inputTensorInfo;
@@ -209,7 +209,7 @@
 LayerTestResult<T, 4> SpaceToBatchNdPaddingTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     armnn::DataLayout dataLayout = armnn::DataLayout::NCHW)
 {
     armnn::TensorInfo inputTensorInfo;
@@ -255,7 +255,7 @@
 LayerTestResult<T, 4> SpaceToBatchNdSimpleNhwcTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory)
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return SpaceToBatchNdSimpleTest<ArmnnType>(workloadFactory,
                                                memoryManager,
@@ -267,7 +267,7 @@
 LayerTestResult<T, 4> SpaceToBatchNdMultiChannelsNhwcTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory)
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return SpaceToBatchNdMultiChannelsTest<ArmnnType>(workloadFactory,
                                                       memoryManager,
@@ -279,7 +279,7 @@
 LayerTestResult<T, 4> SpaceToBatchNdMultiBlockNhwcTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory)
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return SpaceToBatchNdMultiBlockTest<ArmnnType>(workloadFactory,
                                                    memoryManager,
@@ -291,7 +291,7 @@
 LayerTestResult<T, 4> SpaceToBatchNdPaddingNhwcTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory)
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return SpaceToBatchNdPaddingTest<ArmnnType>(workloadFactory,
                                                 memoryManager,
@@ -304,7 +304,7 @@
 LayerTestResult<float, 4> SpaceToBatchNdSimpleFloat32Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory)
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return SpaceToBatchNdSimpleTest<armnn::DataType::Float32>(workloadFactory,
                                                               memoryManager,
@@ -314,7 +314,7 @@
 LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsFloat32Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory)
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return SpaceToBatchNdMultiChannelsTest<armnn::DataType::Float32>(workloadFactory,
                                                                      memoryManager,
@@ -324,7 +324,7 @@
 LayerTestResult<float, 4> SpaceToBatchNdMultiBlockFloat32Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory)
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return SpaceToBatchNdMultiBlockTest<armnn::DataType::Float32>(workloadFactory,
                                                                   memoryManager,
@@ -334,7 +334,7 @@
 LayerTestResult<float, 4> SpaceToBatchNdPaddingFloat32Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory)
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return SpaceToBatchNdPaddingTest<armnn::DataType::Float32>(workloadFactory,
                                                                memoryManager,
@@ -344,7 +344,7 @@
 LayerTestResult<armnn::Half, 4> SpaceToBatchNdSimpleFloat16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory)
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return SpaceToBatchNdSimpleTest<armnn::DataType::Float16>(workloadFactory,
                                                               memoryManager,
@@ -354,7 +354,7 @@
 LayerTestResult<armnn::Half, 4> SpaceToBatchNdMultiChannelsFloat16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory)
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return SpaceToBatchNdMultiChannelsTest<armnn::DataType::Float16>(workloadFactory,
                                                                      memoryManager,
@@ -364,7 +364,7 @@
 LayerTestResult<armnn::Half, 4> SpaceToBatchNdMultiBlockFloat16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory)
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return SpaceToBatchNdMultiBlockTest<armnn::DataType::Float16>(workloadFactory,
                                                                   memoryManager,
@@ -374,7 +374,7 @@
 LayerTestResult<armnn::Half, 4> SpaceToBatchNdPaddingFloat16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory)
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return SpaceToBatchNdPaddingTest<armnn::DataType::Float16>(workloadFactory,
                                                                memoryManager,
@@ -384,7 +384,7 @@
 LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory)
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return SpaceToBatchNdSimpleTest<armnn::DataType::QAsymmU8>(workloadFactory,
                                                                memoryManager,
@@ -394,7 +394,7 @@
 LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory)
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QAsymmU8>(workloadFactory,
                                                                       memoryManager,
@@ -404,7 +404,7 @@
 LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory)
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return SpaceToBatchNdMultiBlockTest<armnn::DataType::QAsymmU8>(workloadFactory,
                                                                    memoryManager,
@@ -414,7 +414,7 @@
 LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory)
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return SpaceToBatchNdPaddingTest<armnn::DataType::QAsymmU8>(workloadFactory,
                                                                 memoryManager,
@@ -424,7 +424,7 @@
 LayerTestResult<float, 4> SpaceToBatchNdSimpleNhwcFloat32Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory)
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return SpaceToBatchNdSimpleNhwcTest<armnn::DataType::Float32>(workloadFactory,
                                                                   memoryManager,
@@ -434,7 +434,7 @@
 LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsNhwcFloat32Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory)
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return SpaceToBatchNdMultiChannelsNhwcTest<armnn::DataType::Float32>(workloadFactory,
                                                                          memoryManager,
@@ -444,7 +444,7 @@
 LayerTestResult<float, 4> SpaceToBatchNdMultiBlockNhwcFloat32Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory)
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return SpaceToBatchNdMultiBlockNhwcTest<armnn::DataType::Float32>(workloadFactory,
                                                                       memoryManager,
@@ -454,7 +454,7 @@
 LayerTestResult<float, 4> SpaceToBatchNdPaddingNhwcFloat32Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory)
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return SpaceToBatchNdPaddingNhwcTest<armnn::DataType::Float32>(workloadFactory,
                                                                    memoryManager,
@@ -464,7 +464,7 @@
 LayerTestResult<armnn::Half, 4> SpaceToBatchNdSimpleNhwcFloat16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory)
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return SpaceToBatchNdSimpleNhwcTest<armnn::DataType::Float16>(workloadFactory,
                                                                   memoryManager,
@@ -474,7 +474,7 @@
 LayerTestResult<armnn::Half, 4> SpaceToBatchNdMultiChannelsNhwcFloat16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory)
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return SpaceToBatchNdMultiChannelsNhwcTest<armnn::DataType::Float16>(workloadFactory,
                                                                          memoryManager,
@@ -484,7 +484,7 @@
 LayerTestResult<armnn::Half, 4> SpaceToBatchNdMultiBlockNhwcFloat16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory)
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return SpaceToBatchNdMultiBlockNhwcTest<armnn::DataType::Float16>(workloadFactory,
                                                                       memoryManager,
@@ -494,7 +494,7 @@
 LayerTestResult<armnn::Half, 4> SpaceToBatchNdPaddingNhwcFloat16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory)
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return SpaceToBatchNdPaddingNhwcTest<armnn::DataType::Float16>(workloadFactory,
                                                                    memoryManager,
@@ -504,7 +504,7 @@
 LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNhwcUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory)
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return SpaceToBatchNdSimpleNhwcTest<armnn::DataType::QAsymmU8>(workloadFactory,
                                                                    memoryManager,
@@ -514,7 +514,7 @@
 LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNhwcUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory)
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return SpaceToBatchNdMultiChannelsNhwcTest<armnn::DataType::QAsymmU8>(workloadFactory,
                                                                           memoryManager,
@@ -524,7 +524,7 @@
 LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNhwcUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory)
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return SpaceToBatchNdMultiBlockNhwcTest<armnn::DataType::QAsymmU8>(workloadFactory,
                                                                        memoryManager,
@@ -534,7 +534,7 @@
 LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNhwcUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory)
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return SpaceToBatchNdPaddingNhwcTest<armnn::DataType::QAsymmU8>(workloadFactory,
                                                                     memoryManager,
@@ -544,7 +544,7 @@
 LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleUint16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory)
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return SpaceToBatchNdSimpleTest<armnn::DataType::QSymmS16>(workloadFactory,
                                                                memoryManager,
@@ -554,7 +554,7 @@
 LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsUint16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory)
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QSymmS16>(workloadFactory,
                                                                       memoryManager,
@@ -564,7 +564,7 @@
 LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockUint16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory)
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return SpaceToBatchNdMultiBlockTest<armnn::DataType::QSymmS16>(workloadFactory,
                                                                    memoryManager,
@@ -574,7 +574,7 @@
 LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingUint16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory)
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return SpaceToBatchNdPaddingTest<armnn::DataType::QSymmS16>(workloadFactory,
                                                                 memoryManager,
@@ -584,7 +584,7 @@
 LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleNhwcUint16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory)
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return SpaceToBatchNdSimpleNhwcTest<armnn::DataType::QSymmS16>(workloadFactory,
                                                                    memoryManager,
@@ -594,7 +594,7 @@
 LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsNhwcUint16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory)
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return SpaceToBatchNdMultiChannelsNhwcTest<armnn::DataType::QSymmS16>(workloadFactory,
                                                                           memoryManager,
@@ -604,7 +604,7 @@
 LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockNhwcUint16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory)
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return SpaceToBatchNdMultiBlockNhwcTest<armnn::DataType::QSymmS16>(workloadFactory,
                                                                        memoryManager,
@@ -614,7 +614,7 @@
 LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingNhwcUint16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory)
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return SpaceToBatchNdPaddingNhwcTest<armnn::DataType::QSymmS16>(workloadFactory,
                                                                     memoryManager,
diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.hpp
index 4180187..69ee99b 100644
--- a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.hpp
@@ -14,159 +14,159 @@
 LayerTestResult<float, 4> SpaceToBatchNdSimpleFloat32Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory);
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsFloat32Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory);
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<float, 4> SpaceToBatchNdMultiBlockFloat32Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory);
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<float, 4> SpaceToBatchNdPaddingFloat32Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory);
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<armnn::Half, 4> SpaceToBatchNdSimpleFloat16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory);
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<armnn::Half, 4> SpaceToBatchNdMultiChannelsFloat16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory);
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<armnn::Half, 4> SpaceToBatchNdMultiBlockFloat16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory);
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<armnn::Half, 4> SpaceToBatchNdPaddingFloat16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory);
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory);
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory);
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory);
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory);
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<float, 4> SpaceToBatchNdSimpleNhwcFloat32Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory);
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsNhwcFloat32Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory);
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<float, 4> SpaceToBatchNdMultiBlockNhwcFloat32Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory);
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<float, 4> SpaceToBatchNdPaddingNhwcFloat32Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory);
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<armnn::Half, 4> SpaceToBatchNdSimpleNhwcFloat16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory);
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<armnn::Half, 4> SpaceToBatchNdMultiChannelsNhwcFloat16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory);
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<armnn::Half, 4> SpaceToBatchNdMultiBlockNhwcFloat16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory);
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<armnn::Half, 4> SpaceToBatchNdPaddingNhwcFloat16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory);
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNhwcUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory);
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNhwcUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory);
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNhwcUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory);
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNhwcUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::ITensorHandleFactory* tensorHandleFactory);
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleUint16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsUint16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockUint16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingUint16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleNhwcUint16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsNhwcUint16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockNhwcUint16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingNhwcUint16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        armnn::ITensorHandleFactory* tensorHandleFactory);
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
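 
A minimal sketch of the calling convention these declarations now imply, assuming the usual layer-test headers; the function name, shapes, and body below are illustrative only and are not part of this patch:

// Hypothetical example (not in this change): a layer test taking the tensor
// handle factory by const reference and creating handles with '.' instead of
// '->'. Include paths are assumed from the existing test tree.
#include <armnn/Tensor.hpp>                   // TensorShape, TensorInfo
#include <armnn/utility/IgnoreUnused.hpp>     // armnn::IgnoreUnused
// ... plus the existing headers providing LayerTestResult, IWorkloadFactory
//     and ITensorHandleFactory ...

LayerTestResult<float, 4> SpaceToBatchNdUsageSketch(   // hypothetical name
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    armnn::IgnoreUnused(workloadFactory, memoryManager); // not needed in this sketch

    const armnn::TensorShape inputShape{ 1, 1, 2, 2 };
    const armnn::TensorShape outputShape{ 4, 1, 1, 1 };
    armnn::TensorInfo inputInfo(inputShape, armnn::DataType::Float32);
    armnn::TensorInfo outputInfo(outputShape, armnn::DataType::Float32);

    // Reference-based calls replace tensorHandleFactory->CreateTensorHandle(...)
    std::unique_ptr<armnn::ITensorHandle> inputHandle  = tensorHandleFactory.CreateTensorHandle(inputInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);

    // ... build the SpaceToBatchNd queue descriptor and workload exactly as
    //     the existing implementations above do, using the two handles ...

    LayerTestResult<float, 4> result(outputInfo);
    return result;
}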