IVGCVSW-3267 Add more code coverage to the PReLU layer

 * Added more unit tests to cover all code branches
 * Moved the InferOutput tests to separate files
 * Created a convenience ARMNN_SIMPLE_TEST_CASE macro (usage sketched below)
 * Created TestUtils file for common utility functions

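For reference, a minimal sketch of how the new macro registers one of the free
test functions; all names are taken from the diff below:

    #include "InferOutputTests.hpp"     // declares the free test functions
    #include <test/UnitTests.hpp>       // defines ARMNN_SIMPLE_TEST_CASE
    #include <boost/test/unit_test.hpp>

    BOOST_AUTO_TEST_SUITE(LayerValidateOutput)

    // Expands to a BOOST_AUTO_TEST_CASE named PreluInferOutputShapeNoMatch
    // whose body simply calls PreluInferOutputShapeNoMatchTest().
    ARMNN_SIMPLE_TEST_CASE(PreluInferOutputShapeNoMatch, PreluInferOutputShapeNoMatchTest)

    BOOST_AUTO_TEST_SUITE_END()
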
Change-Id: Id971d3cf77005397d1f0b2783fab68b1f0bf9dfc
Signed-off-by: Matteo Martincigh <matteo.martincigh@arm.com>
diff --git a/Android.mk b/Android.mk
index b08a4ae..0eb2e01 100644
--- a/Android.mk
+++ b/Android.mk
@@ -254,6 +254,7 @@
         src/armnn/test/UtilsTests.cpp \
         src/armnn/test/GraphTests.cpp \
         src/armnn/test/GraphUtils.cpp \
+        src/armnn/test/InferOutputTests.cpp \
         src/armnn/test/RuntimeTests.cpp \
         src/armnn/test/SubgraphViewTests.cpp \
         src/armnn/test/TensorTest.cpp \
@@ -261,7 +262,8 @@
         src/armnn/test/InstrumentTests.cpp \
         src/armnn/test/ProfilingEventTest.cpp \
         src/armnn/test/ObservableTest.cpp \
-        src/armnn/test/OptionalTest.cpp
+        src/armnn/test/OptionalTest.cpp \
+        src/armnn/test/TestUtils.cpp
 
 LOCAL_STATIC_LIBRARIES := \
         libneuralnetworks_common \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 976f8de..8fa2bf9 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -471,7 +471,8 @@
         src/armnn/test/GraphUtils.cpp
         src/armnn/test/GraphUtils.hpp
         src/armnn/test/InstrumentTests.cpp
-        src/armnn/test/LayerValidateOutputTest.cpp
+        src/armnn/test/InferOutputTests.cpp
+        src/armnn/test/InferOutputTests.hpp
         src/armnn/test/ModelAccuracyCheckerTest.cpp
         src/armnn/test/NetworkTests.cpp
         src/armnn/test/ObservableTest.cpp
@@ -493,6 +494,8 @@
         src/armnn/test/TestNameOnlyLayerVisitor.hpp
         src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
         src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
+        src/armnn/test/TestUtils.cpp
+        src/armnn/test/TestUtils.hpp
         src/armnn/test/UnitTests.cpp
         src/armnn/test/UnitTests.hpp
         src/armnn/test/UtilsTests.cpp
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index b075744..b071977 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -4,9 +4,7 @@
 //
 #pragma once
 
-#include <boost/test/unit_test.hpp>
-
-#include <boost/cast.hpp>
+#include "TestUtils.hpp"
 
 #include <backendsCommon/WorkloadData.hpp>
 #include <backendsCommon/WorkloadFactory.hpp>
@@ -17,8 +15,10 @@
 #include <Network.hpp>
 #include <ResolveType.hpp>
 
-#include <utility>
+#include <boost/test/unit_test.hpp>
+#include <boost/cast.hpp>
 
+#include <utility>
 
 using namespace armnn;
 
@@ -40,13 +40,6 @@
     return std::unique_ptr<Workload>(static_cast<Workload*>(workload.release()));
 }
 
-// Connects two layers.
-void Connect(Layer* from, Layer* to, const TensorInfo& tensorInfo, unsigned int fromIndex = 0, unsigned int toIndex = 0)
-{
-    from->GetOutputSlot(fromIndex).Connect(to->GetInputSlot(toIndex));
-    from->GetOutputHandler(fromIndex).SetTensorInfo(tensorInfo);
-}
-
 // Helper function to create tensor handlers for workloads, assuming they all use the same factory.
 void CreateTensorHandles(armnn::Graph& graph, armnn::IWorkloadFactory& factory)
 {
@@ -1280,23 +1273,30 @@
     return workloadConstant;
 }
 
-template <typename PreluWorkload, armnn::DataType DataType>
+template <typename PreluWorkload>
 std::unique_ptr<PreluWorkload> CreatePreluWorkloadTest(armnn::IWorkloadFactory& factory,
                                                        armnn::Graph& graph,
-                                                       const armnn::TensorShape& outputShape)
+                                                       const armnn::TensorShape& inputShape,
+                                                       const armnn::TensorShape& alphaShape,
+                                                       const armnn::TensorShape& outputShape,
+                                                       armnn::DataType dataType)
 {
     // Creates the PReLU layer
     Layer* const layer = graph.AddLayer<PreluLayer>("prelu");
+    BOOST_CHECK(layer != nullptr);
 
     // Creates extra layers
     Layer* const input  = graph.AddLayer<InputLayer> (0, "input");
     Layer* const alpha  = graph.AddLayer<InputLayer> (1, "alpha");
     Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+    BOOST_CHECK(input  != nullptr);
+    BOOST_CHECK(alpha  != nullptr);
+    BOOST_CHECK(output != nullptr);
 
     // Connects up
-    armnn::TensorInfo inputTensorInfo ({ 1, 4, 1, 2 }, DataType);
-    armnn::TensorInfo alphaTensorInfo ({ 5, 4, 3, 1 }, DataType);
-    armnn::TensorInfo outputTensorInfo(outputShape,    DataType);
+    armnn::TensorInfo inputTensorInfo (inputShape,  dataType);
+    armnn::TensorInfo alphaTensorInfo (alphaShape,  dataType);
+    armnn::TensorInfo outputTensorInfo(outputShape, dataType);
     Connect(input, layer,  inputTensorInfo,  0, 0);
     Connect(alpha, layer,  alphaTensorInfo,  0, 1);
     Connect(layer, output, outputTensorInfo, 0, 0);
diff --git a/src/armnn/test/InferOutputTests.cpp b/src/armnn/test/InferOutputTests.cpp
new file mode 100644
index 0000000..6ce56e9
--- /dev/null
+++ b/src/armnn/test/InferOutputTests.cpp
@@ -0,0 +1,28 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "InferOutputTests.hpp"
+
+#include <test/UnitTests.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_AUTO_TEST_SUITE(LayerValidateOutput)
+
+// BatchToSpace
+ARMNN_SIMPLE_TEST_CASE(BatchToSpaceInferOutputShape, BatchToSpaceInferOutputShapeTest)
+
+// SpaceToDepth
+ARMNN_SIMPLE_TEST_CASE(SpaceToDepthInferOutputShape, SpaceToDepthInferOutputShapeTest)
+
+// PReLU
+ARMNN_SIMPLE_TEST_CASE(PreluInferOutputShapeSameDims,              PreluInferOutputShapeSameDimsTest)
+ARMNN_SIMPLE_TEST_CASE(PreluInferOutputShapeInputBigger,           PreluInferOutputShapeInputBiggerTest)
+ARMNN_SIMPLE_TEST_CASE(PreluInferOutputShapeAlphaBigger,           PreluInferOutputShapeAlphaBiggerTest)
+ARMNN_SIMPLE_TEST_CASE(PreluInferOutputShapeNoMatch,               PreluInferOutputShapeNoMatchTest)
+ARMNN_SIMPLE_TEST_CASE(PreluValidateTensorShapesFromInputsMatch,   PreluValidateTensorShapesFromInputsMatchTest)
+ARMNN_SIMPLE_TEST_CASE(PreluValidateTensorShapesFromInputsNoMatch, PreluValidateTensorShapesFromInputsNoMatchTest)
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnn/test/InferOutputTests.hpp b/src/armnn/test/InferOutputTests.hpp
new file mode 100644
index 0000000..6e5602a
--- /dev/null
+++ b/src/armnn/test/InferOutputTests.hpp
@@ -0,0 +1,195 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn/ArmNN.hpp>
+
+#include <Graph.hpp>
+#include <layers/BatchToSpaceNdLayer.hpp>
+#include <layers/SpaceToDepthLayer.hpp>
+#include <layers/PreluLayer.hpp>
+
+#include <boost/algorithm/string.hpp>
+#include <boost/test/unit_test.hpp>
+
+void BatchToSpaceInferOutputShapeTest()
+{
+    armnn::Graph graph;
+
+    armnn::BatchToSpaceNdDescriptor descriptor;
+    descriptor.m_BlockShape = {2, 2};
+    descriptor.m_Crops = {{0, 0}, {2, 0}};
+    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+
+    armnn::BatchToSpaceNdLayer* const batchToSpaceLayer =
+        graph.AddLayer<armnn::BatchToSpaceNdLayer>(descriptor, "batchToSpace");
+
+    std::vector<armnn::TensorShape> shapes;
+    const std::vector<unsigned int> theDimSizes = {8, 1, 3, 1};
+    armnn::TensorShape shape(4, theDimSizes.data());
+    shapes.push_back(shape);
+
+    const std::vector<unsigned int> expectedDimSizes = {2, 2, 4, 1};
+    armnn::TensorShape expectedShape(4, expectedDimSizes.data());
+
+    BOOST_CHECK(expectedShape == batchToSpaceLayer->InferOutputShapes(shapes).at(0));
+}
+
+void SpaceToDepthInferOutputShapeTest()
+{
+    armnn::Graph graph;
+
+    armnn::SpaceToDepthDescriptor descriptor;
+    descriptor.m_BlockSize  = 2;
+    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+
+    armnn::SpaceToDepthLayer* const spaceToDepthLayer =
+        graph.AddLayer<armnn::SpaceToDepthLayer>(descriptor, "spaceToDepth");
+
+    std::vector<armnn::TensorShape> shapes;
+    const std::vector<unsigned int> dimSizes{ 1, 16, 8, 3 };
+    armnn::TensorShape shape(4, dimSizes.data());
+    shapes.push_back(shape);
+
+    const std::vector<unsigned int> expectedDimSizes{ 1, 8, 4, 12 };
+    armnn::TensorShape expectedShape(4, expectedDimSizes.data());
+
+    BOOST_CHECK(expectedShape == spaceToDepthLayer->InferOutputShapes(shapes).at(0));
+}
+
+void PreluInferOutputShapeImpl(const std::vector<armnn::TensorShape>& inputShapes,
+                               std::vector<armnn::TensorShape>&       outputShapes)
+{
+    armnn::Graph graph;
+    armnn::PreluLayer* const preluLayer = graph.AddLayer<armnn::PreluLayer>("prelu");
+    outputShapes = preluLayer->InferOutputShapes(inputShapes);
+}
+
+void PreluInferOutputShapeSameDimsTest()
+{
+    const std::vector<armnn::TensorShape> inputShapes
+    {
+        { 5, 1, 1, 7 }, // Input shape
+        { 5, 4, 3, 1 }  // Alpha shape
+    };
+
+    const std::vector<armnn::TensorShape> expectedOutputShapes
+    {
+        { 5, 4, 3, 7 }  // Output shape
+    };
+
+    std::vector<armnn::TensorShape> outputShapes;
+    BOOST_CHECK_NO_THROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));
+
+    BOOST_CHECK(outputShapes.size() == 1);
+    BOOST_CHECK(outputShapes[0] == expectedOutputShapes[0]);
+}
+
+void PreluInferOutputShapeInputBiggerTest()
+{
+    const std::vector<armnn::TensorShape> inputShapes
+    {
+        { 4, 1, 4, 8 }, // Input shape
+        { 5, 4, 1 }     // Alpha shape
+    };
+
+    const std::vector<armnn::TensorShape> expectedOutputShapes
+    {
+        { 4, 5, 4, 8 } // Output shape
+    };
+
+    std::vector<armnn::TensorShape> outputShapes;
+    BOOST_CHECK_NO_THROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));
+
+    BOOST_CHECK(outputShapes.size() == 1);
+    BOOST_CHECK(outputShapes[0] == expectedOutputShapes[0]);
+}
+
+void PreluInferOutputShapeAlphaBiggerTest()
+{
+    const std::vector<armnn::TensorShape> inputShapes
+    {
+        { 4, 1, 2 },   // Input shape
+        { 5, 4, 3, 1 } // Alpha shape
+    };
+
+    const std::vector<armnn::TensorShape> expectedOutputShapes
+    {
+        { 5, 4, 3, 2 } // Output shape
+    };
+
+    std::vector<armnn::TensorShape> outputShapes;
+    BOOST_CHECK_NO_THROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));
+
+    BOOST_CHECK(outputShapes.size() == 1);
+    BOOST_CHECK(outputShapes[0] == expectedOutputShapes[0]);
+}
+
+void PreluInferOutputShapeNoMatchTest()
+{
+    const std::vector<armnn::TensorShape> inputShapes
+    {
+        { 4, 1, 2 },   // Input shape
+        { 5, 4, 3, 1 } // Alpha shape
+    };
+
+    const std::vector<armnn::TensorShape> expectedOutputShapes
+    {
+        { 5, 7, 3, 2 } // Output shape
+    };
+
+    std::vector<armnn::TensorShape> outputShapes;
+    BOOST_CHECK_NO_THROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));
+
+    BOOST_CHECK(outputShapes.size() == 1);
+    BOOST_CHECK(outputShapes[0] != expectedOutputShapes[0]);
+}
+
+void CreatePreluLayerHelper(armnn::Graph& graph,
+                            const armnn::TensorShape& inputShape,
+                            const armnn::TensorShape& alphaShape,
+                            const armnn::TensorShape& outputShape)
+{
+    // Creates the PReLU layer
+    armnn::Layer* const preluLayer = graph.AddLayer<armnn::PreluLayer>("prelu");
+
+    // Creates extra layers
+    armnn::Layer* const input  = graph.AddLayer<armnn::InputLayer> (0, "input");
+    armnn::Layer* const alpha  = graph.AddLayer<armnn::InputLayer> (1, "alpha");
+    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");
+
+    // Connects up
+    armnn::TensorInfo inputTensorInfo (inputShape,  armnn::DataType::Float32);
+    armnn::TensorInfo alphaTensorInfo (alphaShape,  armnn::DataType::Float32);
+    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
+    Connect(input, preluLayer,  inputTensorInfo,  0, 0);
+    Connect(alpha, preluLayer,  alphaTensorInfo,  0, 1);
+    Connect(preluLayer, output, outputTensorInfo, 0, 0);
+}
+
+void PreluValidateTensorShapesFromInputsMatchTest()
+{
+    armnn::Graph graph;
+
+    // Creates the PReLU layer
+    CreatePreluLayerHelper(graph, { 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 });
+
+    // Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
+    BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
+}
+
+void PreluValidateTensorShapesFromInputsNoMatchTest()
+{
+    armnn::Graph graph;
+
+    // Creates the PReLU layer
+    CreatePreluLayerHelper(graph, { 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 7, 3, 2 });
+
+    // Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
+    BOOST_CHECK_THROW(graph.InferTensorInfos(), armnn::LayerValidationException);
+}
diff --git a/src/armnn/test/LayerValidateOutputTest.cpp b/src/armnn/test/LayerValidateOutputTest.cpp
deleted file mode 100644
index d47959c..0000000
--- a/src/armnn/test/LayerValidateOutputTest.cpp
+++ /dev/null
@@ -1,84 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#include <armnn/ArmNN.hpp>
-
-#include <Graph.hpp>
-#include <layers/BatchToSpaceNdLayer.hpp>
-#include <layers/SpaceToDepthLayer.hpp>
-
-#include <boost/algorithm/string.hpp>
-#include <boost/test/unit_test.hpp>
-
-BOOST_AUTO_TEST_SUITE(LayerValidateOutput)
-
-BOOST_AUTO_TEST_CASE(TestBatchToSpaceInferOutputShape)
-{
-    armnn::Graph graph;
-
-    armnn::BatchToSpaceNdDescriptor descriptor;
-    descriptor.m_BlockShape = {2, 2};
-    descriptor.m_Crops = {{0, 0}, {2, 0}};
-    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
-
-    armnn::BatchToSpaceNdLayer* const batchToSpaceLayer =
-        graph.AddLayer<armnn::BatchToSpaceNdLayer>(descriptor, "batchToSpace");
-
-    std::vector<armnn::TensorShape> shapes;
-    const std::vector<unsigned int> theDimSizes = {8, 1, 3, 1};
-    armnn::TensorShape shape(4, theDimSizes.data());
-    shapes.push_back(shape);
-
-    const std::vector<unsigned int> expectedDimSizes = {2, 2, 4, 1};
-    armnn::TensorShape expectedShape(4, expectedDimSizes.data());
-
-    BOOST_CHECK(expectedShape == batchToSpaceLayer->InferOutputShapes(shapes).at(0));
-}
-
-BOOST_AUTO_TEST_CASE(TestSpaceToDepthInferOutputShape)
-{
-    armnn::Graph graph;
-
-    armnn::SpaceToDepthDescriptor descriptor;
-    descriptor.m_BlockSize  = 2;
-    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
-
-    armnn::SpaceToDepthLayer* const spaceToDepthLayer =
-        graph.AddLayer<armnn::SpaceToDepthLayer>(descriptor, "spaceToDepth");
-
-    std::vector<armnn::TensorShape> shapes;
-    const std::vector<unsigned int> dimSizes{ 1, 16, 8, 3 };
-    armnn::TensorShape shape(4, dimSizes.data());
-    shapes.push_back(shape);
-
-    const std::vector<unsigned int> expectedDimSizes{ 1, 8, 4, 12 };
-    armnn::TensorShape expectedShape(4, expectedDimSizes.data());
-
-    BOOST_CHECK(expectedShape == spaceToDepthLayer->InferOutputShapes(shapes).at(0));
-}
-
-BOOST_AUTO_TEST_CASE(TestPreluInferOutputShape)
-{
-    armnn::Graph graph;
-
-    armnn::PreluLayer* const preluLayer = graph.AddLayer<armnn::PreluLayer>("prelu");
-
-    std::vector<armnn::TensorShape> inputShapes
-    {
-        { 4, 1, 2 },  // Input shape
-        { 5, 4, 3, 1} // Alpha shape
-    };
-
-    const std::vector<armnn::TensorShape> expectedOutputShapes
-    {
-        { 5, 4, 3, 2 } // Output shape
-    };
-
-    const std::vector<armnn::TensorShape> outputShapes = preluLayer->InferOutputShapes(inputShapes);
-
-    BOOST_CHECK(outputShapes.size() == 1);
-    BOOST_CHECK(outputShapes[0] == expectedOutputShapes[0]);
-}
-
-BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index b0d8629..97bd8de 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -2,7 +2,8 @@
 // Copyright © 2017 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
-#include <boost/test/unit_test.hpp>
+
+#include "TestUtils.hpp"
 
 #include <armnn/ArmNN.hpp>
 #include <Graph.hpp>
@@ -10,6 +11,10 @@
 #include <backendsCommon/CpuTensorHandle.hpp>
 #include <FloatingPointConverter.hpp>
 
+#include <boost/test/unit_test.hpp>
+
+using namespace armnn;
+
 namespace
 {
 template <typename LayerT>
@@ -52,14 +57,6 @@
     return true;
 }
 
-// connects two layers
-using namespace armnn;
-void Connect(Layer* from, Layer* to, const TensorInfo& tensorInfo, unsigned int fromIndex = 0, unsigned int toIndex = 0)
-{
-    from->GetOutputSlot(fromIndex).Connect(to->GetInputSlot(toIndex));
-    from->GetOutputHandler(fromIndex).SetTensorInfo(tensorInfo);
-}
-
 void CreateLSTMLayerHelper(Graph &graph, bool CifgEnabled)
 {
     LstmDescriptor layerDesc;
diff --git a/src/armnn/test/TestUtils.cpp b/src/armnn/test/TestUtils.cpp
new file mode 100644
index 0000000..b0ed17e
--- /dev/null
+++ b/src/armnn/test/TestUtils.cpp
@@ -0,0 +1,20 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "TestUtils.hpp"
+
+#include <boost/assert.hpp>
+
+using namespace armnn;
+
+void Connect(armnn::IConnectableLayer* from, armnn::IConnectableLayer* to, const armnn::TensorInfo& tensorInfo,
+             unsigned int fromIndex, unsigned int toIndex)
+{
+    BOOST_ASSERT(from);
+    BOOST_ASSERT(to);
+
+    from->GetOutputSlot(fromIndex).Connect(to->GetInputSlot(toIndex));
+    from->GetOutputSlot(fromIndex).SetTensorInfo(tensorInfo);
+}
diff --git a/src/armnn/test/TestUtils.hpp b/src/armnn/test/TestUtils.hpp
new file mode 100644
index 0000000..9129d91
--- /dev/null
+++ b/src/armnn/test/TestUtils.hpp
@@ -0,0 +1,11 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/INetwork.hpp>
+
+void Connect(armnn::IConnectableLayer* from, armnn::IConnectableLayer* to, const armnn::TensorInfo& tensorInfo,
+             unsigned int fromIndex = 0, unsigned int toIndex = 0);
diff --git a/src/armnn/test/UnitTests.hpp b/src/armnn/test/UnitTests.hpp
index 04e91ad..dc97f90 100644
--- a/src/armnn/test/UnitTests.hpp
+++ b/src/armnn/test/UnitTests.hpp
@@ -73,6 +73,12 @@
     CompareTestResultIfSupported(testName, testResult);
 }
 
+#define ARMNN_SIMPLE_TEST_CASE(TestName, TestFunction) \
+    BOOST_AUTO_TEST_CASE(TestName) \
+    { \
+        TestFunction(); \
+    }
+
 #define ARMNN_AUTO_TEST_CASE(TestName, TestFunction, ...) \
     BOOST_AUTO_TEST_CASE(TestName) \
     { \
diff --git a/src/backends/backendsCommon/test/CommonTestUtils.cpp b/src/backends/backendsCommon/test/CommonTestUtils.cpp
index 7685626..950b939 100644
--- a/src/backends/backendsCommon/test/CommonTestUtils.cpp
+++ b/src/backends/backendsCommon/test/CommonTestUtils.cpp
@@ -9,13 +9,6 @@
 
 using namespace armnn;
 
-void Connect(armnn::IConnectableLayer* from, armnn::IConnectableLayer* to, const armnn::TensorInfo& tensorInfo,
-             unsigned int fromIndex, unsigned int toIndex)
-{
-    from->GetOutputSlot(fromIndex).Connect(to->GetInputSlot(toIndex));
-    from->GetOutputSlot(fromIndex).SetTensorInfo(tensorInfo);
-}
-
 SubgraphView::InputSlots CreateInputsFrom(const std::vector<Layer*>& layers)
 {
     SubgraphView::InputSlots result;
diff --git a/src/backends/backendsCommon/test/CommonTestUtils.hpp b/src/backends/backendsCommon/test/CommonTestUtils.hpp
index 5da0228..03c9755 100644
--- a/src/backends/backendsCommon/test/CommonTestUtils.hpp
+++ b/src/backends/backendsCommon/test/CommonTestUtils.hpp
@@ -12,11 +12,9 @@
 #include <backendsCommon/CpuTensorHandle.hpp>
 #include <backendsCommon/BackendRegistry.hpp>
 
-#include <algorithm>
+#include <test/TestUtils.hpp>
 
-// Connects two layers
-void Connect(armnn::IConnectableLayer* from, armnn::IConnectableLayer* to, const armnn::TensorInfo& tensorInfo,
-             unsigned int fromIndex = 0, unsigned int toIndex = 0);
+#include <algorithm>
 
 // Checks that two collections have the exact same contents (in any order)
 // The given collections do not have to contain duplicates
diff --git a/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp b/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp
index d09ebb6..08f6968 100644
--- a/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp
+++ b/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp
@@ -3,6 +3,8 @@
 // SPDX-License-Identifier: MIT
 //
 
+#include "CommonTestUtils.hpp"
+
 #include <Graph.hpp>
 
 #include <backendsCommon/CpuTensorHandle.hpp>
@@ -16,13 +18,6 @@
 using namespace armnn;
 using namespace std;
 
-// connects two layers
-void Connect(Layer* from, Layer* to, const TensorInfo& tensorInfo, unsigned int fromIndex = 0, unsigned int toIndex = 0)
-{
-    from->GetOutputSlot(fromIndex).Connect(to->GetInputSlot(toIndex));
-    from->GetOutputHandler(fromIndex).SetTensorInfo(tensorInfo);
-}
-
 /////////////////////////////////////////////////////////////////////////////////////////////
 // The following test are created specifically to test ReleaseConstantData() method in the Layer
 // They build very simple graphs including the layer will be checked.
diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp
index 14615f8..d174093 100644
--- a/src/backends/reference/test/RefCreateWorkloadTests.cpp
+++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp
@@ -870,32 +870,60 @@
     RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::Signed32>({ 2, 3, 2, 10 });
 }
 
-template <typename armnn::DataType DataType>
-static void RefCreatePreluWorkloadTest(const armnn::TensorShape& outputShape)
+static void RefCreatePreluWorkloadTest(const armnn::TensorShape& inputShape,
+                                       const armnn::TensorShape& alphaShape,
+                                       const armnn::TensorShape& outputShape,
+                                       armnn::DataType dataType)
 {
     armnn::Graph graph;
     RefWorkloadFactory factory;
-    auto workload = CreatePreluWorkloadTest<RefPreluWorkload, DataType>(factory, graph, outputShape);
+    auto workload = CreatePreluWorkloadTest<RefPreluWorkload>(factory,
+                                                              graph,
+                                                              inputShape,
+                                                              alphaShape,
+                                                              outputShape,
+                                                              dataType);
 
     // Check output is as expected
     auto queueDescriptor = workload->GetData();
     auto outputHandle = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, DataType)));
+    BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, dataType)));
 }
 
 BOOST_AUTO_TEST_CASE(CreatePreluFloat32Workload)
 {
-    RefCreatePreluWorkloadTest<armnn::DataType::Float32>({ 5, 4, 3, 2 });
+    RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::Float32);
 }
 
 BOOST_AUTO_TEST_CASE(CreatePreluUint8Workload)
 {
-    RefCreatePreluWorkloadTest<armnn::DataType::QuantisedAsymm8>({ 5, 4, 3, 2 });
+    RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::QuantisedAsymm8);
 }
 
 BOOST_AUTO_TEST_CASE(CreatePreluInt16Workload)
 {
-    RefCreatePreluWorkloadTest<armnn::DataType::QuantisedSymm16>({ 5, 4, 3, 2 });
+    RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::QuantisedSymm16);
+}
+
+BOOST_AUTO_TEST_CASE(CreatePreluFloat32NoBroadcastWorkload)
+{
+    BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
+                                                 armnn::DataType::Float32),
+                      armnn::InvalidArgumentException);
+}
+
+BOOST_AUTO_TEST_CASE(CreatePreluUint8NoBroadcastWorkload)
+{
+    BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
+                                                 armnn::DataType::QuantisedAsymm8),
+                      armnn::InvalidArgumentException);
+}
+
+BOOST_AUTO_TEST_CASE(CreatePreluInt16NoBroadcastWorkload)
+{
+    BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
+                                                 armnn::DataType::QuantisedSymm16),
+                      armnn::InvalidArgumentException);
 }
 
 BOOST_AUTO_TEST_SUITE_END()