IVGCVSW-5328-5329 Fuse Activation

 * Added the Fused Activation optimization to both the CL and Neon backends.
 * Added Fused Activation support to all of the CL and Neon workloads
   that can make use of it.
 * Changed the ProfilingTest network to a Convolution layer followed by
   an Abs layer rather than an Activation layer.
 * Added an IBackendInternal::OptimizeSubgraphView overload that accepts
   a ModelOptions.
 * The Network now calls OptimizeSubgraphView, passing in the ModelOptions
   (see the sketch below).
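
For reviewers, the sketch below is illustrative only and is not part of the
patch. It shows the backend-side pattern this change introduces: the new
OptimizeSubgraphView(subgraph, modelOptions) overload walks the sub-graph,
finds a supported receiver layer (Convolution2d is shown) whose only consumer
is an Activation layer, and substitutes the pair with a single layer that
carries the ActivationDescriptor as additional info, which the CL and Neon
workloads read back when building the ACL ActivationLayerInfo. Identifiers
come from this patch, except "ExampleBackend", which is a placeholder; the
per-layer workload validation (e.g. ClConvolution2dWorkloadValidate) and the
fast-math ModelOptions handling are elided.

    OptimizationViews ExampleBackend::OptimizeSubgraphView(const SubgraphView& subgraph,
                                                           const ModelOptions& /*modelOptions*/) const
    {
        OptimizationViews optimizationViews;

        auto it = subgraph.end();
        while (it != subgraph.begin())
        {
            --it;
            Layer& base = **it;

            // Only consider receivers that do not already have a fused activation.
            if (base.GetType() != LayerType::Convolution2d ||
                base.GetAdditionalInformation<ActivationDescriptor>() != nullptr)
            {
                continue;
            }

            for (auto output = base.BeginOutputSlots(); output != base.EndOutputSlots(); ++output)
            {
                // Fusing is only safe when the receiver's output feeds exactly one layer.
                if (output->GetNumConnections() != 1)
                {
                    continue;
                }

                for (auto&& childInput : output->GetConnections())
                {
                    if (childInput->GetOwningLayer().GetType() != LayerType::Activation)
                    {
                        continue;
                    }

                    auto* baseLayer       = PolymorphicDowncast<Convolution2dLayer*>(&base);
                    auto* activationLayer = PolymorphicDowncast<ActivationLayer*>(&childInput->GetOwningLayer());

                    // NOTE: the real backends validate the fused configuration first
                    // (e.g. ClConvolution2dWorkloadValidate) and only substitute on success.
                    ActivationDescriptor activationDesc = activationLayer->GetParameters();
                    const std::string name = std::string("fused-") + activationLayer->GetName() +
                                             std::string("-into-") + base.GetName();

                    // Replaces {base, activation} with one layer that keeps the convolution
                    // parameters, weights and biases, and stores the ActivationDescriptor as
                    // additional info; the workload reads it back through
                    // queueDescriptor.GetAdditionalInformation<ActivationDescriptor>().
                    FuseLayerWithWeightsAndBiases<Convolution2dLayer>(optimizationViews,
                                                                      baseLayer,
                                                                      activationLayer,
                                                                      activationDesc,
                                                                      name);
                }
            }
        }

        // If nothing was substituted, report the whole sub-graph back untouched.
        if (optimizationViews.GetSubstitutions().empty())
        {
            optimizationViews.AddUntouchedSubgraph(SubgraphView(subgraph));
        }
        return optimizationViews;
    }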

Signed-off-by: Keith Davis <keith.davis@arm.com>
Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Ib536ac3cbafc7d9b35c139ad9a65b7735262cd9d
diff --git a/Android.mk b/Android.mk
index e8bf4b6..d683c23 100644
--- a/Android.mk
+++ b/Android.mk
@@ -370,6 +370,7 @@
         src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp \
         src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp \
         src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp \
+        src/armnn/test/optimizations/FuseActivationTests.cpp \
         src/armnn/test/optimizations/InsertDebugLayerTests.cpp \
         src/armnn/test/optimizations/MovePermuteUpTests.cpp \
         src/armnn/test/optimizations/OptimizeConsecutiveReshapesTests.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 240767f..30b03dc 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -647,6 +647,7 @@
         src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
         src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
         src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp
+        src/armnn/test/optimizations/FuseActivationTests.cpp
         src/armnn/test/optimizations/FuseBatchNormTests.cpp
         src/armnn/test/optimizations/InsertDebugLayerTests.cpp
         src/armnn/test/optimizations/MovePermuteUpTests.cpp
diff --git a/include/armnn/backends/IBackendInternal.hpp b/include/armnn/backends/IBackendInternal.hpp
index 5f1b413..c7ed8ef 100644
--- a/include/armnn/backends/IBackendInternal.hpp
+++ b/include/armnn/backends/IBackendInternal.hpp
@@ -147,6 +147,9 @@
 
     virtual OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph) const;
 
+    virtual OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph,
+                                                   const ModelOptions& modelOptions) const;
+
     bool SupportsTensorAllocatorAPI() const;
 
     ITensorHandleFactory::FactoryId GetBackwardCompatibleFavoriteHandleFactory();
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 5c55641..d41f2f6 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -537,6 +537,7 @@
 OptimizationResult ApplyBackendOptimizations(OptimizedNetwork* optNetObjPtr,
                                              BackendSettings& backendSettings,
                                              BackendsMap& backends,
+                                             const ModelOptions& modelOptions,
                                              Optional<std::vector<std::string>&> errMessages)
 {
     ARMNN_ASSERT(optNetObjPtr);
@@ -572,7 +573,7 @@
         for (auto& subgraph : subgraphs)
         {
             // Try to optimize the current sub-graph
-            OptimizationViews optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraph);
+            OptimizationViews optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraph, modelOptions);
             ARMNN_ASSERT(optimizationViews.Validate(*subgraph));
 
             // Optimization attempted, check the resulting optimized sub-graph
@@ -1111,6 +1112,7 @@
     OptimizationResult backendOptimizationResult = ApplyBackendOptimizations(optNetObjPtr,
                                                                              backendSettings,
                                                                              backends,
+                                                                             options.m_ModelOptions,
                                                                              messages);
     if (backendOptimizationResult.m_Error)
     {
diff --git a/src/armnn/layers/FullyConnectedLayer.cpp b/src/armnn/layers/FullyConnectedLayer.cpp
index 0dc138b..ca7a0cc 100644
--- a/src/armnn/layers/FullyConnectedLayer.cpp
+++ b/src/armnn/layers/FullyConnectedLayer.cpp
@@ -26,7 +26,6 @@
 
     FullyConnectedQueueDescriptor descriptor;
 
-    SetAdditionalInfo(descriptor);
     descriptor.m_Weight = m_Weight.get();
     if (m_Param.m_BiasEnabled)
     {
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index 0179589..e7eab9d 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -810,10 +810,10 @@
     std::vector<float> weightsVector = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
     ConstTensor        weights(TensorInfo(4, weightsDimensionSizes, DataType::Float32), weightsVector);
 
-    std::vector<float> betaVector     = {0.1f};
-    std::vector<float> gammaVector    = {0.5f};
-    std::vector<float> meanVector     = {0};
-    std::vector<float> varianceVector = {1};
+    std::vector<float> betaVector     = { 0.1f };
+    std::vector<float> gammaVector    = { 0.5f };
+    std::vector<float> meanVector     = { 0 };
+    std::vector<float> varianceVector = { 1 };
     ConstTensor        beta(TensorInfo(1, outputChannelSize, DataType::Float32), betaVector);
     ConstTensor        gamma(TensorInfo(1, outputChannelSize, DataType::Float32), gammaVector);
     ConstTensor        mean(TensorInfo(1, outputChannelSize, DataType::Float32), meanVector);
@@ -830,7 +830,7 @@
     input->GetOutputSlot().SetTensorInfo(inputInfo);
     conv->GetOutputSlot().SetTensorInfo(outputInfo);
     batchNorm->GetOutputSlot().SetTensorInfo(outputInfo);
-    conv     ->m_Weight   = std::make_unique<ScopedCpuTensorHandle>(weights);
+    conv->m_Weight        = std::make_unique<ScopedCpuTensorHandle>(weights);
     batchNorm->m_Beta     = std::make_unique<ScopedCpuTensorHandle>(beta);
     batchNorm->m_Gamma    = std::make_unique<ScopedCpuTensorHandle>(gamma);
     batchNorm->m_Mean     = std::make_unique<ScopedCpuTensorHandle>(mean);
@@ -843,9 +843,9 @@
     }
 
     // Connect layers
-    input     ->GetOutputSlot(0).Connect(conv     ->GetInputSlot(0));
-    conv      ->GetOutputSlot(0).Connect(batchNorm->GetInputSlot(0));
-    batchNorm ->GetOutputSlot(0).Connect(output   ->GetInputSlot(0));
+    input->GetOutputSlot(0).Connect(conv->GetInputSlot(0));
+    conv->GetOutputSlot(0).Connect(batchNorm->GetInputSlot(0));
+    batchNorm->GetOutputSlot(0).Connect(output->GetInputSlot(0));
 
     BOOST_CHECK(4 == graph.GetNumLayers());
     BOOST_TEST(CheckSequence(graph.cbegin(),
@@ -887,10 +887,10 @@
     auto output2   = graph.AddLayer<OutputLayer>(1, "output2");
 
     // Connect layers
-    input     ->GetOutputSlot(0).Connect(conv     ->GetInputSlot(0));
-    conv      ->GetOutputSlot(0).Connect(batchNorm->GetInputSlot(0));
-    batchNorm ->GetOutputSlot(0).Connect(output   ->GetInputSlot(0));
-    conv      ->GetOutputSlot(0).Connect(output2  ->GetInputSlot(0));
+    input->GetOutputSlot(0).Connect(conv->GetInputSlot(0));
+    conv->GetOutputSlot(0).Connect(batchNorm->GetInputSlot(0));
+    batchNorm->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+    conv->GetOutputSlot(0).Connect(output2->GetInputSlot(0));
 
     BOOST_CHECK(5 == graph.GetNumLayers());
     BOOST_TEST(CheckSequence(graph.cbegin(),
diff --git a/src/armnn/test/optimizations/FuseActivationTests.cpp b/src/armnn/test/optimizations/FuseActivationTests.cpp
new file mode 100644
index 0000000..0e85597
--- /dev/null
+++ b/src/armnn/test/optimizations/FuseActivationTests.cpp
@@ -0,0 +1,789 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "LayersFwd.hpp"
+
+#include <Network.hpp>
+#include <ResolveType.hpp>
+#include <armnn/INetwork.hpp>
+#include <test/TestUtils.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+#include <QuantizeHelper.hpp>
+#include <string>
+
+using namespace armnn;
+
+BOOST_AUTO_TEST_SUITE(Optimizer)
+
+namespace
+{
+const float   g_qScale  = 1.0f;
+const int32_t g_qOffset = 0;
+
+template<typename T>
+std::vector<T> GetVector(unsigned int size, float initial, float increment)
+{
+    std::vector<T> vector(size);
+
+    // Fill with initial, initial + increment, initial + 2 * increment, ...
+    for (unsigned int i = 0; i < size; ++i)
+    {
+        vector[i] = T(initial + (increment * static_cast<float>(i)));
+    }
+    return vector;
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+struct Convolution2dTest
+{
+    using LayerType = armnn::Convolution2dLayer;
+    static std::string GetReceiverLayerName() { return "Convolution2d"; };
+    static const bool isElementWise = false;
+
+    static TensorShape GetInputShape()   { return TensorShape( {1, 4, 4, 3}); }  // NHWCin
+    static TensorShape GetOutputShape()  { return TensorShape( {1, 3, 3, 4}); }  // NHWCout
+    static TensorShape GetWeightsShape() { return TensorShape( {4, 2, 2, 3}); }  // CoutHWCin
+
+    constexpr static const unsigned int inputSize  = 48; // batchIn * heightIn * widthIn * channelIn
+    constexpr static const unsigned int outputSize = 36; // batchOut * heightOut * widthOut * channelOut
+
+    static IConnectableLayer* AddReceiverLayer(INetwork* network,
+                                               const char* name)
+    {
+        Convolution2dDescriptor descriptor;
+        descriptor.m_BiasEnabled = false;
+        descriptor.m_DataLayout  = DataLayout::NHWC;
+        descriptor.m_StrideX     = 1;
+        descriptor.m_StrideY     = 1;
+
+        std::vector<float> weightsData   = {  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12,
+                                             11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
+                                             21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+                                             31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42};
+        std::vector<T>     weightsVector = armnnUtils::QuantizedVector<T>(weightsData, g_qScale, g_qOffset);
+        TensorInfo         weightsInfo(GetWeightsShape(), ArmnnType, g_qScale, g_qOffset);
+        ConstTensor        weights(weightsInfo, weightsVector);
+        Optional<ConstTensor> optionalBias;
+
+        return network->AddConvolution2dLayer(descriptor, weights, optionalBias, name);
+    }
+};
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+struct DepthwiseConvolution2dTest
+{
+public:
+    using LayerType = armnn::DepthwiseConvolution2dLayer;
+    static std::string GetReceiverLayerName() { return "DepthwiseConvolution2d"; };
+    static const bool isElementWise = false;
+
+    static TensorShape GetInputShape()   { return TensorShape( {1, 4, 4, 3}); }   // NHWCin
+    static TensorShape GetOutputShape()  { return TensorShape( {1, 3, 3, 12}); }  // NHWCout
+    static TensorShape GetWeightsShape() { return TensorShape( {4, 3, 2, 2}); }   // MCinHW
+
+    constexpr static const unsigned int inputSize  = 48;  // batchIn * heightIn * widthIn * channelIn
+    constexpr static const unsigned int outputSize = 108; // batchOut * heightOut * widthOut * channelOut
+
+    static IConnectableLayer* AddReceiverLayer(INetwork* network,
+                                               const char* name)
+    {
+        DepthwiseConvolution2dDescriptor descriptor;
+        descriptor.m_BiasEnabled = false;
+        descriptor.m_DataLayout  = DataLayout::NHWC;
+        descriptor.m_StrideX     = 1;
+        descriptor.m_StrideY     = 1;
+
+        std::vector<float> weightsData   = { 1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12,
+                                            11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
+                                            21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+                                            31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42};
+        std::vector<T>     weightsVector = armnnUtils::QuantizedVector<T>(weightsData, g_qScale, g_qOffset);
+        TensorInfo         weightsInfo(GetWeightsShape(), ArmnnType, g_qScale, g_qOffset);
+        ConstTensor        weights(weightsInfo, weightsVector);
+        Optional<ConstTensor> optionalBias;
+
+        return network->AddDepthwiseConvolution2dLayer(descriptor, weights, optionalBias, name);
+    }
+};
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+struct FullyConnectedTest
+{
+public:
+    using LayerType = armnn::FullyConnectedLayer;
+    static std::string GetReceiverLayerName() { return "FullyConnected"; };
+    static const bool isElementWise = false;
+
+    static TensorShape GetInputShape()   { return TensorShape( {2, 5, 1, 1}); } // NCinHW
+    static TensorShape GetOutputShape()  { return TensorShape( {2, 3}); }       // NCout
+    static TensorShape GetWeightsShape() { return TensorShape( {5, 3}); }       // CinCout
+
+    constexpr static const unsigned int inputSize  = 10; // batchIn * heightIn * widthIn * channelIn
+    constexpr static const unsigned int outputSize = 6;  // batchOut * heightOut * widthOut * channelOut
+
+    static IConnectableLayer* AddReceiverLayer(INetwork* network,
+                                               const char* name)
+    {
+        FullyConnectedDescriptor descriptor;
+        descriptor.m_BiasEnabled = false;
+
+        std::vector<float> weightsData   = { 1,  2,  3,  4,  5,
+                                             6,  7,  8,  9, 10,
+                                            11, 12, 13, 14, 15};
+        std::vector<T>     weightsVector = armnnUtils::QuantizedVector<T>(weightsData, g_qScale, g_qOffset);
+        TensorInfo         weightsInfo(GetWeightsShape(), ArmnnType, g_qScale, g_qOffset);
+        ConstTensor        weights(weightsInfo, weightsVector);
+        Optional<ConstTensor> optionalBias;
+
+        return network->AddFullyConnectedLayer(descriptor, weights, optionalBias, name);
+    }
+};
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+struct BatchNormTest
+{
+public:
+    using LayerType = armnn::BatchNormalizationLayer;
+    static std::string GetReceiverLayerName() { return "BatchNorm"; };
+    static const bool isElementWise = false;
+
+    static TensorShape GetInputShape()   { return TensorShape( {1, 4, 4, 3}); }  // NHWCin
+    static TensorShape GetOutputShape()  { return TensorShape( {1, 4, 4, 3}); }  // NHWCout
+
+    constexpr static const unsigned int inputSize  = 48; // batchIn * heightIn * widthIn * channelIn
+    constexpr static const unsigned int outputSize = 48; // batchOut * heightOut * widthOut * channelOut
+
+    static IConnectableLayer* AddReceiverLayer(INetwork* network,
+                                               const char* name)
+    {
+        BatchNormalizationDescriptor descriptor;
+        descriptor.m_DataLayout = DataLayout::NHWC;
+
+        std::vector<T> betaVector     = GetVector<T>(GetOutputShape()[3], 0.0f, 0.2f);
+        std::vector<T> gammaVector    = GetVector<T>(GetOutputShape()[3], 0.5f, 0.1f);
+        std::vector<T> meanVector     = GetVector<T>(GetOutputShape()[3], 0.1f, 0.1f);
+        std::vector<T> varianceVector = GetVector<T>(GetOutputShape()[3], 1.0f, 0.1f);
+
+        const unsigned int outputChannelSize[] = { GetOutputShape()[3] };
+        ConstTensor beta(TensorInfo(1, outputChannelSize, ArmnnType), betaVector);
+        ConstTensor gamma(TensorInfo(1, outputChannelSize, ArmnnType), gammaVector);
+        ConstTensor mean(TensorInfo(1, outputChannelSize, ArmnnType), meanVector);
+        ConstTensor variance(TensorInfo(1, outputChannelSize, ArmnnType), varianceVector);
+
+        return network->AddBatchNormalizationLayer(descriptor, mean, variance, beta, gamma, name);
+    }
+};
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+struct MultiplicationTest
+{
+    using LayerType = armnn::MultiplicationLayer;
+    static std::string GetReceiverLayerName() { return "Multiplication"; };
+    static const bool isElementWise = true;
+
+    static TensorShape GetInputShape()   { return TensorShape( {1, 4, 4, 3}); }  // NHWCin
+    static TensorShape GetOutputShape()  { return TensorShape( {1, 4, 4, 3}); }  // NHWCout
+
+    constexpr static const unsigned int inputSize  = 48; // batchIn * heightIn * widthIn * channelIn
+    constexpr static const unsigned int outputSize = 48; // batchOut * heightOut * widthOut * channelOut
+
+    static IConnectableLayer* AddReceiverLayer(INetwork* network,
+                                               const char* name)
+    {
+        return network->AddMultiplicationLayer(name);
+    }
+};
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+struct AdditionTest
+{
+    using LayerType = armnn::AdditionLayer;
+    static std::string GetReceiverLayerName() { return "Addition"; };
+    static const bool isElementWise = true;
+
+    static TensorShape GetInputShape()   { return TensorShape( {1, 4, 4, 3}); }  // NHWCin
+    static TensorShape GetOutputShape()  { return TensorShape( {1, 4, 4, 3}); }  // NHWCout
+
+    constexpr static const unsigned int inputSize  = 48; // batchIn * heightIn * widthIn * channelIn
+    constexpr static const unsigned int outputSize = 48; // batchOut * heightOut * widthOut * channelOut
+
+    static IConnectableLayer* AddReceiverLayer(INetwork* network,
+                                               const char* name)
+    {
+        return network->AddAdditionLayer(name);
+    }
+};
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+struct SubtractionTest
+{
+    using LayerType = armnn::SubtractionLayer;
+    static std::string GetReceiverLayerName() { return "Subtraction"; };
+    static const bool isElementWise = true;
+
+    static TensorShape GetInputShape()   { return TensorShape( {1, 4, 4, 3}); }  // NHWCin
+    static TensorShape GetOutputShape()  { return TensorShape( {1, 4, 4, 3}); }  // NHWCout
+
+    constexpr static const unsigned int inputSize  = 48; // batchIn * heightIn * widthIn * channelIn
+    constexpr static const unsigned int outputSize = 48; // batchOut * heightOut * widthOut * channelOut
+
+    static IConnectableLayer* AddReceiverLayer(INetwork* network,
+                                               const char* name)
+    {
+        return network->AddSubtractionLayer(name);
+    }
+};
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+struct DivisionTest
+{
+    using LayerType = armnn::DivisionLayer;
+    static std::string GetReceiverLayerName() { return "Division"; };
+    static const bool isElementWise = true;
+
+    static TensorShape GetInputShape()   { return TensorShape( {1, 4, 4, 3}); }  // NHWCin
+    static TensorShape GetOutputShape()  { return TensorShape( {1, 4, 4, 3}); }  // NHWCout
+
+    constexpr static const unsigned int inputSize  = 48; // batchIn * heightIn * widthIn * channelIn
+    constexpr static const unsigned int outputSize = 48; // batchOut * heightOut * widthOut * channelOut
+
+    static IConnectableLayer* AddReceiverLayer(INetwork* network,
+                                               const char* name)
+    {
+        return network->AddDivisionLayer(name);
+    }
+};
+
+} // namespace
+
+template<typename LayerTest,
+         armnn::DataType ArmnnType>
+INetworkPtr CreateNetwork(ActivationDescriptor activationDescriptor, bool preventFusing)
+{
+    // Create a network
+    INetworkPtr network = INetwork::Create();
+
+    IConnectableLayer* inputLayer = network->AddInputLayer(0);
+
+    IConnectableLayer* receiverLayer = LayerTest::AddReceiverLayer(network.get(),
+                                                                   "receiverLayer");
+
+    IConnectableLayer* activationLayer = network->AddActivationLayer(activationDescriptor,
+                                                                     "activation");
+
+    IConnectableLayer* outputLayer  = network->AddOutputLayer(0);
+    IConnectableLayer* output2Layer = preventFusing ? network->AddOutputLayer(1) : nullptr;
+
+    // Define layers information
+    TensorInfo inputInfo(LayerTest::GetInputShape(), ArmnnType, g_qScale, g_qOffset);
+    TensorInfo outputInfo(LayerTest::GetOutputShape(), ArmnnType, g_qScale, g_qOffset);
+
+    // Set layer information
+    inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
+    receiverLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+    activationLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+    // Connect layers
+    inputLayer->GetOutputSlot(0).Connect(receiverLayer->GetInputSlot(0));
+    receiverLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
+    activationLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+
+    if (LayerTest::isElementWise)
+    {
+        inputLayer->GetOutputSlot(0).Connect(receiverLayer->GetInputSlot(1));
+    }
+    if (preventFusing)
+    {
+        receiverLayer->GetOutputSlot(0).Connect(output2Layer->GetInputSlot(0));
+    }
+
+    return network;
+}
+
+template<typename LayerTest,
+         armnn::DataType ArmnnType,
+         typename LayerType = typename LayerTest::LayerType,
+         typename T = armnn::ResolveType<ArmnnType>>
+void FuseActivationIntoPreviousLayerTest(ActivationDescriptor activationDescriptor, float tolerance,
+                                         armnn::Compute backendId)
+{
+    // FIRST NETWORK: Fused
+    // Construct ArmNN network
+    INetworkPtr networkFused = CreateNetwork<LayerTest, ArmnnType>(activationDescriptor, false);
+
+    // Create ArmNN runtime
+    IRuntimePtr run = IRuntime::Create(IRuntime::CreationOptions()); // default options
+
+    // Optimise ArmNN network
+    IOptimizedNetworkPtr optNetFused = Optimize(*networkFused, {backendId}, run->GetDeviceSpec());
+
+    Graph graphFused = PolymorphicDowncast<OptimizedNetwork*>(optNetFused.get())->GetGraph();
+
+    auto checkFusedConv2d = [](const armnn::Layer* const layer)->bool {
+        return IsLayerOfType<LayerType>(layer) &&
+            (layer->GetNameStr() == "fused-activation-into-receiverLayer");
+    };
+
+    BOOST_CHECK_MESSAGE(3 == graphFused.GetNumLayers(), LayerTest::GetReceiverLayerName());
+    BOOST_TEST(CheckSequence(graphFused.cbegin(),
+                             graphFused.cend(),
+                             &IsLayerOfType<InputLayer>,
+                             checkFusedConv2d,
+                             &IsLayerOfType<OutputLayer>));
+
+    // Load network into runtime
+    NetworkId networkIdentifier;
+    BOOST_TEST(run->LoadNetwork(networkIdentifier, std::move(optNetFused)) == Status::Success);
+
+    // Create structures for inputs and outputs.
+    std::vector<float> data = GetVector<float>(LayerTest::inputSize, 1.0f, 0.1f);
+    std::vector<T> inputDataFused = armnnUtils::QuantizedVector<T>(data, g_qScale, g_qOffset);
+    std::vector<T> outputDataFused(LayerTest::outputSize);
+
+    InputTensors  inputTensorsFused{
+        {0, ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputDataFused.data())}};
+    OutputTensors outputTensorsFused{
+        {0, Tensor(run->GetOutputTensorInfo(networkIdentifier, 0), outputDataFused.data())}};
+
+    // Execute network
+    run->EnqueueWorkload(networkIdentifier, inputTensorsFused, outputTensorsFused);
+
+    // SECOND NETWORK: NotFused
+    // Construct ArmNN network
+    INetworkPtr networkNotFused = CreateNetwork<LayerTest, ArmnnType>(activationDescriptor, true);
+
+    // Create ArmNN runtime
+    IRuntimePtr runNotFused = IRuntime::Create(IRuntime::CreationOptions()); // default options
+
+    // Optimise ArmNN network
+    IOptimizedNetworkPtr optNetNotFused = Optimize(*networkNotFused, {backendId}, runNotFused->GetDeviceSpec());
+
+    Graph graphNotFused = PolymorphicDowncast<OptimizedNetwork*>(optNetNotFused.get())->GetGraph();
+
+    BOOST_CHECK(5 == graphNotFused.GetNumLayers());
+    BOOST_TEST(CheckSequence(graphNotFused.cbegin(),
+                             graphNotFused.cend(),
+                             &IsLayerOfType<armnn::InputLayer>,
+                             &IsLayerOfType<LayerType>,
+                             &IsLayerOfType<armnn::ActivationLayer>,
+                             &IsLayerOfType<armnn::OutputLayer>,
+                             &IsLayerOfType<armnn::OutputLayer>));
+
+    // Load network into runtime
+    NetworkId networkIdentifierNotFused;
+    BOOST_TEST(runNotFused->LoadNetwork(networkIdentifierNotFused, std::move(optNetNotFused)) == Status::Success);
+
+    // Create structures for inputs and outputs.
+    std::vector<T> inputDataNotFused = armnnUtils::QuantizedVector<T>(data, g_qScale, g_qOffset);
+    std::vector<T> outputDataNotFused(LayerTest::outputSize);
+    std::vector<T> outputData2NotFused(LayerTest::outputSize);
+
+    InputTensors  inputTensorsNotFused{
+        {0, ConstTensor(runNotFused->GetInputTensorInfo(networkIdentifierNotFused, 0), inputDataNotFused.data())}};
+    OutputTensors outputTensorsNotFused{
+        {0, Tensor(runNotFused->GetOutputTensorInfo(networkIdentifierNotFused, 0), outputDataNotFused.data())},
+        {1, Tensor(runNotFused->GetOutputTensorInfo(networkIdentifierNotFused, 1), outputData2NotFused.data())}};
+
+    // Execute network
+    runNotFused->EnqueueWorkload(networkIdentifierNotFused, inputTensorsNotFused, outputTensorsNotFused);
+
+    // Check the output of the fused-activation matches with the output of the activation in the "NotFused" network
+    for (unsigned int n = 0; n < outputDataFused.size(); ++n)
+    {
+        // BOOST_CHECK_CLOSE takes a floating-point percentage tolerance; do not narrow it to T.
+        BOOST_CHECK_CLOSE(static_cast<float>(outputDataFused[n]), static_cast<float>(outputDataNotFused[n]),
+                          tolerance);
+    }
+}
+
+#if defined(ARMCOMPUTENEON_ENABLED)
+// ReLu fused into Receiver Layers Float32
+BOOST_AUTO_TEST_CASE(FuseReLUIntoConvFloat32CpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::ReLu;
+
+    FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::Float32>, DataType::Float32>
+        (activationDescriptor, 0.0001f, armnn::Compute::CpuAcc);
+}
+BOOST_AUTO_TEST_CASE(FuseReLUIntoDWConvFloat32CpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::ReLu;
+
+    FuseActivationIntoPreviousLayerTest<DepthwiseConvolution2dTest<DataType::Float32>, DataType::Float32>
+        (activationDescriptor, 0.0001f, armnn::Compute::CpuAcc);
+}
+BOOST_AUTO_TEST_CASE(FuseReLUIntoFullyConnectedFloat32CpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::ReLu;
+
+    FuseActivationIntoPreviousLayerTest<FullyConnectedTest<DataType::Float32>, DataType::Float32>
+        (activationDescriptor, 0.0001f, armnn::Compute::CpuAcc);
+}
+BOOST_AUTO_TEST_CASE(FuseReLUIntoBatchNormFloat32CpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::ReLu;
+
+    FuseActivationIntoPreviousLayerTest<BatchNormTest<DataType::Float32>, DataType::Float32>
+        (activationDescriptor, 0.0001f, armnn::Compute::CpuAcc);
+}
+
+// BoundedReLu fused into Receiver Layers Float32
+BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoConvFloat32CpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
+    activationDescriptor.m_A = 1.0f;
+    activationDescriptor.m_B = -1.0f;
+
+    FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::Float32>, DataType::Float32>
+        (activationDescriptor, 0.0001f, armnn::Compute::CpuAcc);
+}
+BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoDWConvFloat32CpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
+    activationDescriptor.m_A = 1.0f;
+    activationDescriptor.m_B = -1.0f;
+
+    FuseActivationIntoPreviousLayerTest<DepthwiseConvolution2dTest<DataType::Float32>, DataType::Float32>
+        (activationDescriptor, 0.0001f, armnn::Compute::CpuAcc);
+}
+BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoFullyConnectedFloat32CpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
+    activationDescriptor.m_A = 1.0f;
+    activationDescriptor.m_B = -1.0f;
+
+    FuseActivationIntoPreviousLayerTest<FullyConnectedTest<DataType::Float32>, DataType::Float32>
+        (activationDescriptor, 0.0001f, armnn::Compute::CpuAcc);
+}
+BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoBatchNormFloat32CpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
+    activationDescriptor.m_A = 1.0f;
+    activationDescriptor.m_B = -1.0f;
+
+    FuseActivationIntoPreviousLayerTest<BatchNormTest<DataType::Float32>, DataType::Float32>
+        (activationDescriptor, 0.0001f, armnn::Compute::CpuAcc);
+}
+
+// ReLU fused into Receiver Layers QAsymmU8
+BOOST_AUTO_TEST_CASE(FuseReLUIntoConvQAsymmU8CpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::ReLu;
+
+    FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+        (activationDescriptor, 0.0001f, armnn::Compute::CpuAcc);
+}
+BOOST_AUTO_TEST_CASE(FuseReLUIntoDWConvQAsymmU8CpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::ReLu;
+
+    FuseActivationIntoPreviousLayerTest<DepthwiseConvolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+        (activationDescriptor, 0.0001f, armnn::Compute::CpuAcc);
+}
+BOOST_AUTO_TEST_CASE(FuseReLUIntoFullyConnectedQAsymmU8CpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::ReLu;
+
+    FuseActivationIntoPreviousLayerTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+        (activationDescriptor, 0.0001f, armnn::Compute::CpuAcc);
+}
+
+// HardSwish fused into Receiver Layers Float32
+BOOST_AUTO_TEST_CASE(FuseHardSwishIntoConvFloat32CpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::HardSwish;
+
+    FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::Float32>, DataType::Float32>
+        (activationDescriptor, 0.0001f, armnn::Compute::CpuAcc);
+}
+
+// TanH fused into Receiver Layers Float32
+BOOST_AUTO_TEST_CASE(FuseTanHIntoConvFloat32CpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::TanH;
+
+    FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::Float32>, DataType::Float32>
+        (activationDescriptor, 0.0001f, armnn::Compute::CpuAcc);
+}
+#endif
+
+#if defined(ARMCOMPUTECL_ENABLED)
+// ReLu fused into Receiver Layers Float32
+BOOST_AUTO_TEST_CASE(FuseReLUIntoConvFloat32GpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::ReLu;
+
+    FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::Float32>, DataType::Float32>
+        (activationDescriptor, 0.0001f, armnn::Compute::GpuAcc);
+}
+BOOST_AUTO_TEST_CASE(FuseReLUIntoDWConvFloat32GpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::ReLu;
+
+    FuseActivationIntoPreviousLayerTest<DepthwiseConvolution2dTest<DataType::Float32>, DataType::Float32>
+        (activationDescriptor, 0.0001f, armnn::Compute::GpuAcc);
+}
+BOOST_AUTO_TEST_CASE(FuseReLUIntoFullyConnectedFloat32GpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::ReLu;
+
+    FuseActivationIntoPreviousLayerTest<FullyConnectedTest<DataType::Float32>, DataType::Float32>
+        (activationDescriptor, 0.0001f, armnn::Compute::GpuAcc);
+}
+BOOST_AUTO_TEST_CASE(FuseReLUIntoBatchNormFloat32GpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::ReLu;
+
+    FuseActivationIntoPreviousLayerTest<BatchNormTest<DataType::Float32>, DataType::Float32>
+        (activationDescriptor, 0.0001f, armnn::Compute::GpuAcc);
+}
+BOOST_AUTO_TEST_CASE(FuseReLUIntoMulFloat32GpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::ReLu;
+
+    FuseActivationIntoPreviousLayerTest<MultiplicationTest<DataType::Float32>, DataType::Float32>
+        (activationDescriptor, 0.0001f, armnn::Compute::GpuAcc);
+}
+BOOST_AUTO_TEST_CASE(FuseReLUIntoAddFloat32GpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::ReLu;
+
+    FuseActivationIntoPreviousLayerTest<AdditionTest<DataType::Float32>, DataType::Float32>
+        (activationDescriptor, 0.0001f, armnn::Compute::GpuAcc);
+}
+BOOST_AUTO_TEST_CASE(FuseReLUIntoSubFloat32GpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::ReLu;
+
+    FuseActivationIntoPreviousLayerTest<SubtractionTest<DataType::Float32>, DataType::Float32>
+        (activationDescriptor, 0.0001f, armnn::Compute::GpuAcc);
+}
+BOOST_AUTO_TEST_CASE(FuseReLUIntoDivFloat32GpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::ReLu;
+
+    FuseActivationIntoPreviousLayerTest<DivisionTest<DataType::Float32>, DataType::Float32>
+        (activationDescriptor, 0.0001f, armnn::Compute::GpuAcc);
+}
+
+// BoundedReLu fused into Receiver Layers Float32
+BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoConvFloat32GpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
+    activationDescriptor.m_A = 1.0f;
+    activationDescriptor.m_B = -1.0f;
+
+    FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::Float32>, DataType::Float32>
+        (activationDescriptor, 0.0001f, armnn::Compute::GpuAcc);
+}
+BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoDWConvFloat32GpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
+    activationDescriptor.m_A = 1.0f;
+    activationDescriptor.m_B = -1.0f;
+
+    FuseActivationIntoPreviousLayerTest<DepthwiseConvolution2dTest<DataType::Float32>, DataType::Float32>
+        (activationDescriptor, 0.0001f, armnn::Compute::GpuAcc);
+}
+BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoFullyConnectedFloat32GpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
+    activationDescriptor.m_A = 1.0f;
+    activationDescriptor.m_B = -1.0f;
+
+    FuseActivationIntoPreviousLayerTest<FullyConnectedTest<DataType::Float32>, DataType::Float32>
+        (activationDescriptor, 0.0001f, armnn::Compute::GpuAcc);
+}
+BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoBatchNormFloat32GpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
+    activationDescriptor.m_A = 1.0f;
+    activationDescriptor.m_B = -1.0f;
+
+    FuseActivationIntoPreviousLayerTest<BatchNormTest<DataType::Float32>, DataType::Float32>
+        (activationDescriptor, 0.0001f, armnn::Compute::GpuAcc);
+}
+BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoMulFloat32GpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
+    activationDescriptor.m_A = 1.0f;
+    activationDescriptor.m_B = -1.0f;
+
+    FuseActivationIntoPreviousLayerTest<MultiplicationTest<DataType::Float32>, DataType::Float32>
+        (activationDescriptor, 0.0001f, armnn::Compute::GpuAcc);
+}
+BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoAddFloat32GpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
+    activationDescriptor.m_A = 1.0f;
+    activationDescriptor.m_B = -1.0f;
+
+    FuseActivationIntoPreviousLayerTest<AdditionTest<DataType::Float32>, DataType::Float32>
+        (activationDescriptor, 0.0001f, armnn::Compute::GpuAcc);
+}
+BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoSubFloat32GpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
+    activationDescriptor.m_A = 1.0f;
+    activationDescriptor.m_B = -1.0f;
+
+    FuseActivationIntoPreviousLayerTest<SubtractionTest<DataType::Float32>, DataType::Float32>
+        (activationDescriptor, 0.0001f, armnn::Compute::GpuAcc);
+}
+BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoDivFloat32GpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
+    activationDescriptor.m_A = 1.0f;
+    activationDescriptor.m_B = -1.0f;
+
+    FuseActivationIntoPreviousLayerTest<DivisionTest<DataType::Float32>, DataType::Float32>
+        (activationDescriptor, 0.0001f, armnn::Compute::GpuAcc);
+}
+
+// ReLU fused into Receiver Layers QAsymmU8
+BOOST_AUTO_TEST_CASE(FuseReLUIntoConvQAsymmU8GpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::ReLu;
+
+    FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+        (activationDescriptor, 0.0001f, armnn::Compute::GpuAcc);
+}
+BOOST_AUTO_TEST_CASE(FuseReLUIntoDWConvQAsymmU8GpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::ReLu;
+
+    FuseActivationIntoPreviousLayerTest<DepthwiseConvolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+        (activationDescriptor, 0.0001f, armnn::Compute::GpuAcc);
+}
+BOOST_AUTO_TEST_CASE(FuseReLUIntoFullyConnectedQAsymmU8GpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::ReLu;
+
+    FuseActivationIntoPreviousLayerTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+        (activationDescriptor, 0.0001f, armnn::Compute::GpuAcc);
+}
+
+// HardSwish fused into Receiver Layers Float32
+BOOST_AUTO_TEST_CASE(FuseHardSwishIntoConvFloat32GpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::HardSwish;
+
+    FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::Float32>, DataType::Float32>
+        (activationDescriptor, 0.0001f, armnn::Compute::GpuAcc);
+}
+BOOST_AUTO_TEST_CASE(FuseHardSwishIntoMulFloat32GpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::HardSwish;
+
+    FuseActivationIntoPreviousLayerTest<MultiplicationTest<DataType::Float32>, DataType::Float32>
+        (activationDescriptor, 0.0001f, armnn::Compute::GpuAcc);
+}
+BOOST_AUTO_TEST_CASE(FuseHardSwishIntoAddFloat32GpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::HardSwish;
+
+    FuseActivationIntoPreviousLayerTest<AdditionTest<DataType::Float32>, DataType::Float32>
+        (activationDescriptor, 0.0001f, armnn::Compute::GpuAcc);
+}
+BOOST_AUTO_TEST_CASE(FuseHardSwishIntoSubFloat32GpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::HardSwish;
+
+    FuseActivationIntoPreviousLayerTest<SubtractionTest<DataType::Float32>, DataType::Float32>
+        (activationDescriptor, 0.0001f, armnn::Compute::GpuAcc);
+}
+BOOST_AUTO_TEST_CASE(FuseHardSwishIntoDivFloat32GpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::HardSwish;
+
+    FuseActivationIntoPreviousLayerTest<DivisionTest<DataType::Float32>, DataType::Float32>
+        (activationDescriptor, 0.0001f, armnn::Compute::GpuAcc);
+}
+
+// TanH fused into Receiver Layers Float32
+BOOST_AUTO_TEST_CASE(FuseTanHIntoConvFloat32GpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::TanH;
+
+    FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::Float32>, DataType::Float32>
+        (activationDescriptor, 0.0001f, armnn::Compute::GpuAcc);
+}
+BOOST_AUTO_TEST_CASE(FuseTanHIntoMulFloat32GpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::TanH;
+
+    FuseActivationIntoPreviousLayerTest<MultiplicationTest<DataType::Float32>, DataType::Float32>
+        (activationDescriptor, 0.0001f, armnn::Compute::GpuAcc);
+}
+BOOST_AUTO_TEST_CASE(FuseTanHIntoAddFloat32GpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::TanH;
+
+    FuseActivationIntoPreviousLayerTest<AdditionTest<DataType::Float32>, DataType::Float32>
+        (activationDescriptor, 0.0001f, armnn::Compute::GpuAcc);
+}
+BOOST_AUTO_TEST_CASE(FuseTanHIntoSubFloat32GpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::TanH;
+
+    FuseActivationIntoPreviousLayerTest<SubtractionTest<DataType::Float32>, DataType::Float32>
+        (activationDescriptor, 0.0001f, armnn::Compute::GpuAcc);
+}
+BOOST_AUTO_TEST_CASE(FuseTanHIntoDivFloat32GpuAccTest)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::TanH;
+
+    FuseActivationIntoPreviousLayerTest<DivisionTest<DataType::Float32>, DataType::Float32>
+        (activationDescriptor, 0.0001f, armnn::Compute::GpuAcc);
+}
+#endif
+
+BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
diff --git a/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp b/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp
new file mode 100644
index 0000000..79744ec
--- /dev/null
+++ b/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp
@@ -0,0 +1,145 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/backends/OptimizationViews.hpp>
+
+namespace armnn
+{
+
+namespace
+{
+
+//
+// This helper only works if the layers that these inputs are connected to are not part of the selected subgraph.
+//
+SubgraphView::InputSlots CreateInputsFrom(const std::vector<Layer*>& layers)
+{
+    SubgraphView::InputSlots result;
+    for (auto&& layer : layers)
+    {
+        for (auto&& it = layer->BeginInputSlots(); it != layer->EndInputSlots(); ++it)
+        {
+            result.push_back(&(*it));
+        }
+    }
+    return result;
+}
+
+//
+// This helper only works if the layers that these outputs are connected to are not part of the selected subgraph.
+//
+SubgraphView::OutputSlots CreateOutputsFrom(const std::vector<Layer*>& layers)
+{
+    SubgraphView::OutputSlots result;
+    for (auto&& layer : layers)
+    {
+        for (auto&& it = layer->BeginOutputSlots(); it != layer->EndOutputSlots(); ++it)
+        {
+            result.push_back(&(*it));
+        }
+    }
+    return result;
+}
+
+} // namespace
+
+inline const TensorInfo GetOverriddenDataType(const TensorInfo& info, Optional<DataType> type)
+{
+    if (!type)
+    {
+        return info;
+    }
+
+    return TensorInfo(info.GetShape(), type.value(), info.GetQuantizationScale(), info.GetQuantizationOffset());
+}
+
+inline armnn::Optional<armnn::DataType> GetOptionalBiasTypeFromWeightsType(armnn::Optional<armnn::DataType> weightsType)
+{
+    if (!weightsType)
+    {
+        return weightsType;
+    }
+
+    switch(weightsType.value())
+    {
+        case armnn::DataType::BFloat16:
+        case armnn::DataType::Float16:
+        case armnn::DataType::Float32:
+            return weightsType;
+        case armnn::DataType::QAsymmS8:
+        case armnn::DataType::QAsymmU8:
+        case armnn::DataType::QSymmS16:
+            return armnn::DataType::Signed32;
+        default:
+            ARMNN_ASSERT_MSG(false, "GetOptionalBiasTypeFromWeightsType(): Unsupported data type.");
+    }
+    return armnn::EmptyOptional();
+}
+
+template<typename LayerType>
+LayerType* FuseLayerWithoutParameters(OptimizationViews& optimizationViews,
+                                      LayerType* baseLayer,
+                                      ActivationLayer* activationLayer,
+                                      ActivationDescriptor& activationDesc,
+                                      std::string name)
+{
+    LayerType* replacementLayer = optimizationViews.GetGraph().AddLayer<LayerType>(name.c_str());
+
+    replacementLayer->SetAdditionalInfoForObject(std::make_shared<ActivationDescriptor>(activationDesc));
+
+    SubgraphView substitutionSubgraph(CreateInputsFrom({baseLayer}),
+                                      CreateOutputsFrom({activationLayer}),
+                                      {baseLayer, activationLayer});
+    SubgraphView replacementSubgraph(replacementLayer);
+
+    optimizationViews.AddSubstitution({substitutionSubgraph, replacementSubgraph});
+    return replacementLayer;
+}
+
+template<typename LayerType>
+LayerType* FuseLayerWithParameters(OptimizationViews& optimizationViews,
+                                   LayerType* baseLayer,
+                                   ActivationLayer* activationLayer,
+                                   ActivationDescriptor& activationDesc,
+                                   std::string name)
+{
+    LayerType* replacementLayer = optimizationViews.GetGraph().AddLayer<LayerType>(baseLayer->GetParameters(),
+                                                                                   name.c_str());
+
+    replacementLayer->SetAdditionalInfoForObject(std::make_shared<ActivationDescriptor>(activationDesc));
+
+    SubgraphView substitutionSubgraph(CreateInputsFrom({baseLayer}),
+                                      CreateOutputsFrom({activationLayer}),
+                                      {baseLayer, activationLayer});
+    SubgraphView replacementSubgraph(replacementLayer);
+
+    optimizationViews.AddSubstitution({substitutionSubgraph, replacementSubgraph});
+    return replacementLayer;
+}
+
+template<typename LayerType>
+LayerType* FuseLayerWithWeightsAndBiases(OptimizationViews& optimizationViews,
+                                         LayerType* baseLayer,
+                                         ActivationLayer* activationLayer,
+                                         ActivationDescriptor& activationDesc,
+                                         std::string name)
+{
+    LayerType* replacementLayer = FuseLayerWithParameters(optimizationViews,
+                                                          baseLayer,
+                                                          activationLayer,
+                                                          activationDesc,
+                                                          name);
+
+    replacementLayer->m_Weight = std::move(baseLayer->m_Weight);
+    replacementLayer->m_Bias   = std::move(baseLayer->m_Bias);
+
+    return replacementLayer;
+}
+
+} // namespace armnn
diff --git a/src/backends/aclCommon/ArmComputeUtils.hpp b/src/backends/aclCommon/ArmComputeUtils.hpp
index 6b1f975..adcf828 100644
--- a/src/backends/aclCommon/ArmComputeUtils.hpp
+++ b/src/backends/aclCommon/ArmComputeUtils.hpp
@@ -9,6 +9,8 @@
 #include <armnn/utility/Assert.hpp>
 
 #include <arm_compute/core/Types.h>
+#include "../../../../clframework/arm_compute/core/Types.h"
+#include "../backendsCommon/WorkloadData.hpp"
 
 namespace armnn
 {
@@ -77,6 +79,30 @@
         actDesc.m_A, actDesc.m_B);
 }
 
+inline arm_compute::ActivationLayerInfo
+ConvertActivationDescriptorToAclActivationLayerInfo(const ActivationDescriptor* activationDescPtr)
+{
+    if (activationDescPtr != nullptr)
+    {
+        return ConvertActivationDescriptorToAclActivationLayerInfo(*activationDescPtr);
+    }
+    return arm_compute::ActivationLayerInfo();
+}
+
+inline arm_compute::ActivationLayerInfo
+ConvertAdditionalInfoToAclActivationLayerInfo(const QueueDescriptor& queueDescriptor)
+{
+    const ActivationDescriptor* activationDescPtr = queueDescriptor.GetAdditionalInformation<ActivationDescriptor>();
+
+    if (activationDescPtr != nullptr)
+    {
+        return ConvertActivationDescriptorToAclActivationLayerInfo(*activationDescPtr);
+    }
+    return arm_compute::ActivationLayerInfo();
+}
+
 inline arm_compute::ComparisonOperation ConvertComparisonOperationToAcl(const ComparisonDescriptor& descriptor)
 {
     switch (descriptor.m_Operation)
@@ -130,10 +156,22 @@
 }
 
 inline arm_compute::FullyConnectedLayerInfo
-ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(const FullyConnectedDescriptor& fullyConnectedDesc)
+ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(const FullyConnectedDescriptor& fullyConnectedDesc,
+                                                            const ActivationDescriptor* activationDesc)
 {
     arm_compute::FullyConnectedLayerInfo fc_info;
     fc_info.transpose_weights = fullyConnectedDesc.m_TransposeWeightMatrix;
+    fc_info.activation_info = ConvertActivationDescriptorToAclActivationLayerInfo(activationDesc);
+    return fc_info;
+}
+
+inline arm_compute::FullyConnectedLayerInfo
+ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(const FullyConnectedDescriptor& fullyConnectedDesc,
+        arm_compute::ActivationLayerInfo activationLayerInfo)
+{
+    arm_compute::FullyConnectedLayerInfo fc_info;
+    fc_info.transpose_weights = fullyConnectedDesc.m_TransposeWeightMatrix;
+    fc_info.activation_info = activationLayerInfo;
     return fc_info;
 }
 
diff --git a/src/backends/aclCommon/CMakeLists.txt b/src/backends/aclCommon/CMakeLists.txt
index fa80437..dac663b 100644
--- a/src/backends/aclCommon/CMakeLists.txt
+++ b/src/backends/aclCommon/CMakeLists.txt
@@ -7,6 +7,7 @@
     ArmComputeTensorHandle.hpp
     ArmComputeTensorUtils.hpp
     ArmComputeTensorUtils.cpp
+    ArmComputeSubgraphUtils.hpp
     ArmComputeUtils.hpp
     BaseMemoryManager.cpp
     BaseMemoryManager.hpp
diff --git a/src/backends/backendsCommon/IBackendInternal.cpp b/src/backends/backendsCommon/IBackendInternal.cpp
index 81fc515..b08dff8 100644
--- a/src/backends/backendsCommon/IBackendInternal.cpp
+++ b/src/backends/backendsCommon/IBackendInternal.cpp
@@ -3,6 +3,7 @@
 // SPDX-License-Identifier: MIT
 //
 
+#include <armnn/BackendOptions.hpp>
 #include <armnn/backends/IBackendInternal.hpp>
 
 namespace armnn
@@ -135,6 +136,12 @@
     return result;
 }
 
+OptimizationViews IBackendInternal::OptimizeSubgraphView(const SubgraphView& subgraph,
+                                                         const ModelOptions& /*modelOptions*/) const
+{
+    return OptimizeSubgraphView(subgraph);
+}
+
 bool IBackendInternal::SupportsTensorAllocatorAPI() const
 {
     return !GetHandleFactoryPreferences().empty();
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index dd39d31..0a232dc 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -36,7 +36,7 @@
                                unsigned int numExpectedOut) const;
 
     template<typename T>
-    const T* GetAdditionalInformation()
+    const T* GetAdditionalInformation() const
     {
         return static_cast<T*>(m_AdditionalInfoObject);
     }
diff --git a/src/backends/cl/ClBackend.cpp b/src/backends/cl/ClBackend.cpp
index 6254b0a..57a5851 100644
--- a/src/backends/cl/ClBackend.cpp
+++ b/src/backends/cl/ClBackend.cpp
@@ -12,16 +12,28 @@
 #include "ClTensorHandleFactory.hpp"
 
 #include <armnn/BackendRegistry.hpp>
+#include <armnn/Descriptors.hpp>
 
+#include <aclCommon/ArmComputeSubgraphUtils.hpp>
+#include <aclCommon/ArmComputeUtils.hpp>
 #include <aclCommon/BaseMemoryManager.hpp>
 
 #include <armnn/backends/IBackendContext.hpp>
 #include <armnn/backends/IMemoryManager.hpp>
-
 #include <armnn/utility/PolymorphicDowncast.hpp>
 
+#include "workloads/ClAdditionWorkload.hpp"
+#include "workloads/ClBatchNormalizationFloatWorkload.hpp"
+#include "workloads/ClConvolution2dWorkload.hpp"
+#include "workloads/ClDepthwiseConvolutionWorkload.hpp"
+#include "workloads/ClDivisionFloatWorkload.hpp"
+#include "workloads/ClFullyConnectedWorkload.hpp"
+#include "workloads/ClMultiplicationWorkload.hpp"
+#include "workloads/ClSubtractionWorkload.hpp"
+
 #include <Optimizer.hpp>
 
+#include <arm_compute/core/Types.h>
 #include <arm_compute/runtime/CL/CLBufferAllocator.h>
 
 namespace armnn
@@ -129,11 +141,256 @@
     return layerSupport;
 }
 
-OptimizationViews ClBackend::OptimizeSubgraphView(const SubgraphView& subgraph) const
+OptimizationViews ClBackend::OptimizeSubgraphView(const SubgraphView& subgraph,
+                                                  const ModelOptions& modelOptions) const
 {
     OptimizationViews optimizationViews;
 
-    optimizationViews.AddUntouchedSubgraph(SubgraphView(subgraph));
+    auto it = subgraph.end();
+    bool isFastMathEnabled = false;
+
+#if defined(ARMCOMPUTECL_ENABLED)
+    IBackendInternal::IBackendSpecificModelContextPtr modelContextPtr = CreateBackendSpecificModelContext(modelOptions);
+
+    if (modelContextPtr)
+    {
+        auto clModelOptions = dynamic_cast<ClBackendModelContext*>(modelContextPtr.get());
+        if (clModelOptions)
+        {
+            isFastMathEnabled = clModelOptions->IsFastMathEnabled();
+        }
+    }
+#endif
+
+    while (it != subgraph.begin())
+    {
+        --it;
+        Layer& base = **it;
+
+        if ((base.GetType() == LayerType::DepthwiseConvolution2d || base.GetType() == LayerType::Convolution2d
+            || base.GetType() == LayerType::BatchNormalization || base.GetType() == LayerType::FullyConnected
+            || base.GetType() == LayerType::Addition || base.GetType() == LayerType::Multiplication
+            || base.GetType() == LayerType::Subtraction || base.GetType() == LayerType::Division)
+            && (base.GetAdditionalInformation<ActivationDescriptor>() == nullptr))
+        {
+            for (auto output = base.BeginOutputSlots(); output != base.EndOutputSlots(); ++output)
+            {
+                if (output->GetNumConnections() == 1)
+                {
+                    for (auto&& childInput : output->GetConnections())
+                    {
+                        if (childInput->GetOwningLayer().GetType() == LayerType::Activation)
+                        {
+                            Layer& child = childInput->GetOwningLayer();
+
+                            auto* activationLayer = PolymorphicDowncast<ActivationLayer*>(&child);
+
+                            const std::string name = std::string("fused-") + child.GetName() + std::string("-into-") +
+                                                     base.GetName();
+
+                            // Get params from activation layer
+                            ActivationDescriptor activationDesc = activationLayer->GetParameters();
+
+                            if (base.GetType() == LayerType::Convolution2d)
+                            {
+                                Convolution2dLayer* baseLayer = PolymorphicDowncast<Convolution2dLayer*>(&base);
+
+                                Optional<TensorInfo> biases;
+
+                                if (baseLayer->GetParameters().m_BiasEnabled)
+                                {
+                                    biases = GetOverriddenDataType(baseLayer->m_Bias->GetTensorInfo(),
+                                            GetOptionalBiasTypeFromWeightsType(
+                                                    baseLayer->m_Weight->GetTensorInfo().GetDataType()));
+                                }
+
+                                arm_compute::Status status = ClConvolution2dWorkloadValidate(
+                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        baseLayer->GetParameters(),
+                                        baseLayer->m_Weight->GetTensorInfo(),
+                                        biases,
+                                        isFastMathEnabled,
+                                        &activationDesc);
+
+                                if (status)
+                                {
+                                    FuseLayerWithWeightsAndBiases<Convolution2dLayer>(optimizationViews,
+                                                                                      baseLayer,
+                                                                                      activationLayer,
+                                                                                      activationDesc,
+                                                                                      name);
+                                }
+                            }
+                            else if (base.GetType() == LayerType::DepthwiseConvolution2d)
+                            {
+                                DepthwiseConvolution2dLayer* baseLayer =
+                                        PolymorphicDowncast<DepthwiseConvolution2dLayer*>(&base);
+
+                                Optional<TensorInfo> biases;
+
+                                if (baseLayer->GetParameters().m_BiasEnabled)
+                                {
+                                    biases = GetOverriddenDataType(baseLayer->m_Bias->GetTensorInfo(),
+                                            GetOptionalBiasTypeFromWeightsType(
+                                                    baseLayer->m_Weight->GetTensorInfo().GetDataType()));
+                                }
+
+                                arm_compute::Status status = ClDepthwiseConvolutionWorkloadValidate(
+                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        baseLayer->GetParameters(),
+                                        baseLayer->m_Weight->GetTensorInfo(),
+                                        biases,
+                                        &activationDesc);
+
+                                if (status)
+                                {
+                                    FuseLayerWithWeightsAndBiases<DepthwiseConvolution2dLayer>(optimizationViews,
+                                                                                               baseLayer,
+                                                                                               activationLayer,
+                                                                                               activationDesc,
+                                                                                               name);
+                                }
+                            }
+                            else if (base.GetType() == LayerType::FullyConnected)
+                            {
+                                FullyConnectedLayer* baseLayer = PolymorphicDowncast<FullyConnectedLayer*>(&base);
+
+                                arm_compute::Status status = ClFullyConnectedWorkloadValidate(
+                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        baseLayer->m_Weight->GetTensorInfo(),
+                                        baseLayer->m_Bias->GetTensorInfo(),
+                                        baseLayer->GetParameters(),
+                                        &activationDesc);
+
+                                if (status)
+                                {
+                                    FuseLayerWithWeightsAndBiases<FullyConnectedLayer>(optimizationViews,
+                                                                                       baseLayer,
+                                                                                       activationLayer,
+                                                                                       activationDesc,
+                                                                                       name);
+                                }
+                            }
+                            else if (base.GetType() == LayerType::BatchNormalization)
+                            {
+                                BatchNormalizationLayer* baseLayer =
+                                        PolymorphicDowncast<BatchNormalizationLayer*>(&base);
+
+                                arm_compute::Status status = ClBatchNormalizationValidate(
+                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        baseLayer->m_Mean->GetTensorInfo(),
+                                        baseLayer->m_Variance->GetTensorInfo(),
+                                        baseLayer->m_Beta->GetTensorInfo(),
+                                        baseLayer->m_Gamma->GetTensorInfo(),
+                                        baseLayer->GetParameters(),
+                                        &activationDesc);
+
+                                if (status)
+                                {
+                                    BatchNormalizationLayer* replacementLayer =
+                                            FuseLayerWithParameters<BatchNormalizationLayer>(optimizationViews,
+                                                                                             baseLayer,
+                                                                                             activationLayer,
+                                                                                             activationDesc,
+                                                                                             name);
+
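+                                    // Move the batch-norm constant tensors onto the fused replacement layer.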
+                                    replacementLayer->m_Beta     = std::move(baseLayer->m_Beta);
+                                    replacementLayer->m_Gamma    = std::move(baseLayer->m_Gamma);
+                                    replacementLayer->m_Mean     = std::move(baseLayer->m_Mean);
+                                    replacementLayer->m_Variance = std::move(baseLayer->m_Variance);
+                                }
+                            }
+                            else if (base.GetType() == LayerType::Addition)
+                            {
+                                AdditionLayer* baseLayer = PolymorphicDowncast<AdditionLayer*>(&base);
+
+                                arm_compute::Status status = ClAdditionValidate(
+                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        &activationDesc);
+
+                                if (status)
+                                {
+                                    FuseLayerWithoutParameters<AdditionLayer>(optimizationViews,
+                                                                              baseLayer,
+                                                                              activationLayer,
+                                                                              activationDesc,
+                                                                              name);
+                                }
+                            }
+                            else if (base.GetType() == LayerType::Division)
+                            {
+                                DivisionLayer* baseLayer = PolymorphicDowncast<DivisionLayer*>(&base);
+
+                                arm_compute::Status status = ClDivisionWorkloadValidate(
+                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        &activationDesc);
+
+                                if (status)
+                                {
+                                    FuseLayerWithoutParameters<DivisionLayer>(optimizationViews,
+                                                                              baseLayer,
+                                                                              activationLayer,
+                                                                              activationDesc,
+                                                                              name);
+                                }
+                            }
+                            else if (base.GetType() == LayerType::Multiplication)
+                            {
+                                MultiplicationLayer* baseLayer = PolymorphicDowncast<MultiplicationLayer*>(&base);
+
+                                arm_compute::Status status = ClMultiplicationWorkloadValidate(
+                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        &activationDesc);
+
+                                if (status)
+                                {
+                                    FuseLayerWithoutParameters<MultiplicationLayer>(optimizationViews,
+                                                                                    baseLayer,
+                                                                                    activationLayer,
+                                                                                    activationDesc,
+                                                                                    name);
+                                }
+                            }
+                            else if (base.GetType() == LayerType::Subtraction)
+                            {
+                                SubtractionLayer* baseLayer = PolymorphicDowncast<SubtractionLayer*>(&base);
+
+                                arm_compute::Status status = ClSubtractionValidate(
+                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        &activationDesc);
+
+                                if (status)
+                                {
+                                    FuseLayerWithoutParameters<SubtractionLayer>(optimizationViews,
+                                                                                 baseLayer,
+                                                                                 activationLayer,
+                                                                                 activationDesc,
+                                                                                 name);
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+    // End of the fusion pass. If no substitutions were made, report the original subgraph as untouched.
+    if (optimizationViews.GetSubstitutions().empty())
+    {
+        optimizationViews.AddUntouchedSubgraph(SubgraphView(subgraph));
+    }
 
     return optimizationViews;
 }
diff --git a/src/backends/cl/ClBackend.hpp b/src/backends/cl/ClBackend.hpp
index af5534e..2b19fc5 100644
--- a/src/backends/cl/ClBackend.hpp
+++ b/src/backends/cl/ClBackend.hpp
@@ -44,7 +44,8 @@
     IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override;
     IBackendInternal::ILayerSupportSharedPtr GetLayerSupport(const ModelOptions& modelOptions) const override;
 
-    OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph) const override;
+    OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph,
+                                           const ModelOptions& modelOptions) const override;
 
     IBackendInternal::IBackendSpecificModelContextPtr CreateBackendSpecificModelContext(
         const ModelOptions& modelOptions) const override;
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index 7c1466e..cce5c9b 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -197,7 +197,8 @@
                                    reasonIfUnsupported,
                                    input0,
                                    input1,
-                                   output);
+                                   output,
+                                   nullptr);
 }
 
 bool ClLayerSupport::IsArgMinMaxSupported(const TensorInfo& input,
@@ -230,7 +231,8 @@
                                    var,
                                    beta,
                                    gamma,
-                                   descriptor);
+                                   descriptor,
+                                   nullptr);
 }
 
 bool ClLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
@@ -357,7 +359,8 @@
                                    descriptor,
                                    weights,
                                    biases,
-                                   isFastMathEnabled);
+                                   isFastMathEnabled,
+                                   nullptr);
 }
 
 bool ClLayerSupport::IsDequantizeSupported(const TensorInfo& input,
@@ -395,7 +398,8 @@
                                    output,
                                    descriptor,
                                    weights,
-                                   biases);
+                                   biases,
+                                   nullptr);
 }
 
 bool ClLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
@@ -411,7 +415,8 @@
                                    output,
                                    descriptor,
                                    weights,
-                                   biases);
+                                   biases,
+                                   nullptr);
 }
 
 
@@ -424,7 +429,8 @@
                                    reasonIfUnsupported,
                                    input0,
                                    input1,
-                                   output);
+                                   output,
+                                   nullptr);
 }
 
 bool ClLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
@@ -494,7 +500,8 @@
                                    output,
                                    weights,
                                    biases,
-                                   descriptor);
+                                   descriptor,
+                                   nullptr);
 }
 
 bool ClLayerSupport::IsGatherSupported(const TensorInfo& input0,
@@ -639,7 +646,8 @@
                                    reasonIfUnsupported,
                                    input0,
                                    input1,
-                                   output);
+                                   output,
+                                   nullptr);
 }
 
 bool ClLayerSupport::IsNormalizationSupported(const TensorInfo& input,
@@ -911,7 +919,8 @@
                                    reasonIfUnsupported,
                                    input0,
                                    input1,
-                                   output);
+                                   output,
+                                   nullptr);
 }
 
 bool ClLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
diff --git a/src/backends/cl/workloads/ClAdditionWorkload.cpp b/src/backends/cl/workloads/ClAdditionWorkload.cpp
index 18e2400..7e75a04 100644
--- a/src/backends/cl/workloads/ClAdditionWorkload.cpp
+++ b/src/backends/cl/workloads/ClAdditionWorkload.cpp
@@ -8,6 +8,7 @@
 #include <cl/ClTensorHandle.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
 #include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <aclCommon/ArmComputeUtils.hpp>
 
 #include "ClWorkloadUtils.hpp"
 
@@ -26,7 +27,10 @@
     arm_compute::ICLTensor& input0 = static_cast<IClTensorHandle*>(this->m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ICLTensor& input1 = static_cast<IClTensorHandle*>(this->m_Data.m_Inputs[1])->GetTensor();
     arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(this->m_Data.m_Outputs[0])->GetTensor();
-    m_Layer.configure(&input0, &input1, &output, g_AclConvertPolicy);
+
+    const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
+
+    m_Layer.configure(&input0, &input1, &output, g_AclConvertPolicy, activationInfo);
 }
 
 void ClAdditionWorkload::Execute() const
@@ -37,16 +41,21 @@
 
 arm_compute::Status ClAdditionValidate(const TensorInfo& input0,
                                        const TensorInfo& input1,
-                                       const TensorInfo& output)
+                                       const TensorInfo& output,
+                                       const ActivationDescriptor* activationDescriptor)
 {
     const arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0);
     const arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1);
     const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
 
+    const arm_compute::ActivationLayerInfo activationInfo = ConvertActivationDescriptorToAclActivationLayerInfo(
+            activationDescriptor);
+
     const arm_compute::Status aclStatus = arm_compute::CLArithmeticAddition::validate(&aclInput0Info,
                                                                                       &aclInput1Info,
                                                                                       &aclOutputInfo,
-                                                                                      g_AclConvertPolicy);
+                                                                                      g_AclConvertPolicy,
+                                                                                      activationInfo);
 
     return aclStatus;
 }
diff --git a/src/backends/cl/workloads/ClAdditionWorkload.hpp b/src/backends/cl/workloads/ClAdditionWorkload.hpp
index 62bd0ae..372c4bc 100644
--- a/src/backends/cl/workloads/ClAdditionWorkload.hpp
+++ b/src/backends/cl/workloads/ClAdditionWorkload.hpp
@@ -25,5 +25,6 @@
 
 arm_compute::Status ClAdditionValidate(const TensorInfo& input0,
                                        const TensorInfo& input1,
-                                       const TensorInfo& output);
+                                       const TensorInfo& output,
+                                       const ActivationDescriptor* activationDescriptor = nullptr);
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp b/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
index fa0be85..68942e2 100644
--- a/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
@@ -4,13 +4,17 @@
 //
 
 #include "ClBatchNormalizationFloatWorkload.hpp"
-#include <cl/ClTensorHandle.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
-#include <aclCommon/ArmComputeTensorUtils.hpp>
-#include <cl/ClLayerSupport.hpp>
-
 #include "ClWorkloadUtils.hpp"
 
+#include <cl/ClTensorHandle.hpp>
+
+#include <backendsCommon/CpuTensorHandle.hpp>
+
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <aclCommon/ArmComputeUtils.hpp>
+
+#include <cl/ClLayerSupport.hpp>
+
 namespace armnn
 {
 using namespace armcomputetensorutils;
@@ -21,7 +25,8 @@
                                                  const TensorInfo& var,
                                                  const TensorInfo& beta,
                                                  const TensorInfo& gamma,
-                                                 const BatchNormalizationDescriptor &desc)
+                                                 const BatchNormalizationDescriptor& desc,
+                                                 const ActivationDescriptor* activationDescriptor)
 {
     const arm_compute::TensorInfo aclInputInfo =
           armcomputetensorutils::BuildArmComputeTensorInfo(input, desc.m_DataLayout);
@@ -36,13 +41,17 @@
     const arm_compute::TensorInfo aclGammaInfo =
           armcomputetensorutils::BuildArmComputeTensorInfo(gamma, desc.m_DataLayout);
 
+    const arm_compute::ActivationLayerInfo activationInfo = ConvertActivationDescriptorToAclActivationLayerInfo(
+            activationDescriptor);
+
     return arm_compute::CLBatchNormalizationLayer::validate(&aclInputInfo,
                                                             &aclOutputInfo,
                                                             &aclMeanInfo,
                                                             &aclVarInfo,
                                                             &aclBetaInfo,
                                                             &aclGammaInfo,
-                                                            desc.m_Eps);
+                                                            desc.m_Eps,
+                                                            activationInfo);
 }
 
 ClBatchNormalizationFloatWorkload::ClBatchNormalizationFloatWorkload(
@@ -70,13 +79,16 @@
     input.info()->set_data_layout(aclDataLayout);
     output.info()->set_data_layout(aclDataLayout);
 
+    const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
+
     m_Layer.configure(&input,
                       &output,
                       m_Mean.get(),
                       m_Variance.get(),
                       m_Beta.get(),
                       m_Gamma.get(),
-                      m_Data.m_Parameters.m_Eps);
+                      m_Data.m_Parameters.m_Eps,
+                      activationInfo);
 
     InitializeArmComputeClTensorData(*m_Mean, m_Data.m_Mean);
     InitializeArmComputeClTensorData(*m_Variance, m_Data.m_Variance);
diff --git a/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.hpp b/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.hpp
index e94bef2..ef57783 100644
--- a/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.hpp
+++ b/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.hpp
@@ -19,7 +19,8 @@
                                                  const TensorInfo& var,
                                                  const TensorInfo& beta,
                                                  const TensorInfo& gamma,
-                                                 const BatchNormalizationDescriptor& desc);
+                                                 const BatchNormalizationDescriptor& desc,
+                                                 const ActivationDescriptor* activationDescriptor = nullptr);
 
 class ClBatchNormalizationFloatWorkload : public FloatWorkload<BatchNormalizationQueueDescriptor>
 {
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
index 7b52f27..50cb9de 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
@@ -25,7 +25,8 @@
                                                     const Convolution2dDescriptor& descriptor,
                                                     const TensorInfo& weights,
                                                     const Optional<TensorInfo>& biases,
-                                                    bool isFastMathEnabled)
+                                                    bool isFastMathEnabled,
+                                                    const ActivationDescriptor* activationDescriptor)
 {
     const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
     const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
@@ -47,6 +48,9 @@
 
     arm_compute::PadStrideInfo layerInfo = BuildArmComputePadStrideInfo(descriptor);
 
+    const arm_compute::ActivationLayerInfo activationInfo = ConvertActivationDescriptorToAclActivationLayerInfo(
+            activationDescriptor);
+
     return arm_compute::CLConvolutionLayer::validate(&aclInputInfo,
                                                      &aclWeightsInfo,
                                                      optionalAclBiasesInfo,
@@ -54,7 +58,7 @@
                                                      layerInfo,
                                                      arm_compute::WeightsInfo(),
                                                      aclDilationInfo,
-                                                     arm_compute::ActivationLayerInfo(),
+                                                     activationInfo,
                                                      isFastMathEnabled);
 }
 
@@ -91,6 +95,8 @@
 
     arm_compute::PadStrideInfo padStrideInfo = BuildArmComputePadStrideInfo(m_Data.m_Parameters);
 
+    const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
+
     m_ConvolutionLayer.configure(&input,
                                  m_KernelTensor.get(),
                                  m_BiasTensor.get(),
@@ -98,7 +104,7 @@
                                  padStrideInfo,
                                  arm_compute::WeightsInfo(),
                                  aclDilationInfo,
-                                 arm_compute::ActivationLayerInfo(),
+                                 activationInfo,
                                  isFastMathEnabled);
 
     m_ConvolutionMethod =
@@ -107,7 +113,7 @@
                                                   output.info(),
                                                   padStrideInfo,
                                                   arm_compute::WeightsInfo(),
-                                                  arm_compute::ActivationLayerInfo(),
+                                                  activationInfo,
                                                   arm_compute::CLScheduler::get().target(),
                                                   aclDilationInfo,
                                                   isFastMathEnabled);
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.hpp b/src/backends/cl/workloads/ClConvolution2dWorkload.hpp
index f769422..70170b5 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.hpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.hpp
@@ -23,7 +23,8 @@
                                                     const Convolution2dDescriptor& descriptor,
                                                     const TensorInfo& weights,
                                                     const Optional<TensorInfo>& biases,
-                                                    bool isFastMathEnabled = false);
+                                                    bool isFastMathEnabled = false,
+                                                    const ActivationDescriptor* activationDescriptor = nullptr);
 
 class ClConvolution2dWorkload : public BaseWorkload<Convolution2dQueueDescriptor>
 {
diff --git a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
index 8704b12..53f1684 100644
--- a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
@@ -8,11 +8,13 @@
 #include <ResolveType.hpp>
 #include "ClWorkloadUtils.hpp"
 
+#include <armnn/Exceptions.hpp>
 #include <aclCommon/ArmComputeUtils.hpp>
 #include <aclCommon/ArmComputeTensorUtils.hpp>
 #include <cl/ClTensorHandle.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
 #include <backendsCommon/WorkloadUtils.hpp>
+#include <backendsCommon/WorkloadData.hpp>
 
 #include <arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h>
 
@@ -25,7 +27,8 @@
                                                            const TensorInfo& output,
                                                            const DepthwiseConvolution2dDescriptor& descriptor,
                                                            const TensorInfo& weights,
-                                                           const Optional<TensorInfo>& biases)
+                                                           const Optional<TensorInfo>& biases,
+                                                           const ActivationDescriptor* activationDescriptor)
 {
     const arm_compute::TensorInfo aclInputInfo  = BuildArmComputeTensorInfo(input,  descriptor.m_DataLayout);
     const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
@@ -56,13 +59,16 @@
             descriptor.m_DilationX,
             descriptor.m_DilationY);
 
+    const arm_compute::ActivationLayerInfo activationInfo = ConvertActivationDescriptorToAclActivationLayerInfo(
+            activationDescriptor);
+
     return arm_compute::CLDepthwiseConvolutionLayer::validate(&aclInputInfo,
                                                               &aclWeightsInfo,
                                                               optionalAclBiasesInfo,
                                                               &aclOutputInfo,
                                                               aclPadStrideInfo,
                                                               aclDepthMultiplier,
-                                                              arm_compute::ActivationLayerInfo(),
+                                                              activationInfo,
                                                               aclDilationInfo);
 
 }
@@ -114,6 +120,8 @@
 
     arm_compute::PadStrideInfo padStrideInfo = BuildArmComputePadStrideInfo(m_Data.m_Parameters);
 
+    const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
+
     m_DepthwiseConvolutionLayer = std::make_unique<arm_compute::CLDepthwiseConvolutionLayer>();
     static_cast<arm_compute::CLDepthwiseConvolutionLayer*>(m_DepthwiseConvolutionLayer.get())->configure(
         &input,
@@ -122,7 +130,7 @@
         &output,
         padStrideInfo,
         depthMultiplier,
-        arm_compute::ActivationLayerInfo(),
+        activationInfo,
         aclDilationInfo);
 
     ARMNN_ASSERT(m_DepthwiseConvolutionLayer);
diff --git a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.hpp b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.hpp
index fc277b9..c759137 100644
--- a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.hpp
+++ b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.hpp
@@ -18,7 +18,8 @@
                                                            const TensorInfo& output,
                                                            const DepthwiseConvolution2dDescriptor& descriptor,
                                                            const TensorInfo& weights,
-                                                           const Optional<TensorInfo>& biases);
+                                                           const Optional<TensorInfo>& biases,
+                                                           const ActivationDescriptor* activationDescriptor = nullptr);
 
 class ClDepthwiseConvolutionWorkload : public BaseWorkload<DepthwiseConvolution2dQueueDescriptor>
 {
diff --git a/src/backends/cl/workloads/ClDivisionFloatWorkload.cpp b/src/backends/cl/workloads/ClDivisionFloatWorkload.cpp
index 2a27f8a..c79e55e 100644
--- a/src/backends/cl/workloads/ClDivisionFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClDivisionFloatWorkload.cpp
@@ -4,9 +4,12 @@
 //
 
 #include "ClDivisionFloatWorkload.hpp"
-#include <cl/ClTensorHandle.hpp>
+
+#include <aclCommon/ArmComputeUtils.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
 
+#include <cl/ClTensorHandle.hpp>
+
 #include "ClWorkloadUtils.hpp"
 
 namespace armnn
@@ -14,13 +17,17 @@
 
 arm_compute::Status ClDivisionWorkloadValidate(const TensorInfo& input0,
                                                const TensorInfo& input1,
-                                               const TensorInfo& output)
+                                               const TensorInfo& output,
+                                               const ActivationDescriptor* activationDescriptor)
 {
     const arm_compute::TensorInfo aclInput1 = armcomputetensorutils::BuildArmComputeTensorInfo(input0);
     const arm_compute::TensorInfo aclInput2 = armcomputetensorutils::BuildArmComputeTensorInfo(input1);
     const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);
 
-    return arm_compute::CLArithmeticDivision::validate(&aclInput1, &aclInput2, &aclOutput);
+    const arm_compute::ActivationLayerInfo activationInfo = ConvertActivationDescriptorToAclActivationLayerInfo(
+            activationDescriptor);
+
+    return arm_compute::CLArithmeticDivision::validate(&aclInput1, &aclInput2, &aclOutput, activationInfo);
 }
 
 
@@ -33,8 +40,10 @@
     arm_compute::ICLTensor& input0 = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ICLTensor& input1 = static_cast<IClTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
     arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
-    // Construct
-    m_ArithmeticDivision.configure(&input0, &input1, &output);
+
+    const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
+
+    m_ArithmeticDivision.configure(&input0, &input1, &output, activationInfo);
 }
 
 void ClDivisionFloatWorkload::Execute() const
diff --git a/src/backends/cl/workloads/ClDivisionFloatWorkload.hpp b/src/backends/cl/workloads/ClDivisionFloatWorkload.hpp
index ddca87d..71d27ed 100644
--- a/src/backends/cl/workloads/ClDivisionFloatWorkload.hpp
+++ b/src/backends/cl/workloads/ClDivisionFloatWorkload.hpp
@@ -14,7 +14,8 @@
 
 arm_compute::Status ClDivisionWorkloadValidate(const TensorInfo& input0,
                                                const TensorInfo& input1,
-                                               const TensorInfo& output);
+                                               const TensorInfo& output,
+                                               const ActivationDescriptor* activationDescriptor = nullptr);
 
 class ClDivisionFloatWorkload : public FloatWorkload<DivisionQueueDescriptor>
 {
diff --git a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
index 60eb138..eaec639 100644
--- a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
+++ b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
@@ -20,7 +20,8 @@
                                                      const TensorInfo& output,
                                                      const TensorInfo& weights,
                                                      const TensorInfo& biases,
-                                                     const FullyConnectedDescriptor& descriptor)
+                                                     const FullyConnectedDescriptor& descriptor,
+                                                     const ActivationDescriptor* activationDescriptor)
 {
     const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input);
     const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output);
@@ -35,7 +36,7 @@
     }
 
     const arm_compute::FullyConnectedLayerInfo fullyConnectedLayerInfo =
-        ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(descriptor);
+        ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(descriptor, activationDescriptor);
 
     return arm_compute::CLFullyConnectedLayer::validate(&aclInput,
                                                         &aclWeights,
@@ -63,9 +64,11 @@
     arm_compute::ICLTensor& input  = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
-    // Construct
-    arm_compute::FullyConnectedLayerInfo fc_info;
-    fc_info.transpose_weights = m_Data.m_Parameters.m_TransposeWeightMatrix;
+    const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
+
+    arm_compute::FullyConnectedLayerInfo fc_info =
+            ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(descriptor.m_Parameters, activationInfo);
+
     m_FullyConnectedLayer.configure(&input, m_WeightsTensor.get(), m_BiasesTensor.get(), &output, fc_info);
 
     InitializeArmComputeClTensorData(*m_WeightsTensor, m_Data.m_Weight);
diff --git a/src/backends/cl/workloads/ClFullyConnectedWorkload.hpp b/src/backends/cl/workloads/ClFullyConnectedWorkload.hpp
index e13436e..311b594 100644
--- a/src/backends/cl/workloads/ClFullyConnectedWorkload.hpp
+++ b/src/backends/cl/workloads/ClFullyConnectedWorkload.hpp
@@ -19,7 +19,8 @@
                                                      const TensorInfo& output,
                                                      const TensorInfo& weights,
                                                      const TensorInfo& biases,
-                                                     const FullyConnectedDescriptor& descriptor);
+                                                     const FullyConnectedDescriptor& descriptor,
+                                                     const ActivationDescriptor* activationDescriptor = nullptr);
 
 class ClFullyConnectedWorkload : public armnn::BaseWorkload<armnn::FullyConnectedQueueDescriptor>
 {
diff --git a/src/backends/cl/workloads/ClMultiplicationWorkload.cpp b/src/backends/cl/workloads/ClMultiplicationWorkload.cpp
index e9b75c3..46a1c4b 100644
--- a/src/backends/cl/workloads/ClMultiplicationWorkload.cpp
+++ b/src/backends/cl/workloads/ClMultiplicationWorkload.cpp
@@ -4,8 +4,12 @@
 //
 
 #include "ClMultiplicationWorkload.hpp"
-#include <cl/ClTensorHandle.hpp>
+
+#include <aclCommon/ArmComputeUtils.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
+
+#include <cl/ClTensorHandle.hpp>
+
 #include "ClWorkloadUtils.hpp"
 
 namespace armnn
@@ -13,7 +17,8 @@
 
 arm_compute::Status ClMultiplicationWorkloadValidate(const TensorInfo& input0,
                                                      const TensorInfo& input1,
-                                                     const TensorInfo& output)
+                                                     const TensorInfo& output,
+                                                     const ActivationDescriptor* activationDescriptor)
 {
     const arm_compute::TensorInfo aclInput1 = armcomputetensorutils::BuildArmComputeTensorInfo(input0);
     const arm_compute::TensorInfo aclInput2 = armcomputetensorutils::BuildArmComputeTensorInfo(input1);
@@ -23,6 +28,9 @@
                           arm_compute::ConvertPolicy::SATURATE :
                           arm_compute::ConvertPolicy::WRAP;
 
+    const arm_compute::ActivationLayerInfo activationInfo = ConvertActivationDescriptorToAclActivationLayerInfo(
+            activationDescriptor);
+
     // At the time of writing, configure() will fail if a rounding policy other than TO_ZERO is supplied to it,
     // when providing a scale of 1.0 for F32 tensors, even though the provided rounding policy appears to be
     // ignored for F32 tensors.
@@ -31,7 +39,8 @@
                                                             &aclOutput,
                                                             1.0f,
                                                             convertPolicy,
-                                                            arm_compute::RoundingPolicy::TO_ZERO);
+                                                            arm_compute::RoundingPolicy::TO_ZERO,
+                                                            activationInfo);
 }
 
 
@@ -50,13 +59,16 @@
                           arm_compute::ConvertPolicy::SATURATE :
                           arm_compute::ConvertPolicy::WRAP;
 
+    const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
+
     // Construct
     m_PixelWiseMultiplication.configure(&input0,
                                         &input1,
                                         &output,
                                         1.0f,
                                         convertPolicy,
-                                        arm_compute::RoundingPolicy::TO_NEAREST_EVEN);
+                                        arm_compute::RoundingPolicy::TO_NEAREST_EVEN,
+                                        activationInfo);
 }
 
 void ClMultiplicationWorkload::Execute() const
diff --git a/src/backends/cl/workloads/ClMultiplicationWorkload.hpp b/src/backends/cl/workloads/ClMultiplicationWorkload.hpp
index 732bb16..461449c 100644
--- a/src/backends/cl/workloads/ClMultiplicationWorkload.hpp
+++ b/src/backends/cl/workloads/ClMultiplicationWorkload.hpp
@@ -14,7 +14,8 @@
 
 arm_compute::Status ClMultiplicationWorkloadValidate(const TensorInfo& input0,
                                                      const TensorInfo& input1,
-                                                     const TensorInfo& output);
+                                                     const TensorInfo& output,
+                                                     const ActivationDescriptor* activationDescriptor = nullptr);
 
 class ClMultiplicationWorkload : public BaseWorkload<MultiplicationQueueDescriptor>
 {
diff --git a/src/backends/cl/workloads/ClSubtractionWorkload.cpp b/src/backends/cl/workloads/ClSubtractionWorkload.cpp
index 38154eb..c9fb556 100644
--- a/src/backends/cl/workloads/ClSubtractionWorkload.cpp
+++ b/src/backends/cl/workloads/ClSubtractionWorkload.cpp
@@ -7,9 +7,11 @@
 
 #include <cl/ClTensorHandle.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
+#include <aclCommon/ArmComputeUtils.hpp>
 #include <aclCommon/ArmComputeTensorUtils.hpp>
 
 #include "ClWorkloadUtils.hpp"
+#include "../../../../include/armnn/ArmNN.hpp"
 
 namespace armnn
 {
@@ -26,7 +28,10 @@
     arm_compute::ICLTensor& input0 = static_cast<IClTensorHandle*>(this->m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ICLTensor& input1 = static_cast<IClTensorHandle*>(this->m_Data.m_Inputs[1])->GetTensor();
     arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(this->m_Data.m_Outputs[0])->GetTensor();
-    m_Layer.configure(&input0, &input1, &output, g_AclConvertPolicy);
+
+    const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
+
+    m_Layer.configure(&input0, &input1, &output, g_AclConvertPolicy, activationInfo);
 }
 
 void ClSubtractionWorkload::Execute() const
@@ -37,16 +42,21 @@
 
 arm_compute::Status ClSubtractionValidate(const TensorInfo& input0,
                                           const TensorInfo& input1,
-                                          const TensorInfo& output)
+                                          const TensorInfo& output,
+                                          const ActivationDescriptor* activationDescriptor)
 {
     const arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0);
     const arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1);
     const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
 
+    const arm_compute::ActivationLayerInfo activationInfo = ConvertActivationDescriptorToAclActivationLayerInfo(
+            activationDescriptor);
+
     const arm_compute::Status aclStatus = arm_compute::CLArithmeticSubtraction::validate(&aclInput0Info,
                                                                                          &aclInput1Info,
                                                                                          &aclOutputInfo,
-                                                                                         g_AclConvertPolicy);
+                                                                                         g_AclConvertPolicy,
+                                                                                         activationInfo);
 
     return aclStatus;
 }
diff --git a/src/backends/cl/workloads/ClSubtractionWorkload.hpp b/src/backends/cl/workloads/ClSubtractionWorkload.hpp
index da6d17c..9f51de6 100644
--- a/src/backends/cl/workloads/ClSubtractionWorkload.hpp
+++ b/src/backends/cl/workloads/ClSubtractionWorkload.hpp
@@ -25,5 +25,6 @@
 
 arm_compute::Status ClSubtractionValidate(const TensorInfo& input0,
                                           const TensorInfo& input1,
-                                          const TensorInfo& output);
+                                          const TensorInfo& output,
+                                          const ActivationDescriptor* activationDescriptor = nullptr);
 } //namespace armnn
diff --git a/src/backends/neon/NeonBackend.cpp b/src/backends/neon/NeonBackend.cpp
index 9862ddb..150bc34 100644
--- a/src/backends/neon/NeonBackend.cpp
+++ b/src/backends/neon/NeonBackend.cpp
@@ -11,7 +11,10 @@
 #include "NeonTensorHandleFactory.hpp"
 
 #include <armnn/BackendRegistry.hpp>
+#include <armnn/Descriptors.hpp>
 
+#include <aclCommon/ArmComputeSubgraphUtils.hpp>
+#include <aclCommon/ArmComputeUtils.hpp>
 #include <aclCommon/BaseMemoryManager.hpp>
 
 #include <armnn/backends/IBackendContext.hpp>
@@ -19,8 +22,18 @@
 
 #include <armnn/utility/PolymorphicDowncast.hpp>
 
+#include "workloads/NeonAdditionWorkload.hpp"
+#include "workloads/NeonBatchNormalizationWorkload.hpp"
+#include "workloads/NeonConvolution2dWorkload.hpp"
+#include "workloads/NeonDepthwiseConvolutionWorkload.hpp"
+#include "workloads/NeonDivisionWorkload.hpp"
+#include "workloads/NeonFullyConnectedWorkload.hpp"
+#include "workloads/NeonMultiplicationWorkload.hpp"
+#include "workloads/NeonSubtractionWorkload.hpp"
+
 #include <Optimizer.hpp>
 
+#include <arm_compute/core/Types.h>
 #include <arm_compute/runtime/Allocator.h>
 
 namespace armnn
@@ -122,7 +135,238 @@
 {
     OptimizationViews optimizationViews;
 
-    optimizationViews.AddUntouchedSubgraph(SubgraphView(subgraph));
+    auto it = subgraph.end();
+
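+    // Walk the subgraph from back to front looking for layers whose single Activation consumer can be fused.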
+    while (it != subgraph.begin())
+    {
+        --it;
+        Layer& base = **it;
+
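+        // Only layers that support a fused activation and do not already carry one are candidates.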
+        if ((base.GetType() == LayerType::DepthwiseConvolution2d || base.GetType() == LayerType::Convolution2d
+             || base.GetType() == LayerType::BatchNormalization || base.GetType() == LayerType::FullyConnected
+             || base.GetType() == LayerType::Addition || base.GetType() == LayerType::Multiplication
+             || base.GetType() == LayerType::Subtraction || base.GetType() == LayerType::Division)
+            && (base.GetAdditionalInformation<ActivationDescriptor>() == nullptr))
+        {
+            for (auto output = base.BeginOutputSlots(); output != base.EndOutputSlots(); ++output)
+            {
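+                // Fusing is only safe when the Activation layer is the sole consumer of this output.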
+                if (output->GetNumConnections() == 1)
+                {
+                    for (auto&& childInput : output->GetConnections())
+                    {
+                        if (childInput->GetOwningLayer().GetType() == LayerType::Activation)
+                        {
+                            Layer& child = childInput->GetOwningLayer();
+
+                            auto* activationLayer = PolymorphicDowncast<ActivationLayer*>(&child);
+
+                            const std::string name = std::string("fused-") + child.GetName() + std::string("-into-") +
+                                                     base.GetName();
+
+                            // Get params from activation layer
+                            ActivationDescriptor activationDesc = activationLayer->GetParameters();
+
+                            if (base.GetType() == LayerType::Convolution2d)
+                            {
+                                Convolution2dLayer* baseLayer = PolymorphicDowncast<Convolution2dLayer*>(&base);
+
+                                Optional<TensorInfo> biases;
+
+                                if (baseLayer->GetParameters().m_BiasEnabled)
+                                {
+                                    biases = GetOverriddenDataType(baseLayer->m_Bias->GetTensorInfo(),
+                                            GetOptionalBiasTypeFromWeightsType(
+                                                    baseLayer->m_Weight->GetTensorInfo().GetDataType()));
+                                }
+
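+                                // Validate the Neon convolution with the activation attached; fast math is left disabled (false).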
+                                arm_compute::Status status = NeonConvolution2dWorkloadValidate(
+                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        baseLayer->GetParameters(),
+                                        baseLayer->m_Weight->GetTensorInfo(),
+                                        biases,
+                                        false,
+                                        &activationDesc);
+
+                                if (status)
+                                {
+                                    FuseLayerWithWeightsAndBiases<Convolution2dLayer>(optimizationViews,
+                                                                                      baseLayer,
+                                                                                      activationLayer,
+                                                                                      activationDesc,
+                                                                                      name);
+                                }
+                            }
+                            else if (base.GetType() == LayerType::DepthwiseConvolution2d)
+                            {
+                                DepthwiseConvolution2dLayer* baseLayer =
+                                        PolymorphicDowncast<DepthwiseConvolution2dLayer*>(&base);
+
+                                Optional<TensorInfo> biases;
+
+                                if (baseLayer->GetParameters().m_BiasEnabled)
+                                {
+                                    biases = GetOverriddenDataType(baseLayer->m_Bias->GetTensorInfo(),
+                                            GetOptionalBiasTypeFromWeightsType(
+                                                    baseLayer->m_Weight->GetTensorInfo().GetDataType()));
+                                }
+
+                                arm_compute::Status status = NeonDepthwiseConvolutionWorkloadValidate(
+                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        baseLayer->GetParameters(),
+                                        baseLayer->m_Weight->GetTensorInfo(),
+                                        biases,
+                                        &activationDesc);
+
+                                if (status)
+                                {
+                                    FuseLayerWithWeightsAndBiases<DepthwiseConvolution2dLayer>(optimizationViews,
+                                                                                               baseLayer,
+                                                                                               activationLayer,
+                                                                                               activationDesc,
+                                                                                               name);
+                                }
+                            }
+                            else if (base.GetType() == LayerType::FullyConnected)
+                            {
+                                FullyConnectedLayer* baseLayer = PolymorphicDowncast<FullyConnectedLayer*>(&base);
+
+                                arm_compute::Status status = NeonFullyConnectedWorkloadValidate(
+                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        baseLayer->m_Weight->GetTensorInfo(),
+                                        baseLayer->m_Bias->GetTensorInfo(),
+                                        baseLayer->GetParameters(),
+                                        &activationDesc);
+
+                                if (status)
+                                {
+                                    FuseLayerWithWeightsAndBiases<FullyConnectedLayer>(optimizationViews,
+                                                                                       baseLayer,
+                                                                                       activationLayer,
+                                                                                       activationDesc,
+                                                                                       name);
+                                }
+                            }
+                            else if (base.GetType() == LayerType::BatchNormalization)
+                            {
+                                BatchNormalizationLayer* baseLayer =
+                                        PolymorphicDowncast<BatchNormalizationLayer*>(&base);
+
+                                arm_compute::Status status = NeonBatchNormalizationValidate(
+                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        baseLayer->m_Mean->GetTensorInfo(),
+                                        baseLayer->m_Variance->GetTensorInfo(),
+                                        baseLayer->m_Beta->GetTensorInfo(),
+                                        baseLayer->m_Gamma->GetTensorInfo(),
+                                        baseLayer->GetParameters(),
+                                        &activationDesc);
+
+                                if (status)
+                                {
+                                    BatchNormalizationLayer* replacementLayer =
+                                            FuseLayerWithParameters<BatchNormalizationLayer>(
+                                                    optimizationViews,
+                                                    baseLayer,
+                                                    activationLayer,
+                                                    activationDesc,
+                                                    name);
+
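+                                    // Move the batch-norm constant tensors onto the fused replacement layer.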
+                                    replacementLayer->m_Beta     = std::move(baseLayer->m_Beta);
+                                    replacementLayer->m_Gamma    = std::move(baseLayer->m_Gamma);
+                                    replacementLayer->m_Mean     = std::move(baseLayer->m_Mean);
+                                    replacementLayer->m_Variance = std::move(baseLayer->m_Variance);
+                                }
+                            }
+                            else if (base.GetType() == LayerType::Addition)
+                            {
+                                AdditionLayer* baseLayer = PolymorphicDowncast<AdditionLayer*>(&base);
+
+                                arm_compute::Status status = NeonAdditionWorkloadValidate(
+                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        &activationDesc);
+
+                                if (status)
+                                {
+                                    FuseLayerWithoutParameters<AdditionLayer>(optimizationViews,
+                                                                              baseLayer,
+                                                                              activationLayer,
+                                                                              activationDesc,
+                                                                              name);
+                                }
+                            }
+                            else if (base.GetType() == LayerType::Division)
+                            {
+                                DivisionLayer* baseLayer = PolymorphicDowncast<DivisionLayer*>(&base);
+
+                                arm_compute::Status status = NeonDivisionWorkloadValidate(
+                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        &activationDesc);
+
+                                if (status)
+                                {
+                                    FuseLayerWithoutParameters<DivisionLayer>(optimizationViews,
+                                                                              baseLayer,
+                                                                              activationLayer,
+                                                                              activationDesc,
+                                                                              name);
+                                }
+                            }
+                            else if (base.GetType() == LayerType::Multiplication)
+                            {
+                                MultiplicationLayer* baseLayer = PolymorphicDowncast<MultiplicationLayer*>(&base);
+
+                                arm_compute::Status status = NeonMultiplicationWorkloadValidate(
+                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        &activationDesc);
+
+                                if (status)
+                                {
+                                    FuseLayerWithoutParameters<MultiplicationLayer>(optimizationViews,
+                                                                                    baseLayer,
+                                                                                    activationLayer,
+                                                                                    activationDesc,
+                                                                                    name);
+                                }
+                            }
+                            else if (base.GetType() == LayerType::Subtraction)
+                            {
+                                SubtractionLayer* baseLayer = PolymorphicDowncast<SubtractionLayer*>(&base);
+
+                                arm_compute::Status status = NeonSubtractionWorkloadValidate(
+                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                        &activationDesc);
+
+                                if (status)
+                                {
+                                    FuseLayerWithoutParameters<SubtractionLayer>(optimizationViews,
+                                                                                 baseLayer,
+                                                                                 activationLayer,
+                                                                                 activationDesc,
+                                                                                 name);
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    if (optimizationViews.GetSubstitutions().empty())
+    {
+        optimizationViews.AddUntouchedSubgraph(SubgraphView(subgraph));
+    }
 
     return optimizationViews;
 }
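
Every fusion branch above follows the same shape: call the layer's Neon*WorkloadValidate with the candidate activation descriptor, record the fused substitution only if validation succeeds, and register the subgraph as untouched when nothing was fused. The following is a minimal, self-contained sketch of that decision flow; the types (ActivationDesc, Substitution, OptimizationViewsSketch, TryFuse) are illustrative stand-ins, not the real ArmNN/ACL classes.

    #include <functional>
    #include <iostream>
    #include <string>
    #include <vector>

    // Toy stand-ins for the real ArmNN/ACL types used above; illustrative only.
    struct ActivationDesc { std::string function; };        // e.g. "ReLu"
    struct Substitution   { std::string fusedLayerName; };

    struct OptimizationViewsSketch
    {
        std::vector<Substitution> substitutions;
        bool untouched = false;
        void AddUntouchedSubgraph() { untouched = true; }
    };

    // Each layer type has its own validate call that also sees the activation
    // descriptor; the fused substitution is recorded only if validation passes.
    void TryFuse(OptimizationViewsSketch& views,
                 const std::string& baseLayerName,
                 const ActivationDesc& act,
                 const std::function<bool(const ActivationDesc&)>& validate)
    {
        if (validate(act))
        {
            views.substitutions.push_back({baseLayerName + "+" + act.function});
        }
    }

    int main()
    {
        OptimizationViewsSketch views;
        ActivationDesc relu{"ReLu"};

        // Pretend Convolution2d accepts the fused activation but Division rejects it.
        TryFuse(views, "Convolution2d", relu, [](const ActivationDesc&) { return true;  });
        TryFuse(views, "Division",      relu, [](const ActivationDesc&) { return false; });

        if (views.substitutions.empty())
        {
            views.AddUntouchedSubgraph();
        }

        for (const auto& s : views.substitutions)
        {
            std::cout << "fused: " << s.fusedLayerName << "\n";
        }
        std::cout << "untouched subgraph: " << std::boolalpha << views.untouched << "\n";
    }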
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 0084dbd..f55d1c8 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -167,7 +167,8 @@
                                    reasonIfUnsupported,
                                    input0,
                                    input1,
-                                   output);
+                                   output,
+                                   nullptr);
 }
 
 bool NeonLayerSupport::IsArgMinMaxSupported(const TensorInfo& input,
@@ -199,7 +200,8 @@
                                    var,
                                    beta,
                                    gamma,
-                                   descriptor);
+                                   descriptor,
+                                   nullptr);
 }
 
 bool NeonLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
@@ -345,7 +347,8 @@
                                    descriptor,
                                    weights,
                                    biases,
-                                   isFastMathEnabled);
+                                   isFastMathEnabled,
+                                   nullptr);
 }
 
 bool NeonLayerSupport::IsDepthToSpaceSupported(const TensorInfo& input,
@@ -373,7 +376,8 @@
                                    output,
                                    descriptor,
                                    weights,
-                                   biases);
+                                   biases,
+                                   nullptr);
 }
 
 bool NeonLayerSupport::IsDequantizeSupported(const TensorInfo& input,
@@ -399,7 +403,8 @@
                                    output,
                                    descriptor,
                                    weights,
-                                   biases);
+                                   biases,
+                                   nullptr);
 }
 
 bool NeonLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
@@ -474,7 +479,8 @@
                                    output,
                                    weights,
                                    biases,
-                                   descriptor);
+                                   descriptor,
+                                   nullptr);
 }
 
 bool NeonLayerSupport::IsGatherSupported(const TensorInfo& input0,
@@ -611,7 +617,8 @@
                                    reasonIfUnsupported,
                                    input0,
                                    input1,
-                                   output);
+                                   output,
+                                   nullptr);
 }
 
 bool NeonLayerSupport::IsDivisionSupported(const TensorInfo& input0,
@@ -623,7 +630,8 @@
                                    reasonIfUnsupported,
                                    input0,
                                    input1,
-                                   output);
+                                   output,
+                                   nullptr);
 }
 
 bool NeonLayerSupport::IsNormalizationSupported(const TensorInfo& input,
@@ -911,7 +919,8 @@
                                    reasonIfUnsupported,
                                    input0,
                                    input1,
-                                   output);
+                                   output,
+                                   nullptr);
 }
 
 bool NeonLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
diff --git a/src/backends/neon/workloads/NeonAdditionWorkload.cpp b/src/backends/neon/workloads/NeonAdditionWorkload.cpp
index cb0c8a4..9300b31 100644
--- a/src/backends/neon/workloads/NeonAdditionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonAdditionWorkload.cpp
@@ -7,6 +7,8 @@
 #include "NeonWorkloadUtils.hpp"
 
 #include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <aclCommon/ArmComputeUtils.hpp>
+
 #include <armnn/utility/PolymorphicDowncast.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
 
@@ -17,16 +19,21 @@
 
 arm_compute::Status NeonAdditionWorkloadValidate(const TensorInfo& input0,
                                                  const TensorInfo& input1,
-                                                 const TensorInfo& output)
+                                                 const TensorInfo& output,
+                                                 const ActivationDescriptor* activationDescriptor)
 {
     const arm_compute::TensorInfo aclInput0 = armcomputetensorutils::BuildArmComputeTensorInfo(input0);
     const arm_compute::TensorInfo aclInput1 = armcomputetensorutils::BuildArmComputeTensorInfo(input1);
     const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);
 
+    const arm_compute::ActivationLayerInfo activationInfo = ConvertActivationDescriptorToAclActivationLayerInfo(
+            activationDescriptor);
+
     return arm_compute::NEArithmeticAddition::validate(&aclInput0,
                                                        &aclInput1,
                                                        &aclOutput,
-                                                       arm_compute::ConvertPolicy::SATURATE);
+                                                       arm_compute::ConvertPolicy::SATURATE,
+                                                       activationInfo);
 }
 
 
@@ -40,8 +47,10 @@
     arm_compute::ITensor& input2 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
     arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
+    const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
+
     auto layer = std::make_unique<arm_compute::NEArithmeticAddition>();
-    layer->configure(&input1, &input2, &output, arm_compute::ConvertPolicy::SATURATE);
+    layer->configure(&input1, &input2, &output, arm_compute::ConvertPolicy::SATURATE, activationInfo);
     m_AddLayer.reset(layer.release());
 }
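
The workload changes all lean on the two helpers included from aclCommon/ArmComputeUtils.hpp: ConvertActivationDescriptorToAclActivationLayerInfo on the validate path (taking an optional ActivationDescriptor pointer) and ConvertAdditionalInfoToAclActivationLayerInfo on the configure path (reading the fused activation carried on the queue descriptor). The key property is that a null descriptor maps to a default, disabled ActivationLayerInfo so the non-fused path behaves exactly as before. A rough sketch of that behaviour with illustrative toy types (ToyActivationDescriptor and friends are not the real ArmNN/ACL structs):

    #include <cassert>
    #include <iostream>

    // Toy stand-ins; the real code uses armnn::ActivationDescriptor and
    // arm_compute::ActivationLayerInfo.
    enum class ToyActivation { None, ReLu, BoundedReLu };

    struct ToyActivationDescriptor
    {
        ToyActivation m_Function;
        float m_A;   // e.g. upper bound for BoundedReLu
        float m_B;
    };

    struct ToyAclActivationLayerInfo
    {
        ToyActivation function;
        float a;
        float b;
        bool enabled() const { return function != ToyActivation::None; }
    };

    ToyAclActivationLayerInfo ConvertToyDescriptor(const ToyActivationDescriptor* desc)
    {
        if (!desc)
        {
            // No descriptor: disabled activation, identical to the pre-fusion behaviour.
            return ToyAclActivationLayerInfo{ToyActivation::None, 0.0f, 0.0f};
        }
        return ToyAclActivationLayerInfo{desc->m_Function, desc->m_A, desc->m_B};
    }

    int main()
    {
        assert(!ConvertToyDescriptor(nullptr).enabled());

        ToyActivationDescriptor bounded{ToyActivation::BoundedReLu, 6.0f, 0.0f};
        ToyAclActivationLayerInfo info = ConvertToyDescriptor(&bounded);
        std::cout << "fused activation enabled: " << info.enabled()
                  << ", upper bound: " << info.a << "\n";
    }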
 
diff --git a/src/backends/neon/workloads/NeonAdditionWorkload.hpp b/src/backends/neon/workloads/NeonAdditionWorkload.hpp
index 826fb1f..8e43cbd 100644
--- a/src/backends/neon/workloads/NeonAdditionWorkload.hpp
+++ b/src/backends/neon/workloads/NeonAdditionWorkload.hpp
@@ -8,6 +8,7 @@
 #include <backendsCommon/Workload.hpp>
 
 #include <arm_compute/core/Error.h>
+#include <arm_compute/core/Types.h>
 #include <arm_compute/runtime/IFunction.h>
 
 namespace armnn
@@ -15,7 +16,8 @@
 
 arm_compute::Status NeonAdditionWorkloadValidate(const TensorInfo& input0,
                                                  const TensorInfo& input1,
-                                                 const TensorInfo& output);
+                                                 const TensorInfo& output,
+                                                 const ActivationDescriptor* activationDescriptor = nullptr);
 
 class NeonAdditionWorkload : public BaseWorkload<AdditionQueueDescriptor>
 {
diff --git a/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp b/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp
index ff777db..33480fa 100644
--- a/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp
+++ b/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp
@@ -8,7 +8,10 @@
 #include "NeonWorkloadUtils.hpp"
 
 #include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <aclCommon/ArmComputeUtils.hpp>
+
 #include <armnn/utility/PolymorphicDowncast.hpp>
+
 #include <backendsCommon/CpuTensorHandle.hpp>
 
 #include <arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h>
@@ -24,7 +27,8 @@
                                                    const TensorInfo& var,
                                                    const TensorInfo& beta,
                                                    const TensorInfo& gamma,
-                                                   const BatchNormalizationDescriptor& descriptor)
+                                                   const BatchNormalizationDescriptor& descriptor,
+                                                   const ActivationDescriptor* activationDescriptor)
 {
     const arm_compute::TensorInfo aclInputInfo =
           armcomputetensorutils::BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
@@ -39,13 +43,17 @@
     const arm_compute::TensorInfo aclGammaInfo =
           armcomputetensorutils::BuildArmComputeTensorInfo(gamma, descriptor.m_DataLayout);
 
+    const arm_compute::ActivationLayerInfo activationInfo = ConvertActivationDescriptorToAclActivationLayerInfo(
+            activationDescriptor);
+
     return arm_compute::NEBatchNormalizationLayer::validate(&aclInputInfo,
                                                             &aclOutputInfo,
                                                             &aclMeanInfo,
                                                             &aclVarInfo,
                                                             &aclBetaInfo,
                                                             &aclGammaInfo,
-                                                            descriptor.m_Eps);
+                                                            descriptor.m_Eps,
+                                                            activationInfo);
 }
 
 NeonBatchNormalizationWorkload::NeonBatchNormalizationWorkload(
@@ -73,6 +81,8 @@
     m_Beta = std::make_unique<arm_compute::Tensor>();
     BuildArmComputeTensor(*m_Beta, m_Data.m_Beta->GetTensorInfo());
 
+    const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
+
     auto layer = std::make_unique<arm_compute::NEBatchNormalizationLayer>();
     layer->configure(&input,
                      &output,
@@ -80,7 +90,8 @@
                      m_Variance.get(),
                      m_Beta.get(),
                      m_Gamma.get(),
-                     m_Data.m_Parameters.m_Eps);
+                     m_Data.m_Parameters.m_Eps,
+                     activationInfo);
     m_Layer.reset(layer.release());
 
     InitializeArmComputeTensorData(*m_Mean, m_Data.m_Mean);
diff --git a/src/backends/neon/workloads/NeonBatchNormalizationWorkload.hpp b/src/backends/neon/workloads/NeonBatchNormalizationWorkload.hpp
index 3619ea0..fea778f 100644
--- a/src/backends/neon/workloads/NeonBatchNormalizationWorkload.hpp
+++ b/src/backends/neon/workloads/NeonBatchNormalizationWorkload.hpp
@@ -21,7 +21,8 @@
                                                    const TensorInfo& var,
                                                    const TensorInfo& beta,
                                                    const TensorInfo& gamma,
-                                                   const BatchNormalizationDescriptor& descriptor);
+                                                   const BatchNormalizationDescriptor& descriptor,
+                                                   const ActivationDescriptor* activationDescriptor = nullptr);
 
 class NeonBatchNormalizationWorkload : public BaseWorkload<BatchNormalizationQueueDescriptor>
 {
diff --git a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
index af6f1ae..fd8be17 100644
--- a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
@@ -6,6 +6,7 @@
 #include "NeonConvolution2dWorkload.hpp"
 
 #include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <aclCommon/ArmComputeUtils.hpp>
 #include <armnn/utility/PolymorphicDowncast.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
 #include <neon/workloads/NeonWorkloadUtils.hpp>
@@ -25,7 +26,8 @@
                                                       const Convolution2dDescriptor& descriptor,
                                                       const TensorInfo& weights,
                                                       const Optional<TensorInfo>& biases,
-                                                      bool isFastMathEnabled)
+                                                      bool isFastMathEnabled,
+                                                      const ActivationDescriptor* activationDescriptor)
 {
     const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
     const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
@@ -47,6 +49,9 @@
 
     arm_compute::PadStrideInfo layerInfo = BuildArmComputePadStrideInfo(descriptor);
 
+    const arm_compute::ActivationLayerInfo activationInfo = ConvertActivationDescriptorToAclActivationLayerInfo(
+            activationDescriptor);
+
     return arm_compute::NEConvolutionLayer::validate(&aclInputInfo,
                                                      &aclWeightsInfo,
                                                      optionalAclBiasesInfo,
@@ -54,7 +59,7 @@
                                                      layerInfo,
                                                      arm_compute::WeightsInfo(),
                                                      aclDilationInfo,
-                                                     arm_compute::ActivationLayerInfo(),
+                                                     activationInfo,
                                                      isFastMathEnabled);
 }
 
@@ -92,6 +97,8 @@
     const arm_compute::Size2D aclDilationInfo = BuildArmComputeSize2D(m_Data.m_Parameters.m_DilationX,
                                                                       m_Data.m_Parameters.m_DilationY);
 
+    const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
+
     auto convolutionLayer = std::make_unique<arm_compute::NEConvolutionLayer>(memoryManager);
     convolutionLayer->configure(&input,
                                 m_KernelTensor.get(),
@@ -100,7 +107,7 @@
                                 padStrideInfo,
                                 arm_compute::WeightsInfo(),
                                 aclDilationInfo,
-                                arm_compute::ActivationLayerInfo(),
+                                activationInfo,
                                 isFastMathEnabled);
 
     m_ConvolutionMethod =
@@ -110,7 +117,7 @@
                                                  padStrideInfo,
                                                  arm_compute::WeightsInfo(),
                                                  aclDilationInfo,
-                                                 arm_compute::ActivationLayerInfo(),
+                                                 activationInfo,
                                                  isFastMathEnabled);
 
     m_ConvolutionLayer.reset(convolutionLayer.release());
diff --git a/src/backends/neon/workloads/NeonConvolution2dWorkload.hpp b/src/backends/neon/workloads/NeonConvolution2dWorkload.hpp
index 860d78b..4b6e58c 100644
--- a/src/backends/neon/workloads/NeonConvolution2dWorkload.hpp
+++ b/src/backends/neon/workloads/NeonConvolution2dWorkload.hpp
@@ -21,7 +21,8 @@
                                                       const Convolution2dDescriptor& descriptor,
                                                       const TensorInfo& weights,
                                                       const Optional<TensorInfo>& biases,
-                                                      bool isFastMathEnabled = false);
+                                                      bool isFastMathEnabled = false,
+                                                      const ActivationDescriptor* activationDescriptor = nullptr);
 
 class NeonConvolution2dWorkload : public BaseWorkload<Convolution2dQueueDescriptor>
 {
diff --git a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
index a9a3c75..db6bcc3 100644
--- a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
@@ -10,6 +10,7 @@
 #include <armnnUtils/DataLayoutIndexed.hpp>
 
 #include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <aclCommon/ArmComputeUtils.hpp>
 
 #include <neon/NeonLayerSupport.hpp>
 
@@ -29,7 +30,8 @@
                                                              const TensorInfo& output,
                                                              const DepthwiseConvolution2dDescriptor& descriptor,
                                                              const TensorInfo& weights,
-                                                             const Optional<TensorInfo>& biases)
+                                                             const Optional<TensorInfo>& biases,
+                                                             const ActivationDescriptor* activationDescriptor)
 {
     const arm_compute::TensorInfo aclInputInfo  = BuildArmComputeTensorInfo(input,  descriptor.m_DataLayout);
     const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
@@ -59,13 +61,16 @@
     const arm_compute::Size2D aclDilationInfo = BuildArmComputeSize2D(
             descriptor.m_DilationX,descriptor.m_DilationY);
 
+    const arm_compute::ActivationLayerInfo activationInfo = ConvertActivationDescriptorToAclActivationLayerInfo(
+            activationDescriptor);
+
     return arm_compute::NEDepthwiseConvolutionLayer::validate(&aclInputInfo,
                                                               &aclWeightsInfo,
                                                               optionalAclBiasesInfo,
                                                               &aclOutputInfo,
                                                               aclPadStrideInfo,
                                                               aclDepthMultiplier,
-                                                              arm_compute::ActivationLayerInfo(),
+                                                              activationInfo,
                                                               aclDilationInfo);
 }
 
@@ -116,16 +121,18 @@
 
     arm_compute::PadStrideInfo padStrideInfo = BuildArmComputePadStrideInfo(m_Data.m_Parameters);
 
+    const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
+
     m_pDepthwiseConvolutionLayer = std::make_unique<arm_compute::NEDepthwiseConvolutionLayer>();
     static_cast<arm_compute::NEDepthwiseConvolutionLayer*>(
         m_pDepthwiseConvolutionLayer.get())->configure(&input,
-                                                        m_KernelTensor.get(),
-                                                        m_BiasTensor.get(),
-                                                        &output,
-                                                        padStrideInfo,
-                                                        depthMultiplier,
-                                                        arm_compute::ActivationLayerInfo(),
-                                                        aclDilationInfo);
+                                                       m_KernelTensor.get(),
+                                                       m_BiasTensor.get(),
+                                                       &output,
+                                                       padStrideInfo,
+                                                       depthMultiplier,
+                                                       activationInfo,
+                                                       aclDilationInfo);
 
     ARMNN_ASSERT(m_pDepthwiseConvolutionLayer);
 
diff --git a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.hpp b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.hpp
index 85932d3..d257b91 100644
--- a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.hpp
+++ b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.hpp
@@ -19,7 +19,9 @@
                                                              const TensorInfo& output,
                                                              const DepthwiseConvolution2dDescriptor& descriptor,
                                                              const TensorInfo& weights,
-                                                             const Optional<TensorInfo>& biases);
+                                                             const Optional<TensorInfo>& biases,
+                                                             const ActivationDescriptor* activationDescriptor
+                                                                     = nullptr);
 
 class NeonDepthwiseConvolutionWorkload : public BaseWorkload<DepthwiseConvolution2dQueueDescriptor>
 {
diff --git a/src/backends/neon/workloads/NeonDivisionWorkload.cpp b/src/backends/neon/workloads/NeonDivisionWorkload.cpp
index fc353f1..1a26d95 100644
--- a/src/backends/neon/workloads/NeonDivisionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDivisionWorkload.cpp
@@ -6,23 +6,31 @@
 #include "NeonDivisionWorkload.hpp"
 
 #include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <aclCommon/ArmComputeUtils.hpp>
+
 #include <armnn/utility/PolymorphicDowncast.hpp>
+
 #include <backendsCommon/CpuTensorHandle.hpp>
 
 namespace armnn
 {
 
 arm_compute::Status NeonDivisionWorkloadValidate(const TensorInfo& input0,
-                                                const TensorInfo& input1,
-                                                const TensorInfo& output)
+                                                 const TensorInfo& input1,
+                                                 const TensorInfo& output,
+                                                 const ActivationDescriptor* activationDescriptor)
 {
     const arm_compute::TensorInfo aclInput0 = armcomputetensorutils::BuildArmComputeTensorInfo(input0);
     const arm_compute::TensorInfo aclInput1 = armcomputetensorutils::BuildArmComputeTensorInfo(input1);
     const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);
 
+    const arm_compute::ActivationLayerInfo activationInfo = ConvertActivationDescriptorToAclActivationLayerInfo(
+            activationDescriptor);
+
     return arm_compute::NEElementwiseDivision::validate(&aclInput0,
-                                                   &aclInput1,
-                                                   &aclOutput);
+                                                        &aclInput1,
+                                                        &aclOutput,
+                                                        activationInfo);
 }
 
 NeonDivisionWorkload::NeonDivisionWorkload(const DivisionQueueDescriptor& descriptor,
@@ -35,7 +43,9 @@
     arm_compute::ITensor& input1 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
     arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
-    m_DivLayer.configure(&input0, &input1, &output);
+    const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
+
+    m_DivLayer.configure(&input0, &input1, &output, activationInfo);
 }
 
 void NeonDivisionWorkload::Execute() const
diff --git a/src/backends/neon/workloads/NeonDivisionWorkload.hpp b/src/backends/neon/workloads/NeonDivisionWorkload.hpp
index 2405d9a..fffe02f 100644
--- a/src/backends/neon/workloads/NeonDivisionWorkload.hpp
+++ b/src/backends/neon/workloads/NeonDivisionWorkload.hpp
@@ -13,8 +13,9 @@
 {
 
 arm_compute::Status NeonDivisionWorkloadValidate(const TensorInfo& input0,
-                                                const TensorInfo& input1,
-                                                const TensorInfo& output);
+                                                 const TensorInfo& input1,
+                                                 const TensorInfo& output,
+                                                 const ActivationDescriptor* activationDescriptor = nullptr);
 
 class NeonDivisionWorkload : public BaseWorkload<DivisionQueueDescriptor>
 {
diff --git a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
index e808c60..31489a0 100644
--- a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
+++ b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
@@ -6,9 +6,12 @@
 #include "NeonFullyConnectedWorkload.hpp"
 
 #include "NeonWorkloadUtils.hpp"
+
 #include <aclCommon/ArmComputeTensorUtils.hpp>
 #include <aclCommon/ArmComputeUtils.hpp>
+
 #include <armnn/utility/PolymorphicDowncast.hpp>
+
 #include <backendsCommon/CpuTensorHandle.hpp>
 
 #include <arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h>
@@ -21,7 +24,8 @@
                                                        const TensorInfo& output,
                                                        const TensorInfo& weights,
                                                        const TensorInfo& biases,
-                                                       const FullyConnectedDescriptor& descriptor)
+                                                       const FullyConnectedDescriptor& descriptor,
+                                                       const ActivationDescriptor* activationDescriptor)
 {
     const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input);
     const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output);
@@ -36,8 +40,7 @@
     }
 
     const arm_compute::FullyConnectedLayerInfo fullyConnectedLayerInfo =
-        ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(descriptor);
-
+        ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(descriptor, activationDescriptor);
 
     return arm_compute::NEFullyConnectedLayer::validate(&aclInput,
                                                         &aclWeights,
@@ -64,9 +67,10 @@
         BuildArmComputeTensor(*m_BiasesTensor, m_Data.m_Bias->GetTensorInfo());
     }
 
-    // Construct
-    arm_compute::FullyConnectedLayerInfo fc_info;
-    fc_info.transpose_weights = m_Data.m_Parameters.m_TransposeWeightMatrix;
+    const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
+
+    arm_compute::FullyConnectedLayerInfo fc_info =
+            ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(descriptor.m_Parameters, activationInfo);
 
     auto layer = std::make_unique<arm_compute::NEFullyConnectedLayer>(memoryManager);
     layer->configure(&input, m_WeightsTensor.get(), m_BiasesTensor.get(), &output, fc_info);
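
For FullyConnected the fused activation is not an extra configure() argument; ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo folds it into the FullyConnectedLayerInfo that both validate() and configure() consume, replacing the previously hand-built fc_info. A hypothetical, self-contained sketch of that folding, using toy stand-ins (ToyFcDescriptor/ToyFcLayerInfo are illustrative, not the real ACL structs):

    #include <iostream>

    // Toy stand-ins for armnn::FullyConnectedDescriptor and
    // arm_compute::FullyConnectedLayerInfo.
    struct ToyFcDescriptor
    {
        bool m_TransposeWeightMatrix = true;
        bool m_BiasEnabled = true;
    };

    struct ToyActivationInfo
    {
        bool enabled;
        const char* function;
    };

    struct ToyFcLayerInfo
    {
        bool transpose_weights;
        ToyActivationInfo activation_info;   // carried into validate() and configure()
    };

    // One helper builds the complete layer info, so validate() and configure()
    // see the same fused activation instead of a hand-assembled struct.
    ToyFcLayerInfo BuildToyFcLayerInfo(const ToyFcDescriptor& desc,
                                       const ToyActivationInfo& activation)
    {
        ToyFcLayerInfo info{desc.m_TransposeWeightMatrix, activation};
        return info;
    }

    int main()
    {
        ToyFcDescriptor desc;
        ToyActivationInfo relu{true, "ReLu"};

        ToyFcLayerInfo info = BuildToyFcLayerInfo(desc, relu);
        std::cout << "transpose_weights=" << info.transpose_weights
                  << " activation=" << info.activation_info.function << "\n";
    }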
diff --git a/src/backends/neon/workloads/NeonFullyConnectedWorkload.hpp b/src/backends/neon/workloads/NeonFullyConnectedWorkload.hpp
index 1cd8be1..8dc7fdc 100644
--- a/src/backends/neon/workloads/NeonFullyConnectedWorkload.hpp
+++ b/src/backends/neon/workloads/NeonFullyConnectedWorkload.hpp
@@ -21,7 +21,8 @@
                                                        const TensorInfo& output,
                                                        const TensorInfo& weights,
                                                        const TensorInfo& biases,
-                                                       const FullyConnectedDescriptor& descriptor);
+                                                       const FullyConnectedDescriptor& descriptor,
+                                                       const ActivationDescriptor* activationDescriptor = nullptr);
 
 class NeonFullyConnectedWorkload : public BaseWorkload<FullyConnectedQueueDescriptor>
 {
diff --git a/src/backends/neon/workloads/NeonMultiplicationWorkload.cpp b/src/backends/neon/workloads/NeonMultiplicationWorkload.cpp
index 6f78b8e..e4ed195 100644
--- a/src/backends/neon/workloads/NeonMultiplicationWorkload.cpp
+++ b/src/backends/neon/workloads/NeonMultiplicationWorkload.cpp
@@ -7,6 +7,8 @@
 
 #include "NeonWorkloadUtils.hpp"
 
+#include <aclCommon/ArmComputeUtils.hpp>
+
 #include <armnn/utility/PolymorphicDowncast.hpp>
 
 #include <arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h>
@@ -16,7 +18,8 @@
 
 arm_compute::Status NeonMultiplicationWorkloadValidate(const TensorInfo& input0,
                                                        const TensorInfo& input1,
-                                                       const TensorInfo& output)
+                                                       const TensorInfo& output,
+                                                       const ActivationDescriptor* activationDescriptor)
 {
     const arm_compute::TensorInfo aclInput1 = armcomputetensorutils::BuildArmComputeTensorInfo(input0);
     const arm_compute::TensorInfo aclInput2 = armcomputetensorutils::BuildArmComputeTensorInfo(input1);
@@ -26,6 +29,9 @@
                           arm_compute::ConvertPolicy::SATURATE :
                           arm_compute::ConvertPolicy::WRAP;
 
+    const arm_compute::ActivationLayerInfo activationInfo = ConvertActivationDescriptorToAclActivationLayerInfo(
+            activationDescriptor);
+
     // At the time of writing, configure() will fail if a rounding policy other than TO_ZERO is supplied to it,
     // when providing a scale of 1.0 for F32 tensors, even though the provided rounding policy appears to be
     // ignored for F32 tensors.
@@ -34,7 +40,8 @@
                                                             &aclOutput,
                                                             1.0f,
                                                             convertPolicy,
-                                                            arm_compute::RoundingPolicy::TO_ZERO);
+                                                            arm_compute::RoundingPolicy::TO_ZERO,
+                                                            activationInfo);
 }
 
 NeonMultiplicationWorkload::NeonMultiplicationWorkload(const MultiplicationQueueDescriptor& descriptor,
@@ -52,6 +59,8 @@
                           arm_compute::ConvertPolicy::SATURATE :
                           arm_compute::ConvertPolicy::WRAP;
 
+    const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
+
     // At the time of writing, configure() will fail if a rounding policy other than TO_ZERO is supplied to it,
     // when providing a scale of 1.0 for F32 tensors, even though the provided rounding policy appears to be
     // ignored for F32 tensors.
@@ -61,7 +70,8 @@
                      &output,
                      1.0f,
                      convertPolicy,
-                     arm_compute::RoundingPolicy::TO_ZERO);
+                     arm_compute::RoundingPolicy::TO_ZERO,
+                     activationInfo);
     m_PixelWiseMultiplication.reset(layer.release());
 }
 
diff --git a/src/backends/neon/workloads/NeonMultiplicationWorkload.hpp b/src/backends/neon/workloads/NeonMultiplicationWorkload.hpp
index bfbaf77..d2bcd04 100644
--- a/src/backends/neon/workloads/NeonMultiplicationWorkload.hpp
+++ b/src/backends/neon/workloads/NeonMultiplicationWorkload.hpp
@@ -8,6 +8,7 @@
 #include <backendsCommon/Workload.hpp>
 
 #include <arm_compute/core/Error.h>
+#include <arm_compute/core/Types.h>
 #include <arm_compute/runtime/IFunction.h>
 
 #include <memory>
@@ -16,7 +17,8 @@
 {
 arm_compute::Status NeonMultiplicationWorkloadValidate(const TensorInfo& input0,
                                                        const TensorInfo& input1,
-                                                       const TensorInfo& output);
+                                                       const TensorInfo& output,
+                                                       const ActivationDescriptor* activationDescriptor = nullptr);
 
 class NeonMultiplicationWorkload : public BaseWorkload<MultiplicationQueueDescriptor>
 {
diff --git a/src/backends/neon/workloads/NeonSubtractionWorkload.cpp b/src/backends/neon/workloads/NeonSubtractionWorkload.cpp
index ccc2bfe..21f0f6f 100644
--- a/src/backends/neon/workloads/NeonSubtractionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSubtractionWorkload.cpp
@@ -6,8 +6,12 @@
 #include "NeonSubtractionWorkload.hpp"
 
 #include "NeonWorkloadUtils.hpp"
+
 #include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <aclCommon/ArmComputeUtils.hpp>
+
 #include <armnn/utility/PolymorphicDowncast.hpp>
+
 #include <backendsCommon/CpuTensorHandle.hpp>
 
 #include <arm_compute/runtime/NEON/functions/NEArithmeticSubtraction.h>
@@ -17,16 +21,21 @@
 
 arm_compute::Status NeonSubtractionWorkloadValidate(const TensorInfo& input0,
                                                     const TensorInfo& input1,
-                                                    const TensorInfo& output)
+                                                    const TensorInfo& output,
+                                                    const ActivationDescriptor* activationDescriptor)
 {
     const arm_compute::TensorInfo aclInput0 = armcomputetensorutils::BuildArmComputeTensorInfo(input0);
     const arm_compute::TensorInfo aclInput1 = armcomputetensorutils::BuildArmComputeTensorInfo(input1);
     const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);
 
+    const arm_compute::ActivationLayerInfo activationInfo = ConvertActivationDescriptorToAclActivationLayerInfo(
+            activationDescriptor);
+
     return arm_compute::NEArithmeticSubtraction::validate(&aclInput0,
                                                           &aclInput1,
                                                           &aclOutput,
-                                                          arm_compute::ConvertPolicy::SATURATE);
+                                                          arm_compute::ConvertPolicy::SATURATE,
+                                                          activationInfo);
 }
 
 NeonSubtractionWorkload::NeonSubtractionWorkload(const SubtractionQueueDescriptor& descriptor,
@@ -39,8 +48,10 @@
     arm_compute::ITensor& input2 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
     arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
+    const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
+
     auto layer = std::make_unique<arm_compute::NEArithmeticSubtraction>();
-    layer->configure(&input1, &input2, &output, arm_compute::ConvertPolicy::SATURATE);
+    layer->configure(&input1, &input2, &output, arm_compute::ConvertPolicy::SATURATE, activationInfo);
     m_SubLayer.reset(layer.release());
 }
 
diff --git a/src/backends/neon/workloads/NeonSubtractionWorkload.hpp b/src/backends/neon/workloads/NeonSubtractionWorkload.hpp
index 3326f8b..19d0811 100644
--- a/src/backends/neon/workloads/NeonSubtractionWorkload.hpp
+++ b/src/backends/neon/workloads/NeonSubtractionWorkload.hpp
@@ -8,6 +8,7 @@
 #include <backendsCommon/Workload.hpp>
 
 #include <arm_compute/core/Error.h>
+#include <arm_compute/core/Types.h>
 #include <arm_compute/runtime/IFunction.h>
 
 #include <memory>
@@ -17,7 +18,8 @@
 
 arm_compute::Status NeonSubtractionWorkloadValidate(const TensorInfo& input0,
                                                     const TensorInfo& input1,
-                                                    const TensorInfo& output);
+                                                    const TensorInfo& output,
+                                                    const ActivationDescriptor* activationDescriptor = nullptr);
 
 class NeonSubtractionWorkload : public BaseWorkload<SubtractionQueueDescriptor>
 {
diff --git a/src/profiling/test/ProfilingTestUtils.cpp b/src/profiling/test/ProfilingTestUtils.cpp
index 09639bf..93d0b10 100644
--- a/src/profiling/test/ProfilingTestUtils.cpp
+++ b/src/profiling/test/ProfilingTestUtils.cpp
@@ -413,20 +413,20 @@
     conv2dDesc.m_BiasEnabled = true;
     IConnectableLayer* conv2d = net->AddConvolution2dLayer(conv2dDesc, weights, optionalBiases);
 
-    // Activation layer
-    armnn::ActivationDescriptor activationDesc;
-    armnn::IConnectableLayer* const activation = net->AddActivationLayer(activationDesc, "activation");
+    // Abs layer
+    armnn::ElementwiseUnaryDescriptor absDesc;
+    armnn::IConnectableLayer* const abs = net->AddElementwiseUnaryLayer(absDesc, "abs");
 
     // Output layer
     IConnectableLayer* output = net->AddOutputLayer(0, "output");
 
     input->GetOutputSlot(0).Connect(conv2d->GetInputSlot(0));
-    conv2d->GetOutputSlot(0).Connect(activation->GetInputSlot(0));
-    activation->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+    conv2d->GetOutputSlot(0).Connect(abs->GetInputSlot(0));
+    abs->GetOutputSlot(0).Connect(output->GetInputSlot(0));
 
     input->GetOutputSlot(0).SetTensorInfo(inputInfo);
     conv2d->GetOutputSlot(0).SetTensorInfo(outputInfo);
-    activation->GetOutputSlot(0).SetTensorInfo(outputInfo);
+    abs->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
     // optimize the network
     std::vector<armnn::BackendId> backends = { backendId };
@@ -633,70 +633,70 @@
                                                offset);
     BOOST_TEST_MESSAGE("CONV2D LAYER - WORKLOAD CHILD RELATIONSHIP OK");
 
-    // Activation layer
-    // Activation layer entity
-    VerifyTimelineEntityBinaryPacketData(activation->GetGuid(), readableData, offset);
-    BOOST_TEST_MESSAGE("ACTIVATION ENTITY OK");
+    // Abs layer
+    // Abs layer entity
+    VerifyTimelineEntityBinaryPacketData(abs->GetGuid(), readableData, offset);
+    BOOST_TEST_MESSAGE("ABS ENTITY OK");
 
     // Name entity
-    ProfilingGuid activationLabelGuid = VerifyTimelineLabelBinaryPacketData(
-        EmptyOptional(), "activation", readableData, offset);
-    BOOST_TEST_MESSAGE("ACTIVATION NAME LABEL OK");
+    ProfilingGuid absLabelGuid = VerifyTimelineLabelBinaryPacketData(
+        EmptyOptional(), "abs", readableData, offset);
+    BOOST_TEST_MESSAGE("ABS NAME LABEL OK");
 
     // Entity - Name relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
                                                EmptyOptional(),
-                                               activation->GetGuid(),
-                                               activationLabelGuid,
+                                               abs->GetGuid(),
+                                               absLabelGuid,
                                                LabelsAndEventClasses::NAME_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("ACTIVATION LAYER - NAME RELATIONSHIP OK");
+    BOOST_TEST_MESSAGE("ABS LAYER - NAME RELATIONSHIP OK");
 
     // Entity - Type relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
                                                EmptyOptional(),
-                                               activation->GetGuid(),
+                                               abs->GetGuid(),
                                                LabelsAndEventClasses::LAYER_GUID,
                                                LabelsAndEventClasses::TYPE_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("ACTIVATION LAYER TYPE RELATIONSHIP OK");
+    BOOST_TEST_MESSAGE("ABS LAYER TYPE RELATIONSHIP OK");
 
-    // Network - Activation layer relationship
+    // Network - Abs layer relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
                                                EmptyOptional(),
                                                optNetGuid,
-                                               activation->GetGuid(),
+                                               abs->GetGuid(),
                                                LabelsAndEventClasses::CHILD_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("NETWORK - ACTIVATION LAYER CHILD RELATIONSHIP OK");
+    BOOST_TEST_MESSAGE("NETWORK - ABS LAYER CHILD RELATIONSHIP OK");
 
-    // Conv2d layer - Activation layer relationship
+    // Conv2d layer - Abs layer relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
                                                EmptyOptional(),
                                                conv2d->GetGuid(),
-                                               activation->GetGuid(),
+                                               abs->GetGuid(),
                                                LabelsAndEventClasses::CONNECTION_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("CONV2D LAYER - ACTIVATION LAYER CONNECTION OK");
+    BOOST_TEST_MESSAGE("CONV2D LAYER - ABS LAYER CONNECTION OK");
 
-    // Activation workload
-    // Activation workload entity
-    ProfilingGuid activationWorkloadGuid = VerifyTimelineEntityBinaryPacketData(EmptyOptional(), readableData, offset);
-    BOOST_TEST_MESSAGE("ACTIVATION WORKLOAD ENTITY OK");
+    // Abs workload
+    // Abs workload entity
+    ProfilingGuid absWorkloadGuid = VerifyTimelineEntityBinaryPacketData(EmptyOptional(), readableData, offset);
+    BOOST_TEST_MESSAGE("ABS WORKLOAD ENTITY OK");
 
     // Entity - Type relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
                                                EmptyOptional(),
-                                               activationWorkloadGuid,
+                                               absWorkloadGuid,
                                                LabelsAndEventClasses::WORKLOAD_GUID,
                                                LabelsAndEventClasses::TYPE_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("ACTIVATION WORKLAD TYPE RELATIONSHIP OK");
+    BOOST_TEST_MESSAGE("ABS WORKLAD TYPE RELATIONSHIP OK");
 
     // BackendId entity
     VerifyTimelineLabelBinaryPacketData(EmptyOptional(), backendId.Get(), readableData, offset);
@@ -705,22 +705,22 @@
     // Entity - BackendId relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
                                                EmptyOptional(),
-                                               activationWorkloadGuid,
+                                               absWorkloadGuid,
                                                backendIdLabelGuid,
                                                LabelsAndEventClasses::BACKENDID_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("ACTIVATION WORKLOAD BACKEND ID RELATIONSHIP OK");
+    BOOST_TEST_MESSAGE("ABS WORKLOAD BACKEND ID RELATIONSHIP OK");
 
-    // Activation layer - Activation workload relationship
+    // Abs layer - Abs workload relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
                                                EmptyOptional(),
-                                               activation->GetGuid(),
-                                               activationWorkloadGuid,
+                                               abs->GetGuid(),
+                                               absWorkloadGuid,
                                                LabelsAndEventClasses::CHILD_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("ACTIVATION LAYER - WORKLOAD CHILD RELATIONSHIP OK");
+    BOOST_TEST_MESSAGE("ABS LAYER - WORKLOAD CHILD RELATIONSHIP OK");
 
     // Output layer
     // Output layer entity
@@ -761,15 +761,15 @@
                                                offset);
     BOOST_TEST_MESSAGE("NETWORK - OUTPUT LAYER CHILD RELATIONSHIP OK");
 
-    // Activation layer - Output layer relationship
+    // Abs layer - Output layer relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
                                                EmptyOptional(),
-                                               activation->GetGuid(),
+                                               abs->GetGuid(),
                                                output->GetGuid(),
                                                LabelsAndEventClasses::CONNECTION_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("ACTIVATION LAYER - OUTPUT LAYER CONNECTION OK");
+    BOOST_TEST_MESSAGE("ABS LAYER - OUTPUT LAYER CONNECTION OK");
 
     bufferManager.MarkRead(readableBuffer);
 
@@ -1100,73 +1100,73 @@
                                                offset);
     BOOST_TEST_MESSAGE("CONV2D WORKLOAD EXECUTION END OF LIFE RELATIONSHIP OK");
 
-    // Activation workload execution
-    // Activation workload execution entity
-    ProfilingGuid activationWorkloadExecutionGuid = VerifyTimelineEntityBinaryPacketData(
+    // Abs workload execution
+    // Abs workload execution entity
+    ProfilingGuid absWorkloadExecutionGuid = VerifyTimelineEntityBinaryPacketData(
         EmptyOptional(), readableData, offset);
-    BOOST_TEST_MESSAGE("ACTIVATION WORKLOAD EXECUTION ENTITY OK");
+    BOOST_TEST_MESSAGE("ABS WORKLOAD EXECUTION ENTITY OK");
 
     // Entity - Type relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
                                                EmptyOptional(),
-                                               activationWorkloadExecutionGuid,
+                                               absWorkloadExecutionGuid,
                                                LabelsAndEventClasses::WORKLOAD_EXECUTION_GUID,
                                                LabelsAndEventClasses::TYPE_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("ACTIVATION WORKLOAD EXECUTION TYPE RELATIONSHIP OK");
+    BOOST_TEST_MESSAGE("ABS WORKLOAD EXECUTION TYPE RELATIONSHIP OK");
 
     // Inference - Workload execution relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
                                                EmptyOptional(),
                                                inferenceGuid,
-                                               activationWorkloadExecutionGuid,
+                                               absWorkloadExecutionGuid,
                                                LabelsAndEventClasses::CHILD_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("INFERENCE - ACTIVATION WORKLOAD EXECUTION CHILD RELATIONSHIP OK");
+    BOOST_TEST_MESSAGE("INFERENCE - ABS WORKLOAD EXECUTION CHILD RELATIONSHIP OK");
 
     // Workload - Workload execution relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
                                                EmptyOptional(),
-                                               activationWorkloadGuid,
-                                               activationWorkloadExecutionGuid,
+                                               absWorkloadGuid,
+                                               absWorkloadExecutionGuid,
                                                LabelsAndEventClasses::EXECUTION_OF_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("ACTIVATION WORKLOAD - ACTIVATION WORKLOAD EXECUTION RELATIONSHIP OK");
+    BOOST_TEST_MESSAGE("ABS WORKLOAD - ABS WORKLOAD EXECUTION RELATIONSHIP OK");
 
-    // Start Activation workload execution life
+    // Start Abs workload execution life
     // Event packet - timeline, threadId, eventGuid
-    ProfilingGuid activationWorkloadExecutionSOLEventGuid = VerifyTimelineEventBinaryPacket(
+    ProfilingGuid absWorkloadExecutionSOLEventGuid = VerifyTimelineEventBinaryPacket(
         EmptyOptional(), EmptyOptional(), EmptyOptional(), readableData, offset);
-    BOOST_TEST_MESSAGE("ACTIVATION WORKLOAD EXECUTION START OF LIFE EVENT OK");
+    BOOST_TEST_MESSAGE("ABS WORKLOAD EXECUTION START OF LIFE EVENT OK");
 
-    // Activation workload execution - event relationship
+    // Abs workload execution - event relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::ExecutionLink,
                                                EmptyOptional(),
-                                               activationWorkloadExecutionGuid,
-                                               activationWorkloadExecutionSOLEventGuid,
+                                               absWorkloadExecutionGuid,
+                                               absWorkloadExecutionSOLEventGuid,
                                                LabelsAndEventClasses::ARMNN_PROFILING_SOL_EVENT_CLASS,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("ACTIVATION WORKLOAD EXECUTION START OF LIFE RELATIONSHIP OK");
+    BOOST_TEST_MESSAGE("ABS WORKLOAD EXECUTION START OF LIFE RELATIONSHIP OK");
 
-    // End of Activation workload execution life
+    // End of Abs workload execution life
     // Event packet - timeline, threadId, eventGuid
-    ProfilingGuid activationWorkloadExecutionEOLEventGuid = VerifyTimelineEventBinaryPacket(
+    ProfilingGuid absWorkloadExecutionEOLEventGuid = VerifyTimelineEventBinaryPacket(
         EmptyOptional(), EmptyOptional(), EmptyOptional(), readableData, offset);
-    BOOST_TEST_MESSAGE("ACTIVATION WORKLOAD EXECUTION END OF LIFE EVENT OK");
+    BOOST_TEST_MESSAGE("ABS WORKLOAD EXECUTION END OF LIFE EVENT OK");
 
-    // Activation workload execution - event relationship
+    // Abs workload execution - event relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::ExecutionLink,
                                                EmptyOptional(),
-                                               activationWorkloadExecutionGuid,
-                                               activationWorkloadExecutionEOLEventGuid,
+                                               absWorkloadExecutionGuid,
+                                               absWorkloadExecutionEOLEventGuid,
                                                LabelsAndEventClasses::ARMNN_PROFILING_EOL_EVENT_CLASS,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("ACTIVATION WORKLOAD EXECUTION END OF LIFE RELATIONSHIP OK");
+    BOOST_TEST_MESSAGE("ABS WORKLOAD EXECUTION END OF LIFE RELATIONSHIP OK");
 
     // Output workload execution
     // Output workload execution entity