IVGCVSW-4259 Add frontend and reference workload for UnaryOperationLayer

* Added new layer named ElementwiseUnary
* Deprecated existing Abs/Rsqrt layer functions
* Updated existing Abs/Rsqrt test infrastructure to use new layer
* Added boilerplate for new Exp,Neg,Sqrt elemwise op layers
* AbsQuantize test removed pending future commit
* Serialization support added

!android-nn-driver:2550

Change-Id: Ic595c645925e17b45db568187fd05646daf2e87f
Signed-off-by: josh minor <josh.minor@arm.com>
diff --git a/Android.mk b/Android.mk
index 60d1f7b..86f1602 100644
--- a/Android.mk
+++ b/Android.mk
@@ -120,7 +120,6 @@
         src/armnnUtils/NetworkSockets.cpp \
         src/armnnUtils/Filesystem.cpp \
         src/armnnUtils/Processes.cpp \
-        src/armnn/layers/AbsLayer.cpp \
         src/armnn/layers/ActivationLayer.cpp \
         src/armnn/layers/AdditionLayer.cpp \
         src/armnn/layers/ArgMinMaxLayer.cpp \
@@ -139,6 +138,7 @@
         src/armnn/layers/DetectionPostProcessLayer.cpp \
         src/armnn/layers/DivisionLayer.cpp \
         src/armnn/layers/ElementwiseBaseLayer.cpp \
+        src/armnn/layers/ElementwiseUnaryLayer.cpp \
         src/armnn/layers/FakeQuantizationLayer.cpp \
         src/armnn/layers/FloorLayer.cpp \
         src/armnn/layers/FullyConnectedLayer.cpp \
@@ -166,7 +166,6 @@
         src/armnn/layers/QuantizedLstmLayer.cpp \
         src/armnn/layers/ReshapeLayer.cpp \
         src/armnn/layers/ResizeLayer.cpp \
-        src/armnn/layers/RsqrtLayer.cpp \
         src/armnn/layers/SliceLayer.cpp \
         src/armnn/layers/SoftmaxLayer.cpp \
         src/armnn/layers/SpaceToBatchNdLayer.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 14c2c0c..91b9909 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -244,8 +244,6 @@
     include/armnn/Version.hpp
     src/armnn/layers/LayerCloneBase.hpp
     src/armnn/layers/LayerWithParameters.hpp
-    src/armnn/layers/AbsLayer.hpp
-    src/armnn/layers/AbsLayer.cpp
     src/armnn/layers/ActivationLayer.hpp
     src/armnn/layers/ActivationLayer.cpp
     src/armnn/layers/AdditionLayer.hpp
@@ -280,6 +278,8 @@
     src/armnn/layers/DetectionPostProcessLayer.cpp
     src/armnn/layers/ElementwiseBaseLayer.hpp
     src/armnn/layers/ElementwiseBaseLayer.cpp
+    src/armnn/layers/ElementwiseUnaryLayer.hpp
+    src/armnn/layers/ElementwiseUnaryLayer.cpp
     src/armnn/layers/FakeQuantizationLayer.hpp
     src/armnn/layers/FakeQuantizationLayer.cpp
     src/armnn/layers/FloorLayer.hpp
@@ -336,8 +336,6 @@
     src/armnn/layers/ReshapeLayer.cpp
     src/armnn/layers/ResizeLayer.hpp
     src/armnn/layers/ResizeLayer.cpp
-    src/armnn/layers/RsqrtLayer.cpp
-    src/armnn/layers/RsqrtLayer.hpp
     src/armnn/layers/SliceLayer.cpp
     src/armnn/layers/SliceLayer.hpp
     src/armnn/layers/SoftmaxLayer.hpp
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index ba9a56a..45c0f42 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -78,6 +78,26 @@
     ComparisonOperation m_Operation;
 };
 
+/// An ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer
+struct ElementwiseUnaryDescriptor
+{
+    ElementwiseUnaryDescriptor()
+        : ElementwiseUnaryDescriptor(UnaryOperation::Abs)
+    {}
+
+    ElementwiseUnaryDescriptor(UnaryOperation operation)
+        : m_Operation(operation)
+    {}
+
+    bool operator ==(const ElementwiseUnaryDescriptor &rhs) const
+    {
+        return m_Operation == rhs.m_Operation;
+    }
+
+    /// Specifies the elementwiseUnary operation to execute
+    UnaryOperation m_Operation;
+};
+
 /// A PermuteDescriptor for the PermuteLayer.
 struct PermuteDescriptor
 {
diff --git a/include/armnn/DescriptorsFwd.hpp b/include/armnn/DescriptorsFwd.hpp
index cfdef8a..d03c61d 100644
--- a/include/armnn/DescriptorsFwd.hpp
+++ b/include/armnn/DescriptorsFwd.hpp
@@ -16,6 +16,7 @@
 struct Convolution2dDescriptor;
 struct DepthwiseConvolution2dDescriptor;
 struct DetectionPostProcessDescriptor;
+struct ElementwiseUnaryDescriptor;
 struct FakeQuantizationDescriptor;
 struct FullyConnectedDescriptor;
 struct InstanceNormalizationDescriptor;
diff --git a/include/armnn/ILayerSupport.hpp b/include/armnn/ILayerSupport.hpp
index 4522002..1615d3e 100644
--- a/include/armnn/ILayerSupport.hpp
+++ b/include/armnn/ILayerSupport.hpp
@@ -27,6 +27,7 @@
     virtual ~ILayerSupport() {}
 
 public:
+    ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
     virtual bool IsAbsSupported(const TensorInfo& input,
                                 const TensorInfo& output,
                                 Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
@@ -133,6 +134,11 @@
                                      const TensorInfo& output,
                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
 
+    virtual bool IsElementwiseUnarySupported(const TensorInfo& input,
+                                             const TensorInfo& output,
+                                             const ElementwiseUnaryDescriptor& descriptor,
+                                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+
     ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead")
     virtual bool IsEqualSupported(const TensorInfo& input0,
                                   const TensorInfo& input1,
@@ -292,6 +298,7 @@
                                    const ResizeDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
 
+    ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
     virtual bool IsRsqrtSupported(const TensorInfo& input,
                                   const TensorInfo& output,
                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
diff --git a/include/armnn/ILayerVisitor.hpp b/include/armnn/ILayerVisitor.hpp
index 9669b3a..46f9e56 100644
--- a/include/armnn/ILayerVisitor.hpp
+++ b/include/armnn/ILayerVisitor.hpp
@@ -24,6 +24,7 @@
     /// function is invoked.
     /// @param layer - pointer to the layer which is calling back to this visit function.
     /// @param name - Optional name for the layer.
+    ARMNN_DEPRECATED_MSG("Use VisitElementwiseUnaryLayer instead")
     virtual void VisitAbsLayer(const IConnectableLayer* layer,
                                const char* name = nullptr) = 0;
 
@@ -168,6 +169,14 @@
     virtual void VisitDivisionLayer(const IConnectableLayer* layer,
                                     const char* name = nullptr) = 0;
 
+    /// Function an ElementwiseUnary layer should call back to when its Accept(ILayerVisitor&) function is invoked.
+    /// @param layer - pointer to the layer which is calling back to this visit function.
+    /// @param elementwiseUnaryDescriptor - Description of the layer.
+    /// @param name - Optional name for the layer.
+    virtual void VisitElementwiseUnaryLayer(const IConnectableLayer* layer,
+                                            const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
+                                            const char* name = nullptr) = 0;
+
     /// Function an Equal layer should call back to when its Accept(ILayerVisitor&) function is invoked.
     /// @param layer - pointer to the layer which is calling back to this visit function.
     /// @param name - Optional name for the layer.
@@ -388,6 +397,7 @@
     /// function is invoked.
     /// @param layer - pointer to the layer which is calling back to this visit function.
     /// @param name - Optional name for the layer.
+    ARMNN_DEPRECATED_MSG("Use VisitElementwiseUnaryLayer instead")
     virtual void VisitRsqrtLayer(const IConnectableLayer* layer,
                                  const char* name = nullptr) = 0;
 
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index 647f072..1b1c874 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -196,6 +196,13 @@
         const ConstTensor& anchors,
         const char* name = nullptr) = 0;
 
+    /// Add an ElementwiseUnary layer to the network.
+    /// @param elementwiseUnaryDescriptor - Descriptor for the elementwiseUnary operation.
+    /// @param name - Optional name for the layer.
+    /// @ return - Interface for configuring the layer.
+    virtual IConnectableLayer* AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
+                                                        const char* name = nullptr) = 0;
+
     /// Adds a fully connected layer to the network.
     /// @param fullyConnectedDescriptor - Description of the fully connected layer.
     /// @param weights - Tensor for the weights data.
@@ -297,6 +304,7 @@
     /// Add absolute layer to the network.
     /// @param name - Optional name for the layer.
     /// @ return - Interface for configuring the layer.
+    ARMNN_DEPRECATED_MSG("Use AddElementwiseUnaryLayer instead")
     virtual IConnectableLayer* AddAbsLayer(const char* name = nullptr) = 0;
 
     /// Adds an addition layer to the network.
@@ -474,6 +482,7 @@
     /// Add Reciprocal of square root layer to the network.
     /// @param name - Optional name for the layer.
     /// @ return - Interface for configuring the layer.
+    ARMNN_DEPRECATED_MSG("Use AddElementwiseUnaryLayer instead")
     virtual IConnectableLayer* AddRsqrtLayer(const char* name = nullptr) = 0;
 
     /// Add Gather layer to the network.
diff --git a/include/armnn/LayerVisitorBase.hpp b/include/armnn/LayerVisitorBase.hpp
index 388fc6f..6fd9a66 100644
--- a/include/armnn/LayerVisitorBase.hpp
+++ b/include/armnn/LayerVisitorBase.hpp
@@ -94,6 +94,10 @@
     void VisitDivisionLayer(const IConnectableLayer*,
                             const char*) override { DefaultPolicy::Apply(__func__); }
 
+    void VisitElementwiseUnaryLayer(const IConnectableLayer*,
+                                    const ElementwiseUnaryDescriptor&,
+                                    const char*) override { DefaultPolicy::Apply(__func__); }
+
     void VisitEqualLayer(const IConnectableLayer*,
                          const char*) override { DefaultPolicy::Apply(__func__); }
 
diff --git a/include/armnn/Types.hpp b/include/armnn/Types.hpp
index 5ea214e..1ab5660 100644
--- a/include/armnn/Types.hpp
+++ b/include/armnn/Types.hpp
@@ -80,6 +80,15 @@
     NotEqual       = 5
 };
 
+enum class UnaryOperation
+{
+    Abs   = 0,
+    Exp   = 1,
+    Sqrt  = 2,
+    Rsqrt = 3,
+    Neg   = 4
+};
+
 enum class PoolingAlgorithm
 {
     Max     = 0,
diff --git a/include/armnn/TypesUtils.hpp b/include/armnn/TypesUtils.hpp
index 8157d4f..790f57a 100644
--- a/include/armnn/TypesUtils.hpp
+++ b/include/armnn/TypesUtils.hpp
@@ -66,6 +66,19 @@
     }
 }
 
+constexpr char const* GetUnaryOperationAsCString(UnaryOperation operation)
+{
+    switch (operation)
+    {
+        case UnaryOperation::Abs:   return "Abs";
+        case UnaryOperation::Exp:   return "Exp";
+        case UnaryOperation::Sqrt:  return "Sqrt";
+        case UnaryOperation::Rsqrt: return "Rsqrt";
+        case UnaryOperation::Neg:   return "Neg";
+        default:                    return "Unknown";
+    }
+}
+
 constexpr char const* GetPoolingAlgorithmAsCString(PoolingAlgorithm pooling)
 {
     switch (pooling)
diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp
index 8c2a0f7..10e7f50 100644
--- a/src/armnn/InternalTypes.cpp
+++ b/src/armnn/InternalTypes.cpp
@@ -14,7 +14,6 @@
 {
     switch (type)
     {
-        case LayerType::Abs: return "Abs";
         case LayerType::Activation: return "Activation";
         case LayerType::Addition: return "Addition";
         case LayerType::ArgMinMax: return "ArgMinMax";
@@ -32,6 +31,7 @@
         case LayerType::Dequantize: return "Dequantize";
         case LayerType::DetectionPostProcess: return "DetectionPostProcess";
         case LayerType::Division: return "Division";
+        case LayerType::ElementwiseUnary: return "ElementwiseUnary";
         case LayerType::FakeQuantization: return "FakeQuantization";
         case LayerType::Floor: return "Floor";
         case LayerType::FullyConnected: return "FullyConnected";
@@ -58,7 +58,6 @@
         case LayerType::Quantize:  return "Quantize";
         case LayerType::QuantizedLstm: return "QuantizedLstm";
         case LayerType::Reshape: return "Reshape";
-        case LayerType::Rsqrt: return "Rsqrt";
         case LayerType::Resize: return "Resize";
         case LayerType::Slice: return "Slice";
         case LayerType::Softmax: return "Softmax";
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index 36e7280..2d7be3c 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -14,8 +14,7 @@
 enum class LayerType
 {
     FirstLayer,
-    Abs = FirstLayer,
-    Activation,
+    Activation = FirstLayer,
     Addition,
     ArgMinMax,
     BatchNormalization,
@@ -32,6 +31,7 @@
     Dequantize,
     DetectionPostProcess,
     Division,
+    ElementwiseUnary,
     FakeQuantization,
     Floor,
     FullyConnected,
@@ -59,7 +59,6 @@
     QuantizedLstm,
     Reshape,
     Resize,
-    Rsqrt,
     Slice,
     Softmax,
     SpaceToBatchNd,
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index dac8838..08d91fc 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -570,7 +570,11 @@
                       char* reasonIfUnsupported,
                       size_t reasonIfUnsupportedMaxLength)
 {
-    FORWARD_LAYER_SUPPORT_FUNC(backend, IsRsqrtSupported, input, output);
+    FORWARD_LAYER_SUPPORT_FUNC(backend,
+                               IsElementwiseUnarySupported,
+                               input,
+                               output,
+                               ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt));
 }
 
 bool IsSoftmaxSupported(const BackendId& backend,
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index 13bf900..2d486f4 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -6,7 +6,6 @@
 
 #include "InternalTypes.hpp"
 
-#include "layers/AbsLayer.hpp"
 #include "layers/ActivationLayer.hpp"
 #include "layers/AdditionLayer.hpp"
 #include "layers/ArgMinMaxLayer.hpp"
@@ -24,6 +23,7 @@
 #include "layers/DequantizeLayer.hpp"
 #include "layers/DetectionPostProcessLayer.hpp"
 #include "layers/DivisionLayer.hpp"
+#include "layers/ElementwiseUnaryLayer.hpp"
 #include "layers/FakeQuantizationLayer.hpp"
 #include "layers/FloorLayer.hpp"
 #include "layers/FullyConnectedLayer.hpp"
@@ -51,7 +51,6 @@
 #include "layers/QuantizedLstmLayer.hpp"
 #include "layers/ReshapeLayer.hpp"
 #include "layers/ResizeLayer.hpp"
-#include "layers/RsqrtLayer.hpp"
 #include "layers/SliceLayer.hpp"
 #include "layers/SoftmaxLayer.hpp"
 #include "layers/SpaceToBatchNdLayer.hpp"
@@ -91,7 +90,6 @@
 
 #define DECLARE_LAYER(LayerName) DECLARE_LAYER_IMPL(, LayerName)
 
-DECLARE_LAYER(Abs)
 DECLARE_LAYER(Activation)
 DECLARE_LAYER(Addition)
 DECLARE_LAYER(ArgMinMax)
@@ -109,6 +107,7 @@
 DECLARE_LAYER(Dequantize)
 DECLARE_LAYER(DetectionPostProcess)
 DECLARE_LAYER(Division)
+DECLARE_LAYER(ElementwiseUnary)
 DECLARE_LAYER(FakeQuantization)
 DECLARE_LAYER(Floor)
 DECLARE_LAYER(FullyConnected)
@@ -136,7 +135,6 @@
 DECLARE_LAYER(QuantizedLstm)
 DECLARE_LAYER(Reshape)
 DECLARE_LAYER(Resize)
-DECLARE_LAYER(Rsqrt)
 DECLARE_LAYER(Slice)
 DECLARE_LAYER(Softmax)
 DECLARE_LAYER(SpaceToBatchNd)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 43c79c8..7edc624 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -967,6 +967,12 @@
     return m_Graph->AddLayer<ComparisonLayer>(comparisonDescriptor, name);
 }
 
+IConnectableLayer* Network::AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
+                                                     const char* name)
+{
+    return m_Graph->AddLayer<ElementwiseUnaryLayer>(elementwiseUnaryDescriptor, name);
+}
+
 IConnectableLayer* Network::AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                        const ConstTensor& weights,
                                                        const Optional<ConstTensor>& biases,
@@ -1200,7 +1206,7 @@
 
 IConnectableLayer* Network::AddAbsLayer(const char * name)
 {
-    return m_Graph->AddLayer<AbsLayer>(name);
+    return AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Abs), name);
 }
 
 IConnectableLayer* Network::AddAdditionLayer(const char* name)
@@ -1475,7 +1481,7 @@
 
 IConnectableLayer* Network::AddRsqrtLayer(const char * name)
 {
-    return m_Graph->AddLayer<RsqrtLayer>(name);
+    return AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt), name);
 }
 
 IConnectableLayer* Network::AddGatherLayer(const char* name)
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 0a11941..23a8e47 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -95,6 +95,9 @@
         const ConstTensor& anchors,
         const char* name = nullptr) override;
 
+    IConnectableLayer* AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
+                                                const char* name = nullptr) override;
+
     IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                               const ConstTensor& weights,
                                               const Optional<ConstTensor>& biases,
@@ -137,6 +140,7 @@
     IConnectableLayer* AddMergerLayer(const MergerDescriptor& mergerDescriptor,
                                       const char* name = nullptr) override;
 
+    ARMNN_DEPRECATED_MSG("Use AddElementwiseUnaryLayer instead")
     IConnectableLayer* AddAbsLayer(const char* name = nullptr) override;
 
     IConnectableLayer* AddAdditionLayer(const char* name = nullptr) override;
@@ -208,6 +212,7 @@
     ARMNN_DEPRECATED_MSG("Use AddComparisonLayer instead")
     IConnectableLayer* AddEqualLayer(const char* name = nullptr) override;
 
+    ARMNN_DEPRECATED_MSG("Use AddElementwiseUnaryLayer instead")
     IConnectableLayer* AddRsqrtLayer(const char* name = nullptr) override;
 
     IConnectableLayer* AddMergeLayer(const char* name = nullptr) override;
diff --git a/src/armnn/QuantizerVisitor.cpp b/src/armnn/QuantizerVisitor.cpp
index 4b80b02..51818eb 100644
--- a/src/armnn/QuantizerVisitor.cpp
+++ b/src/armnn/QuantizerVisitor.cpp
@@ -115,9 +115,7 @@
 
 void QuantizerVisitor::VisitAbsLayer(const IConnectableLayer* layer, const char* name)
 {
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddAbsLayer(name);
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
+    VisitElementwiseUnaryLayer(layer, ElementwiseUnaryDescriptor(UnaryOperation::Abs), name);
 }
 
 void QuantizerVisitor::VisitActivationLayer(const IConnectableLayer* layer,
@@ -275,6 +273,15 @@
     SetQuantizedInputConnections(layer, newLayer);
 }
 
+void QuantizerVisitor::VisitElementwiseUnaryLayer(const IConnectableLayer* layer,
+                                                  const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
+                                                  const char* name)
+{
+    IConnectableLayer* newLayer = m_QuantizedNetwork->AddElementwiseUnaryLayer(elementwiseUnaryDescriptor, name);
+    RecordLayer(layer, newLayer);
+    SetQuantizedInputConnections(layer, newLayer);
+}
+
 void QuantizerVisitor::VisitFullyConnectedLayer(const IConnectableLayer *layer,
                                                 const FullyConnectedDescriptor& desc,
                                                 const ConstTensor& weights,
@@ -450,12 +457,9 @@
     SetQuantizedInputConnections(layer, newLayer);
 }
 
-void QuantizerVisitor::VisitRsqrtLayer(const IConnectableLayer* layer,
-                                       const char* name)
+void QuantizerVisitor::VisitRsqrtLayer(const IConnectableLayer* layer, const char* name)
 {
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddRsqrtLayer(name);
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
+    VisitElementwiseUnaryLayer(layer, ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt), name);
 }
 
 void QuantizerVisitor::VisitSliceLayer(const IConnectableLayer* layer,
diff --git a/src/armnn/QuantizerVisitor.hpp b/src/armnn/QuantizerVisitor.hpp
index db0134d..4013033 100644
--- a/src/armnn/QuantizerVisitor.hpp
+++ b/src/armnn/QuantizerVisitor.hpp
@@ -32,6 +32,7 @@
     ~QuantizerVisitor() = default;
 
     /// Functions to quantize the individual layers, overridden from ILayerVisitor
+    ARMNN_DEPRECATED_MSG("Use VisitElementwiseUnaryLayer instead")
     void VisitAbsLayer(const IConnectableLayer* layer, const char* name = nullptr) override;
 
     void VisitActivationLayer(const IConnectableLayer* layer,
@@ -78,13 +79,16 @@
                                 const DepthToSpaceDescriptor& depthToSpaceDescriptor,
                                 const char* name = nullptr) override;
 
-
     void VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer,
                                           const DepthwiseConvolution2dDescriptor& desc,
                                           const ConstTensor& weights,
                                           const Optional<ConstTensor>& biases,
                                           const char* name = nullptr) override;
 
+    void VisitElementwiseUnaryLayer(const IConnectableLayer* layer,
+                                    const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
+                                    const char* name = nullptr) override;
+
     void VisitFullyConnectedLayer(const IConnectableLayer *layer,
                                   const FullyConnectedDescriptor& desc,
                                   const ConstTensor& weights,
@@ -142,6 +146,7 @@
                                   const ResizeBilinearDescriptor& resizeDesc,
                                   const char* name = nullptr) override;
 
+    ARMNN_DEPRECATED_MSG("Use VisitElementwiseUnaryLayer instead")
     void VisitRsqrtLayer(const IConnectableLayer*,
                          const char* name = nullptr) override;
 
diff --git a/src/armnn/layers/ElementwiseUnaryLayer.cpp b/src/armnn/layers/ElementwiseUnaryLayer.cpp
new file mode 100644
index 0000000..d3843da
--- /dev/null
+++ b/src/armnn/layers/ElementwiseUnaryLayer.cpp
@@ -0,0 +1,62 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ElementwiseUnaryLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+#include <algorithm>
+
+namespace armnn
+{
+
+ElementwiseUnaryLayer::ElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& param, const char* name)
+    : LayerWithParameters(1, 1, LayerType::ElementwiseUnary, param, name)
+{
+}
+
+std::unique_ptr<IWorkload> ElementwiseUnaryLayer::CreateWorkload(const IWorkloadFactory& factory) const
+{
+    ElementwiseUnaryQueueDescriptor descriptor;
+    return factory.CreateElementwiseUnary(descriptor, PrepInfoAndDesc(descriptor));
+}
+
+ElementwiseUnaryLayer* ElementwiseUnaryLayer::Clone(Graph& graph) const
+{
+    return CloneBase<ElementwiseUnaryLayer>(graph, m_Param, GetName());
+}
+
+std::vector<TensorShape> ElementwiseUnaryLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+    // Should return the shape of the input tensor
+    BOOST_ASSERT(inputShapes.size() == 1);
+    const TensorShape& input = inputShapes[0];
+
+    return std::vector<TensorShape>({ input });
+}
+
+void ElementwiseUnaryLayer::ValidateTensorShapesFromInputs()
+{
+    VerifyLayerConnections(1, CHECK_LOCATION());
+
+    std::vector<TensorShape> inferredShapes = InferOutputShapes({
+        GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape()});
+    BOOST_ASSERT(inferredShapes.size() == 1);
+
+    ConditionalThrowIfNotEqual<LayerValidationException>(
+        "ElementwiseUnaryLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+        GetOutputSlot(0).GetTensorInfo().GetShape(),
+        inferredShapes[0]);
+}
+
+void ElementwiseUnaryLayer::Accept(ILayerVisitor& visitor) const
+{
+    visitor.VisitElementwiseUnaryLayer(this, GetParameters(), GetName());
+}
+
+} // namespace armnn
diff --git a/src/armnn/layers/ElementwiseUnaryLayer.hpp b/src/armnn/layers/ElementwiseUnaryLayer.hpp
new file mode 100644
index 0000000..850a814
--- /dev/null
+++ b/src/armnn/layers/ElementwiseUnaryLayer.hpp
@@ -0,0 +1,48 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+/// This layer represents an elementwiseUnary operation.
+class ElementwiseUnaryLayer : public LayerWithParameters<ElementwiseUnaryDescriptor>
+{
+public:
+    /// Makes a workload for the elementwiseUnary type. Note that, unlike some
+    /// other layers, no Graph argument is required here.
+    /// @param [in] factory The workload factory which will create the workload
+    /// @return A pointer to the created workload, or nullptr if not created
+    virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
+
+    /// Creates a dynamically-allocated copy of this layer
+    /// @param [in] graph The graph into which this layer is being cloned
+    ElementwiseUnaryLayer* Clone(Graph& graph) const override;
+
+    /// Returns inputShapes by default.
+    /// @param [in] inputShapes The input shapes layer has.
+    /// @return A vector to the inferred output shape.
+    std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+    /// Check if the input tensor shape(s) will lead to a valid configuration
+    /// of @ref ElementwiseUnaryLayer
+    void ValidateTensorShapesFromInputs() override;
+
+    void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+    /// Constructor to create a ElementwiseUnaryLayer
+    /// @param [in] param ElementwiseUnaryDescriptor to configure the ElementwiseUnaryLayer
+    /// @param [in] name Optional name for the layer
+    ElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& param, const char* name);
+
+    /// Default destructor
+    ~ElementwiseUnaryLayer() = default;
+};
+
+} // namespace armnn
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index 02ce12a..4782c43 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -131,14 +131,15 @@
     return workload;
 }
 
-template <typename WorkloadType,
+template <typename WorkloadType, 
           typename DescriptorType,
-          typename LayerType,
           armnn::DataType DataType>
 std::unique_ptr<WorkloadType> CreateElementwiseUnaryWorkloadTest(armnn::IWorkloadFactory & factory,
-                                                                 armnn::Graph & graph)
+                                                                 armnn::Graph & graph,
+                                                                 armnn::UnaryOperation op)
 {
-    Layer* const layer = graph.AddLayer<LayerType>("layer");
+    ElementwiseUnaryDescriptor desc = ElementwiseUnaryDescriptor(op);
+    Layer* const layer = graph.AddLayer<armnn::ElementwiseUnaryLayer>(desc, "layer");
 
     Layer* const input  = graph.AddLayer<InputLayer>(0, "input");
     Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
@@ -1059,34 +1060,6 @@
     return workload;
 }
 
-template <typename RsqrtWorkload, armnn::DataType DataType>
-std::unique_ptr<RsqrtWorkload> CreateRsqrtWorkloadTest(armnn::IWorkloadFactory& factory,
-                                                       armnn::Graph&  graph)
-{
-    Layer* const layer = graph.AddLayer<RsqrtLayer>("rsqrt");
-
-    // Creates extra layers.
-    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
-    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
-
-    // Connects up.
-    armnn::TensorInfo tensorInfo({1, 1}, DataType);
-
-    Connect(input, layer, tensorInfo);
-    Connect(layer, output, tensorInfo);
-
-    CreateTensorHandles(graph, factory);
-
-    // Makes the workload and checks it.
-    auto workload = MakeAndCheckWorkload<RsqrtWorkload>(*layer, factory);
-
-    RsqrtQueueDescriptor queueDescriptor = workload->GetData();
-    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
-    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
-
-    return workload;
-}
-
 template <typename BatchToSpaceNdWorkload, armnn::DataType DataType>
 std::unique_ptr<BatchToSpaceNdWorkload> CreateBatchToSpaceNdWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                          armnn::Graph&  graph)
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index 52beb63..d568b2c 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -1672,61 +1672,6 @@
     VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
 }
 
-BOOST_AUTO_TEST_CASE(QuantizeAbs)
-{
-    class TestAbsQuantization : public TestLeakyReLuActivationQuantization
-    {
-    public:
-        TestAbsQuantization(const TensorShape& inputShape, const TensorShape& outputShape) :
-                TestLeakyReLuActivationQuantization(inputShape, outputShape)
-        {}
-
-        TestAbsQuantization(const QuantizerOptions& options,
-                            const TensorShape& inputShape,
-                            const TensorShape& outputShape) :
-                TestLeakyReLuActivationQuantization(options, inputShape, outputShape)
-        {}
-
-        void VisitAbsLayer(const IConnectableLayer *layer,
-                           const char *name = nullptr) override
-        {
-            boost::ignore_unused(name);
-            TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo();
-
-            TestQuantizationParams(outputInfo,
-                                   { 30.0f / g_Asymm8QuantizationBase, 128 },
-                                   { 15.0f / g_Symm8QuantizationBase,  0},
-                                   { 15.0f / g_Symm16QuantizationBase, 0 });
-        }
-    };
-
-    INetworkPtr network = INetwork::Create();
-
-    //Add the layer being tested
-    IConnectableLayer* absLayer = network->AddAbsLayer();
-
-    const TensorShape shape{1U};
-    TensorInfo info(shape, DataType::Float32);
-
-    IConnectableLayer* activation = CreateStartOfLeakyReluNetwork(network.get(), info);
-
-    CompleteLeakyReluNetwork(network.get(), activation, absLayer, info);
-
-    INetworkPtr quantizedNetworkQAsymm8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestAbsQuantization validatorQAsymm8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymm8.get(), validatorQAsymm8);
-
-    const QuantizerOptions qSymm8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymm8 = INetworkQuantizer::Create(network.get(), qSymm8Options)->ExportNetwork();
-    TestAbsQuantization validatorQSymm8(qSymm8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
-
-    const QuantizerOptions qSymm16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
-    TestAbsQuantization validatorQSymm16(qSymm16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
-}
-
 BOOST_AUTO_TEST_CASE(QuantizeArgMinMax)
 {
     class TestArgMinMaxQuantization : public TestQuantization
diff --git a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
index 36bbd36..efe50a5 100644
--- a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
+++ b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
@@ -86,6 +86,12 @@
 }
 
 template<>
+armnn::ElementwiseUnaryDescriptor GetDescriptor<armnn::ElementwiseUnaryDescriptor>()
+{
+    return armnn::ElementwiseUnaryDescriptor(armnn::UnaryOperation::Abs);
+}
+
+template<>
 armnn::InstanceNormalizationDescriptor GetDescriptor<armnn::InstanceNormalizationDescriptor>()
 {
     armnn::InstanceNormalizationDescriptor descriptor;
@@ -251,6 +257,7 @@
 TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(BatchToSpaceNd)
 TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Comparison)
 TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Concat)
+TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(ElementwiseUnary)
 TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(InstanceNormalization)
 TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(L2Normalization)
 TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(LogSoftmax)
diff --git a/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp b/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
index 221057c..f792bc3 100644
--- a/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
+++ b/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
@@ -48,6 +48,7 @@
 DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Comparison)
 DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Concat)
 DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(DepthToSpace)
+DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(ElementwiseUnary)
 DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(InstanceNormalization)
 DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(L2Normalization)
 DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(LogSoftmax)
diff --git a/src/armnn/test/TestNameOnlyLayerVisitor.cpp b/src/armnn/test/TestNameOnlyLayerVisitor.cpp
index 32de94e..0653b39 100644
--- a/src/armnn/test/TestNameOnlyLayerVisitor.cpp
+++ b/src/armnn/test/TestNameOnlyLayerVisitor.cpp
@@ -38,7 +38,6 @@
 
 BOOST_AUTO_TEST_SUITE(TestNameOnlyLayerVisitor)
 
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Abs)
 TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Addition)
 TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Dequantize)
 TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Division)
@@ -50,7 +49,6 @@
 TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Multiplication)
 TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Prelu)
 TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Quantize)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Rsqrt)
 TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Subtraction)
 TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Switch)
 
diff --git a/src/armnn/test/TestNameOnlyLayerVisitor.hpp b/src/armnn/test/TestNameOnlyLayerVisitor.hpp
index c770b5e..84dfdd6 100644
--- a/src/armnn/test/TestNameOnlyLayerVisitor.hpp
+++ b/src/armnn/test/TestNameOnlyLayerVisitor.hpp
@@ -25,7 +25,6 @@
 
 } // anonymous namespace
 
-DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Abs)
 DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Addition)
 DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Dequantize)
 DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Division)
@@ -37,6 +36,5 @@
 DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Multiplication)
 DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Prelu)
 DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Quantize)
-DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Rsqrt)
 DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Subtraction)
 DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Switch)
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index 6077d05..99ee0b5 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -203,6 +203,7 @@
     m_ParserFunctions[Layer_DequantizeLayer]             = &Deserializer::ParseDequantize;
     m_ParserFunctions[Layer_DetectionPostProcessLayer]   = &Deserializer::ParseDetectionPostProcess;
     m_ParserFunctions[Layer_DivisionLayer]               = &Deserializer::ParseDivision;
+    m_ParserFunctions[Layer_ElementwiseUnaryLayer]       = &Deserializer::ParseElementwiseUnary;
     m_ParserFunctions[Layer_EqualLayer]                  = &Deserializer::ParseEqual;
     m_ParserFunctions[Layer_FullyConnectedLayer]         = &Deserializer::ParseFullyConnected;
     m_ParserFunctions[Layer_FloorLayer]                  = &Deserializer::ParseFloor;
@@ -457,6 +458,25 @@
     }
 }
 
+armnn::UnaryOperation ToUnaryOperation(armnnSerializer::UnaryOperation operation)
+{
+    switch (operation)
+    {
+        case armnnSerializer::UnaryOperation::UnaryOperation_Abs:
+            return armnn::UnaryOperation::Abs;
+        case armnnSerializer::UnaryOperation::UnaryOperation_Rsqrt:
+            return armnn::UnaryOperation::Rsqrt;
+        case armnnSerializer::UnaryOperation::UnaryOperation_Sqrt:
+            return armnn::UnaryOperation::Sqrt;
+        case armnnSerializer::UnaryOperation::UnaryOperation_Exp:
+            return armnn::UnaryOperation::Exp;
+        case armnnSerializer::UnaryOperation::UnaryOperation_Neg:
+            return armnn::UnaryOperation::Neg;
+        default:
+            throw armnn::InvalidArgumentException("Unary operation unknown");
+    }
+}
+
 armnn::ResizeMethod ToResizeMethod(armnnSerializer::ResizeMethod method)
 {
     switch (method)
@@ -926,7 +946,8 @@
 
     auto layerName = GetLayerName(graph, layerIndex);
 
-    IConnectableLayer* layer = m_Network->AddAbsLayer(layerName.c_str());
+    armnn::ElementwiseUnaryDescriptor descriptor(armnn::UnaryOperation::Abs);
+    IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(descriptor, layerName.c_str());
     armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
@@ -1496,6 +1517,33 @@
     RegisterOutputSlots(graph, layerIndex, layer);
 }
 
+void Deserializer::ParseElementwiseUnary(GraphPtr graph, unsigned int layerIndex)
+{
+    CHECK_LAYERS(graph, 0, layerIndex);
+    CHECK_LOCATION();
+
+    auto inputs = GetInputs(graph, layerIndex);
+    CHECK_VALID_SIZE(inputs.size(), 1);
+
+    auto outputs = GetOutputs(graph, layerIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    auto fbLayer      = graph->layers()->Get(layerIndex)->layer_as_ElementwiseUnaryLayer();
+    auto fbDescriptor = fbLayer->descriptor();
+
+    armnn::ElementwiseUnaryDescriptor descriptor;
+    descriptor.m_Operation = ToUnaryOperation(fbDescriptor->operation());
+
+    const std::string layerName  = GetLayerName(graph, layerIndex);
+    IConnectableLayer* layer     = m_Network->AddElementwiseUnaryLayer(descriptor, layerName.c_str());
+
+    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    RegisterInputSlots(graph, layerIndex, layer);
+    RegisterOutputSlots(graph, layerIndex, layer);
+}
+
 void Deserializer::ParseConcat(GraphPtr graph, unsigned int layerIndex)
 {
     CHECK_LAYERS(graph, 0, layerIndex);
@@ -2135,8 +2183,9 @@
     CHECK_VALID_SIZE(outputs.size(), 1);
 
     auto layerName = GetLayerName(graph, layerIndex);
-    IConnectableLayer* layer = m_Network->AddRsqrtLayer(layerName.c_str());
 
+    armnn::ElementwiseUnaryDescriptor descriptor(armnn::UnaryOperation::Rsqrt);
+    IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(descriptor, layerName.c_str());
     armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
diff --git a/src/armnnDeserializer/Deserializer.hpp b/src/armnnDeserializer/Deserializer.hpp
index babb56e..ae8be6e 100644
--- a/src/armnnDeserializer/Deserializer.hpp
+++ b/src/armnnDeserializer/Deserializer.hpp
@@ -92,6 +92,7 @@
     void ParseDequantize(GraphPtr graph, unsigned int layerIndex);
     void ParseDetectionPostProcess(GraphPtr graph, unsigned int layerIndex);
     void ParseDivision(GraphPtr graph, unsigned int layerIndex);
+    void ParseElementwiseUnary(GraphPtr graph, unsigned int layerIndex);
     void ParseEqual(GraphPtr graph, unsigned int layerIndex);
     void ParseFloor(GraphPtr graph, unsigned int layerIndex);
     void ParseFullyConnected(GraphPtr graph, unsigned int layerIndex);
diff --git a/src/armnnSerializer/ArmnnSchema.fbs b/src/armnnSerializer/ArmnnSchema.fbs
index 0d30d96..0f8a816 100644
--- a/src/armnnSerializer/ArmnnSchema.fbs
+++ b/src/armnnSerializer/ArmnnSchema.fbs
@@ -147,7 +147,8 @@
     InstanceNormalization = 50,
     LogSoftmax = 51,
     Comparison = 52,
-    StandIn = 53
+    StandIn = 53,
+    ElementwiseUnary = 54
 }
 
 // Base layer table to be used as part of other layers
@@ -166,6 +167,7 @@
 
 // Table for each layer defined below
 
+/// @deprecated Use ElementwiseUnaryLayer instead
 table AbsLayer {
     base:LayerBase;
 }
@@ -252,6 +254,23 @@
     base:LayerBase;
 }
 
+enum UnaryOperation : byte {
+    Abs = 0,
+    Rsqrt = 1,
+    Sqrt = 2,
+    Exp = 3,
+    Neg = 4
+}
+
+table ElementwiseUnaryDescriptor {
+    operation:UnaryOperation;
+}
+
+table ElementwiseUnaryLayer {
+    base:LayerBase;
+    descriptor:ElementwiseUnaryDescriptor;
+}
+
 /// @deprecated Use ComparisonLayer instead
 table EqualLayer {
     base:LayerBase;
@@ -502,6 +521,7 @@
     padValue:float = 0;
 }
 
+/// @deprecated Use ElementwiseUnaryLayer instead
 table RsqrtLayer {
     base:LayerBase;
 }
@@ -798,7 +818,8 @@
     InstanceNormalizationLayer,
     LogSoftmaxLayer,
     ComparisonLayer,
-    StandInLayer
+    StandInLayer,
+    ElementwiseUnaryLayer
 }
 
 table AnyLayer {
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index 91b6241..13ea0f0 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -439,6 +439,21 @@
     CreateAnyLayer(fbDivisionLayer.o, serializer::Layer::Layer_DivisionLayer);
 }
 
+void SerializerVisitor::VisitElementwiseUnaryLayer(const armnn::IConnectableLayer* layer,
+                                                   const armnn::ElementwiseUnaryDescriptor& descriptor,
+                                                   const char* name)
+{
+    boost::ignore_unused(name);
+
+    auto fbBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_ElementwiseUnary);
+    auto fbDescriptor = serializer::CreateElementwiseUnaryDescriptor(
+        m_flatBufferBuilder,
+        GetFlatBufferUnaryOperation(descriptor.m_Operation));
+
+    auto fbLayer = serializer::CreateElementwiseUnaryLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
+    CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_ElementwiseUnaryLayer);
+}
+
 void SerializerVisitor::VisitEqualLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
     boost::ignore_unused(name);
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index 7dfd534..d92c93d 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -44,6 +44,7 @@
         return m_serializedLayers;
     }
 
+    ARMNN_DEPRECATED_MSG("Use VisitElementwiseUnaryLayer instead")
     void VisitAbsLayer(const armnn::IConnectableLayer* layer,
                        const char* name = nullptr) override;
 
@@ -109,6 +110,10 @@
     void VisitDivisionLayer(const armnn::IConnectableLayer* layer,
                             const char* name = nullptr) override;
 
+    void VisitElementwiseUnaryLayer(const armnn::IConnectableLayer* layer,
+                                    const armnn::ElementwiseUnaryDescriptor& descriptor,
+                                    const char* name = nullptr) override;
+
     ARMNN_DEPRECATED_MSG("Use VisitComparisonLayer instead")
     void VisitEqualLayer(const armnn::IConnectableLayer* layer,
                          const char* name = nullptr) override;
@@ -210,6 +215,7 @@
                                   const armnn::ResizeBilinearDescriptor& resizeDescriptor,
                                   const char* name = nullptr) override;
 
+    ARMNN_DEPRECATED_MSG("Use VisitElementwiseUnaryLayer instead")
     void VisitRsqrtLayer(const armnn::IConnectableLayer* layer,
                          const char* name = nullptr) override;
 
diff --git a/src/armnnSerializer/SerializerSupport.md b/src/armnnSerializer/SerializerSupport.md
index 4fc880a..2f77a8e 100644
--- a/src/armnnSerializer/SerializerSupport.md
+++ b/src/armnnSerializer/SerializerSupport.md
@@ -6,12 +6,12 @@
 
 The Arm NN SDK Serializer currently supports the following layers:
 
-* Abs
 * Activation
 * Addition
 * ArgMinMax
 * BatchToSpaceNd
 * BatchNormalization
+* Comparison
 * Concat
 * Constant
 * Convolution2d
@@ -20,6 +20,7 @@
 * Dequantize
 * DetectionPostProcess
 * Division
+* ElementwiseUnary
 * Floor
 * FullyConnected
 * Gather
@@ -43,7 +44,6 @@
 * QuantizedLstm
 * Reshape
 * Resize
-* Rsqrt
 * Slice
 * Softmax
 * SpaceToBatchNd
@@ -66,3 +66,6 @@
 * Merger will deserialize as Concat
 * Greater will deserialize as Comparison
 * ResizeBilinear will deserialize as Resize
+* Abs will deserialize as ElementwiseUnary
+* Rsqrt will deserialize as ElementwiseUnary
+
diff --git a/src/armnnSerializer/SerializerUtils.cpp b/src/armnnSerializer/SerializerUtils.cpp
index df1ef28..02a5ed3 100644
--- a/src/armnnSerializer/SerializerUtils.cpp
+++ b/src/armnnSerializer/SerializerUtils.cpp
@@ -79,6 +79,25 @@
     }
 }
 
+armnnSerializer::UnaryOperation GetFlatBufferUnaryOperation(armnn::UnaryOperation unaryOperation)
+{
+    switch (unaryOperation)
+    {
+        case armnn::UnaryOperation::Abs:
+            return armnnSerializer::UnaryOperation::UnaryOperation_Abs;
+        case armnn::UnaryOperation::Rsqrt:
+            return armnnSerializer::UnaryOperation::UnaryOperation_Rsqrt;
+        case armnn::UnaryOperation::Sqrt:
+            return armnnSerializer::UnaryOperation::UnaryOperation_Sqrt;
+        case armnn::UnaryOperation::Exp:
+            return armnnSerializer::UnaryOperation::UnaryOperation_Exp;
+        case armnn::UnaryOperation::Neg:
+            return armnnSerializer::UnaryOperation::UnaryOperation_Neg;
+        default:
+            throw armnn::InvalidArgumentException("Unary operation unknown");
+    }
+}
+
 armnnSerializer::PoolingAlgorithm GetFlatBufferPoolingAlgorithm(armnn::PoolingAlgorithm poolingAlgorithm)
 {
     switch (poolingAlgorithm)
diff --git a/src/armnnSerializer/SerializerUtils.hpp b/src/armnnSerializer/SerializerUtils.hpp
index 4855333..edd48a5 100644
--- a/src/armnnSerializer/SerializerUtils.hpp
+++ b/src/armnnSerializer/SerializerUtils.hpp
@@ -18,6 +18,8 @@
 
 armnnSerializer::DataLayout GetFlatBufferDataLayout(armnn::DataLayout dataLayout);
 
+armnnSerializer::UnaryOperation GetFlatBufferUnaryOperation(armnn::UnaryOperation unaryOperation);
+
 armnnSerializer::PoolingAlgorithm GetFlatBufferPoolingAlgorithm(armnn::PoolingAlgorithm poolingAlgorithm);
 
 armnnSerializer::OutputShapeRounding GetFlatBufferOutputShapeRounding(
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index 2b13109..47804fe 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -268,32 +268,6 @@
 
 BOOST_AUTO_TEST_SUITE(SerializerTests)
 
-BOOST_AUTO_TEST_CASE(SerializeAbs)
-{
-    DECLARE_LAYER_VERIFIER_CLASS(Abs)
-
-    const std::string layerName("abs");
-    const armnn::TensorInfo tensorInfo({1, 2, 3}, armnn::DataType::Float32);
-
-    armnn::INetworkPtr network = armnn::INetwork::Create();
-    armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
-
-    armnn::IConnectableLayer* const absLayer = network->AddAbsLayer(layerName.c_str());
-    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
-
-    inputLayer->GetOutputSlot(0).Connect(absLayer->GetInputSlot(0));
-    absLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
-
-    inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-    absLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
-    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
-
-    AbsLayerVerifier verifier(layerName, {tensorInfo}, {tensorInfo});
-    deserializedNetwork->Accept(verifier);
-}
-
 BOOST_AUTO_TEST_CASE(SerializeAddition)
 {
     DECLARE_LAYER_VERIFIER_CLASS(Addition)
@@ -2176,31 +2150,6 @@
     deserializedNetwork->Accept(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeRsqrt)
-{
-    DECLARE_LAYER_VERIFIER_CLASS(Rsqrt)
-
-    const std::string layerName("rsqrt");
-    const armnn::TensorInfo tensorInfo({ 3, 1, 2 }, armnn::DataType::Float32);
-
-    armnn::INetworkPtr network = armnn::INetwork::Create();
-    armnn::IConnectableLayer* const inputLayer  = network->AddInputLayer(0);
-    armnn::IConnectableLayer* const rsqrtLayer  = network->AddRsqrtLayer(layerName.c_str());
-    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
-
-    inputLayer->GetOutputSlot(0).Connect(rsqrtLayer->GetInputSlot(0));
-    rsqrtLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
-
-    inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-    rsqrtLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
-    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
-
-    RsqrtLayerVerifier verifier(layerName, {tensorInfo}, {tensorInfo});
-    deserializedNetwork->Accept(verifier);
-}
-
 BOOST_AUTO_TEST_CASE(SerializeSlice)
 {
     DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(Slice)
diff --git a/src/armnnTfParser/TfParser.cpp b/src/armnnTfParser/TfParser.cpp
index ca98f46..af86619 100755
--- a/src/armnnTfParser/TfParser.cpp
+++ b/src/armnnTfParser/TfParser.cpp
@@ -2627,7 +2627,8 @@
 
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
 
-    IConnectableLayer* const layer = m_Network->AddRsqrtLayer(nodeDef.name().c_str());
+    ElementwiseUnaryDescriptor descriptor(UnaryOperation::Rsqrt);
+    IConnectableLayer* const layer = m_Network->AddElementwiseUnaryLayer(descriptor, nodeDef.name().c_str());
 
     IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
     prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index 8332774..b19356f 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -3,7 +3,10 @@
 // SPDX-License-Identifier: MIT
 //
 
+#include <armnn/Deprecated.hpp>
+#include <armnn/Descriptors.hpp>
 #include <armnn/Exceptions.hpp>
+#include <armnn/Types.hpp>
 
 #include <backendsCommon/LayerSupportBase.hpp>
 
@@ -195,6 +198,26 @@
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
+bool LayerSupportBase::IsElementwiseUnarySupported(const TensorInfo& input,
+                                                   const TensorInfo& output,
+                                                   const ElementwiseUnaryDescriptor& descriptor,
+                                                   Optional<std::string&> reasonIfUnsupported) const
+{
+    if (descriptor.m_Operation == UnaryOperation::Abs)
+    {
+        ARMNN_NO_DEPRECATE_WARN_BEGIN
+        return IsAbsSupported(input, output, reasonIfUnsupported);
+        ARMNN_NO_DEPRECATE_WARN_END
+    }
+    else if (descriptor.m_Operation == UnaryOperation::Rsqrt)
+    {
+        ARMNN_NO_DEPRECATE_WARN_BEGIN
+        return IsRsqrtSupported(input, output, reasonIfUnsupported);
+        ARMNN_NO_DEPRECATE_WARN_END
+    }
+    return false;
+}
+
 bool LayerSupportBase::IsEqualSupported(const armnn::TensorInfo& /*input0*/,
                                         const armnn::TensorInfo& /*input1*/,
                                         const armnn::TensorInfo& /*output*/,
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index 60f94d0..7a65eb5 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -13,6 +13,7 @@
 class LayerSupportBase : public ILayerSupport
 {
 public:
+    ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
     bool IsAbsSupported(const TensorInfo& input,
                         const TensorInfo& output,
                         Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
@@ -119,6 +120,11 @@
                              const TensorInfo& output,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsElementwiseUnarySupported(const TensorInfo& input,
+                                     const TensorInfo& output,
+                                     const ElementwiseUnaryDescriptor& descriptor,
+                                     Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead")
     bool IsEqualSupported(const TensorInfo& input0,
                           const TensorInfo& input1,
@@ -278,6 +284,7 @@
                                    const TensorInfo& output,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
     bool IsRsqrtSupported(const TensorInfo& input,
                           const TensorInfo& output,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index fa5c6fe..d2ab41e 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -2969,4 +2969,28 @@
     }
 }
 
+void ElementwiseUnaryQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+    const std::string descriptorName{"ElementwiseUnaryQueueDescriptor"};
+
+    ValidateNumInputs(workloadInfo,  descriptorName, 1);
+    ValidateNumOutputs(workloadInfo, descriptorName, 1);
+
+    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
+    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
+
+    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
+
+    std::vector<DataType> supportedTypes =
+    {
+        DataType::Float16,
+        DataType::Float32,
+        DataType::QAsymmU8,
+        DataType::QSymmS16
+    };
+
+    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
+    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
+}
+
 } // namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index 43be3cd..c5fcf15 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -560,4 +560,9 @@
     void Validate(const WorkloadInfo& workloadInfo) const;
 };
 
+struct ElementwiseUnaryQueueDescriptor : QueueDescriptorWithParameters<ElementwiseUnaryDescriptor>
+{
+    void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
 } // namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 54ae585..acb73b5 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -68,15 +68,6 @@
 
     switch(layer.GetType())
     {
-        case LayerType::Abs:
-        {
-            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
-            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
-            result = layerSupportObject->IsAbsSupported(OverrideDataType(input, dataType),
-                                                        OverrideDataType(output, dataType),
-                                                        reason);
-            break;
-        }
         case LayerType::Activation:
         {
             auto cLayer = boost::polymorphic_downcast<const ActivationLayer*>(&layer);
@@ -294,6 +285,19 @@
                                                                          reason);
             break;
         }
+        case LayerType::ElementwiseUnary:
+        {
+            auto cLayer = boost::polymorphic_downcast<const ElementwiseUnaryLayer*>(&layer);
+
+            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+
+            result = layerSupportObject->IsElementwiseUnarySupported(OverrideDataType(input, dataType),
+                                                                     OverrideDataType(output, dataType),
+                                                                     cLayer->GetParameters(),
+                                                                     reason);
+            break;
+        }
         case LayerType::FakeQuantization:
         {
             auto cLayer = boost::polymorphic_downcast<const FakeQuantizationLayer*>(&layer);
@@ -807,15 +811,6 @@
                                                            reason);
             break;
         }
-        case LayerType::Rsqrt:
-        {
-            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
-            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
-            result = layerSupportObject->IsRsqrtSupported(OverrideDataType(input, dataType),
-                                                          OverrideDataType(output, dataType),
-                                                          reason);
-            break;
-        }
         case LayerType::Slice:
         {
             auto cLayer = boost::polymorphic_downcast<const SliceLayer*>(&layer);
@@ -1182,6 +1177,12 @@
     return std::unique_ptr<IWorkload>();
 }
 
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& /*desc*/,
+                                                                    const WorkloadInfo& /*info*/) const
+{
+    return std::unique_ptr<IWorkload>();
+}
+
 std::unique_ptr<IWorkload> IWorkloadFactory::CreateEqual(const EqualQueueDescriptor& /*descriptor*/,
                                                          const WorkloadInfo& /*Info*/) const
 {
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index 6e6478f..e1cdff6 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -51,6 +51,7 @@
                                                               DataLayout dataLayout,
                                                               const bool IsMemoryManaged = true) const = 0;
 
+    ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead")
     virtual std::unique_ptr<IWorkload> CreateAbs(const AbsQueueDescriptor& descriptor,
                                                  const WorkloadInfo& info) const;
 
@@ -105,6 +106,9 @@
     virtual std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& descriptor,
                                                       const WorkloadInfo& info) const;
 
+    virtual std::unique_ptr<IWorkload> CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
+                                                              const WorkloadInfo& info) const;
+
     ARMNN_DEPRECATED_MSG("Use CreateComparison instead")
     virtual std::unique_ptr<IWorkload> CreateEqual(const EqualQueueDescriptor& descriptor,
                                                    const WorkloadInfo& Info) const;
@@ -200,6 +204,7 @@
     virtual std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead")
     virtual std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const;
 
diff --git a/src/backends/backendsCommon/WorkloadFactoryBase.hpp b/src/backends/backendsCommon/WorkloadFactoryBase.hpp
index 1947c69..9602cc3 100644
--- a/src/backends/backendsCommon/WorkloadFactoryBase.hpp
+++ b/src/backends/backendsCommon/WorkloadFactoryBase.hpp
@@ -106,6 +106,22 @@
                                               const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
+    std::unique_ptr<IWorkload> CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
+                                                      const WorkloadInfo& info) const override
+    {
+        if (descriptor.m_Parameters.m_Operation == UnaryOperation::Abs)
+        {
+            AbsQueueDescriptor absDescriptor;
+            return CreateAbs(absDescriptor, info);
+        }
+        else if (descriptor.m_Parameters.m_Operation == UnaryOperation::Rsqrt)
+        {
+            RsqrtQueueDescriptor rsqrtDescriptor;
+            return CreateRsqrt(rsqrtDescriptor, info);
+        }
+        return nullptr;
+    }
+
     std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& /*descriptor*/,
                                                       const WorkloadInfo& /*info*/) const override
     { return nullptr; }
diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk
index 4461cd6..56a21b3 100644
--- a/src/backends/backendsCommon/common.mk
+++ b/src/backends/backendsCommon/common.mk
@@ -50,6 +50,7 @@
     test/layerTests/DepthToSpaceTestImpl.cpp \
     test/layerTests/DequantizeTestImpl.cpp \
     test/layerTests/DivisionTestImpl.cpp \
+    test/layerTests/ElementwiseUnaryTestImpl.cpp \
     test/layerTests/FakeQuantizationTestImpl.cpp \
     test/layerTests/FloorTestImpl.cpp \
     test/layerTests/FullyConnectedTestImpl.cpp \
diff --git a/src/backends/backendsCommon/test/AbsEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/AbsEndToEndTestImpl.hpp
deleted file mode 100644
index 602ccd6..0000000
--- a/src/backends/backendsCommon/test/AbsEndToEndTestImpl.hpp
+++ /dev/null
@@ -1,65 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "CommonTestUtils.hpp"
-
-#include <QuantizeHelper.hpp>
-#include <ResolveType.hpp>
-
-
-namespace
-{
-
-armnn::INetworkPtr CreateAbsNetwork(const armnn::TensorInfo& tensorInfo)
-{
-    armnn::INetworkPtr network(armnn::INetwork::Create());
-
-    armnn::IConnectableLayer* inputLayer  = network->AddInputLayer(0, "input");
-    armnn::IConnectableLayer* absLayer    = network->AddAbsLayer("abs");
-    armnn::IConnectableLayer* outputLayer = network->AddOutputLayer(0, "output");
-
-    Connect(inputLayer, absLayer, tensorInfo, 0, 0);
-    Connect(absLayer, outputLayer, tensorInfo, 0, 0);
-
-    return network;
-}
-
-} // anonymous namespace
-
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-void AbsEndToEnd(const std::vector<armnn::BackendId>& backends)
-{
-    using namespace armnn;
-
-    const float   qScale  = IsQuantizedType<T>() ? 0.25f : 1.0f;
-    const int32_t qOffset = IsQuantizedType<T>() ? 50    : 0;
-
-    TensorInfo tensorInfo({ 1, 1, 2, 3 }, ArmnnType, qScale, qOffset);
-
-    std::vector<float> inputData =
-    {
-       -1.f,  2.f, -3.f,
-        4.f, -5.f,  6.f
-    };
-
-    std::vector<float> expectedOutputData =
-    {
-        1.f, 2.f, 3.f,
-        4.f, 5.f, 6.f
-    };
-
-    // quantize data
-    std::vector<T> qInputData          = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
-    std::vector<T> qExpectedOutputData = armnnUtils::QuantizedVector<T>(expectedOutputData, qScale, qOffset);
-
-    INetworkPtr network = CreateAbsNetwork(tensorInfo);
-
-    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network),
-                                                { { 0, qInputData } },
-                                                { { 0, qExpectedOutputData } },
-                                                backends);
-}
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index 82df782..4716bd4 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -4,7 +4,6 @@
 #
 
 list(APPEND armnnBackendsCommonUnitTests_sources
-    AbsEndToEndTestImpl.hpp
     ActivationFixture.hpp
     ArgMinMaxEndToEndTestImpl.hpp
     BackendIdTests.cpp
@@ -19,6 +18,7 @@
     DetectionPostProcessEndToEndTestImpl.hpp
     DynamicBackendTests.cpp
     DynamicBackendTests.hpp
+    ElementwiseUnaryEndToEndTestImpl.hpp
     EndToEndTestImpl.hpp
     GatherEndToEndTestImpl.hpp
     InstanceNormalizationEndToEndTestImpl.cpp
@@ -81,6 +81,8 @@
     layerTests/DivisionTestImpl.cpp
     layerTests/DivisionTestImpl.hpp
     layerTests/ElementwiseTestImpl.hpp
+    layerTests/ElementwiseUnaryTestImpl.cpp
+    layerTests/ElementwiseUnaryTestImpl.hpp
     layerTests/FakeQuantizationTestImpl.cpp
     layerTests/FakeQuantizationTestImpl.hpp
     layerTests/FloorTestImpl.cpp
diff --git a/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp
new file mode 100644
index 0000000..4c93735
--- /dev/null
+++ b/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp
@@ -0,0 +1,77 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "CommonTestUtils.hpp"
+
+#include <ResolveType.hpp>
+
+#include <armnn/INetwork.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+#include <vector>
+
+namespace
+{
+
+template<armnn::DataType ArmnnTypeInput>
+INetworkPtr CreateElementwiseUnaryNetwork(const TensorShape& inputShape,
+                                          const TensorShape& outputShape,
+                                          UnaryOperation operation,
+                                          const float qScale = 1.0f,
+                                          const int32_t qOffset = 0)
+{
+    using namespace armnn;
+
+    INetworkPtr net(INetwork::Create());
+
+    ElementwiseUnaryDescriptor descriptor(operation);
+    IConnectableLayer* elementwiseUnaryLayer = net->AddElementwiseUnaryLayer(descriptor, "elementwiseUnary");
+
+    TensorInfo inputTensorInfo(inputShape, ArmnnTypeInput, qScale, qOffset);
+    IConnectableLayer* input = net->AddInputLayer(boost::numeric_cast<LayerBindingId>(0));
+    Connect(input, elementwiseUnaryLayer, inputTensorInfo, 0, 0);
+
+    TensorInfo outputTensorInfo(outputShape, ArmnnTypeInput, qScale, qOffset);
+    IConnectableLayer* output = net->AddOutputLayer(0, "output");
+    Connect(elementwiseUnaryLayer, output, outputTensorInfo, 0, 0);
+
+    return net;
+}
+
+template<armnn::DataType ArmnnInType,
+         typename TInput = armnn::ResolveType<ArmnnInType>>
+void ElementwiseUnarySimpleEndToEnd(const std::vector<BackendId>& backends,
+                                    UnaryOperation operation,
+                                    const std::vector<float> expectedOutput)
+{
+    using namespace armnn;
+
+    const float   qScale  = IsQuantizedType<TInput>() ? 0.25f : 1.0f;
+    const int32_t qOffset = IsQuantizedType<TInput>() ? 50    : 0;
+
+    const TensorShape inputShape  = { 2, 2, 2, 2 };
+    const TensorShape outputShape = { 2, 2, 2, 2 };
+
+    // Builds up the structure of the network
+    INetworkPtr net = CreateElementwiseUnaryNetwork<ArmnnInType>(inputShape, outputShape, operation, qScale, qOffset);
+
+    BOOST_TEST_CHECKPOINT("create a network");
+
+    const std::vector<float> input({ 1, -1, 1, 1,  5, -5, 5, 5,
+                                       -3, 3, 3, 3,  4, 4, -4, 4 });
+
+    // quantize data
+    std::vector<TInput> qInputData      = armnnUtils::QuantizedVector<TInput>(input, qScale, qOffset);
+    std::vector<TInput> qExpectedOutput = armnnUtils::QuantizedVector<TInput>(expectedOutput, qScale, qOffset);
+
+    std::map<int, std::vector<TInput>> inputTensorData    = {{ 0, qInputData }};
+    std::map<int, std::vector<TInput>> expectedOutputData = {{ 0, qExpectedOutput }};
+
+    EndToEndLayerTestImpl<ArmnnInType, ArmnnInType>(std::move(net), inputTensorData, expectedOutputData, backends);
+}
+
+} // anonymous namespace
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 031210f..e4ce740 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -427,8 +427,6 @@
 struct LayerTypePolicy;
 
 // Every entry in the armnn::LayerType enum must be accounted for below.
-DECLARE_LAYER_POLICY_1_PARAM(Abs)
-
 DECLARE_LAYER_POLICY_2_PARAM(Activation)
 
 DECLARE_LAYER_POLICY_1_PARAM(Addition)
@@ -465,6 +463,8 @@
 
 DECLARE_LAYER_POLICY_2_PARAM(DetectionPostProcess)
 
+DECLARE_LAYER_POLICY_2_PARAM(ElementwiseUnary)
+
 DECLARE_LAYER_POLICY_2_PARAM(FakeQuantization)
 
 DECLARE_LAYER_POLICY_1_PARAM(Floor)
@@ -517,8 +517,6 @@
 
 DECLARE_LAYER_POLICY_2_PARAM(Reshape)
 
-DECLARE_LAYER_POLICY_1_PARAM(Rsqrt)
-
 DECLARE_LAYER_POLICY_2_PARAM(Slice)
 
 DECLARE_LAYER_POLICY_2_PARAM(Softmax)
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index 05c307e..eba7944 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -22,6 +22,7 @@
 #include <backendsCommon/test/layerTests/DequantizeTestImpl.hpp>
 #include <backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp>
 #include <backendsCommon/test/layerTests/DivisionTestImpl.hpp>
+#include <backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.hpp>
 #include <backendsCommon/test/layerTests/FakeQuantizationTestImpl.hpp>
 #include <backendsCommon/test/layerTests/FloorTestImpl.hpp>
 #include <backendsCommon/test/layerTests/FullyConnectedTestImpl.hpp>
diff --git a/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp
index cc57893..7706809 100644
--- a/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp
@@ -4,85 +4,15 @@
 //
 
 #include "AbsTestImpl.hpp"
+#include "ElementwiseUnaryTestImpl.hpp"
 
 
-#include <backendsCommon/test/DataTypeUtils.hpp>
-#include <backendsCommon/test/TensorCopyUtils.hpp>
-#include <backendsCommon/test/WorkloadTestUtils.hpp>
-
-#include <test/TensorHelpers.hpp>
-
-namespace
-{
-
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 2> Abs2dTestCommon(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::TensorInfo inputTensorInfo,
-    const armnn::TensorInfo outputTensorInfo,
-    const std::vector<float>& inputValues,
-    const std::vector<float>& expectedOutputValues)
-{
-    boost::ignore_unused(memoryManager);
-    auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputValues,inputTensorInfo));
-
-    LayerTestResult<T, 2> result(outputTensorInfo);
-
-    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
-                                             ConvertToDataType<ArmnnType>(expectedOutputValues,outputTensorInfo));
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::AbsQueueDescriptor descriptor;
-
-    armnn::WorkloadInfo info;
-
-    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAbs(descriptor, info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-
-    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
-
-    workload->PostAllocationConfigure();
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
-
-    return result;
-}
-
-} // anonymous namespace
-
 template<armnn::DataType ArmnnType, typename T>
 LayerTestResult<T, 2> Abs2dTest(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    const armnn::TensorShape inputShape{ 2, 2 };
-    const armnn::TensorShape outputShape{ 2, 2 };
-
-    float qScale    = 0.0625f;
-    int32_t qOffset = 64;
-
-    if (ArmnnType == armnn::DataType::QSymmS16)
-    {
-        qScale  = 0.1f;
-        qOffset = 0;
-    }
-
-    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
-    inputTensorInfo.SetQuantizationScale(qScale);
-    inputTensorInfo.SetQuantizationOffset(qOffset);
-
-    armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
-    outputTensorInfo.SetQuantizationScale(qScale);
-    outputTensorInfo.SetQuantizationOffset(qOffset);
+    const unsigned int inputShape[] = { 2, 2 };
 
     std::vector<float> inputValues
     {
@@ -98,9 +28,14 @@
     std::vector<float> expectedOutputValues(inputValues.size());
     std::transform(inputValues.begin(), inputValues.end(), expectedOutputValues.begin(), f);
 
-    return Abs2dTestCommon<ArmnnType>(workloadFactory, memoryManager,
-                                inputTensorInfo, outputTensorInfo,
-                                inputValues, expectedOutputValues);
+    return ElementwiseUnaryTestHelper<2, ArmnnType>(
+        workloadFactory,
+        memoryManager,
+        armnn::UnaryOperation::Abs,
+        inputShape,
+        inputValues,
+        inputShape,
+        expectedOutputValues);
 }
 
 template<armnn::DataType ArmnnType, typename T>
@@ -108,27 +43,7 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    boost::ignore_unused(memoryManager);
-
-    const armnn::TensorShape inputShape{ 3, 1, 2 };
-    const armnn::TensorShape outputShape{ 3, 1, 2 };
-
-    float qScale    = 0.0625f;
-    int32_t qOffset = 64;
-
-    if (ArmnnType == armnn::DataType::QSymmS16)
-    {
-        qScale  = 0.1f;
-        qOffset = 0;
-    }
-
-    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
-    inputTensorInfo.SetQuantizationScale(qScale);
-    inputTensorInfo.SetQuantizationOffset(qOffset);
-
-    armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
-    outputTensorInfo.SetQuantizationScale(qScale);
-    outputTensorInfo.SetQuantizationOffset(qOffset);
+    const unsigned int inputShape[] = { 3, 1, 2 };
 
     std::vector<float> inputValues
     {
@@ -143,35 +58,14 @@
     std::vector<float>expectedOutputValues(inputValues.size());
     std::transform(inputValues.begin(), inputValues.end(), expectedOutputValues.begin(), f);
 
-    auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputValues,inputTensorInfo));
-
-    LayerTestResult<T, 3> result(outputTensorInfo);
-    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo,
-                                             ConvertToDataType<ArmnnType>(expectedOutputValues,outputTensorInfo));
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::AbsQueueDescriptor descriptor;
-
-    armnn::WorkloadInfo info;
-
-    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAbs(descriptor, info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-
-    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
-
-    workload->PostAllocationConfigure();
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
-
-    return result;
+    return ElementwiseUnaryTestHelper<3, ArmnnType>(
+        workloadFactory,
+        memoryManager,
+        armnn::UnaryOperation::Abs,
+        inputShape,
+        inputValues,
+        inputShape,
+        expectedOutputValues);
 }
 
 template<armnn::DataType ArmnnType, typename T>
@@ -179,14 +73,7 @@
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    const armnn::TensorShape inputShape{ 1, 2 };
-    const armnn::TensorShape outputShape{ 1, 2 };
-
-    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
-    inputTensorInfo.SetQuantizationScale(0.1f);
-
-    armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
-    outputTensorInfo.SetQuantizationScale(0.1f);
+    const unsigned int inputShape[] = { 1, 2 };
 
     std::vector<float> inputValues
     {
@@ -198,9 +85,14 @@
         0.f, 0.f
     };
 
-    return Abs2dTestCommon<ArmnnType>(workloadFactory, memoryManager,
-                                inputTensorInfo, outputTensorInfo,
-                                inputValues, expectedOutputValues);
+    return ElementwiseUnaryTestHelper<2, ArmnnType>(
+        workloadFactory,
+        memoryManager,
+        armnn::UnaryOperation::Abs,
+        inputShape,
+        inputValues,
+        inputShape,
+        expectedOutputValues);
 }
 
 //
diff --git a/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp
index c0a779c..cbbe140 100644
--- a/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp
@@ -15,6 +15,7 @@
 #include <backendsCommon/WorkloadData.hpp>
 #include <backendsCommon/WorkloadFactory.hpp>
 
+#include <backendsCommon/test/DataTypeUtils.hpp>
 #include <backendsCommon/test/TensorCopyUtils.hpp>
 #include <backendsCommon/test/WorkloadTestUtils.hpp>
 
@@ -202,4 +203,4 @@
         outValues,
         quantScale,
         quantOffset);
-}
+}
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.cpp
new file mode 100644
index 0000000..a2c88a6
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.cpp
@@ -0,0 +1,14 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ElementwiseUnaryTestImpl.hpp"
+
+std::unique_ptr<armnn::IWorkload> CreateWorkload(
+    const armnn::IWorkloadFactory& workloadFactory,
+    const armnn::WorkloadInfo& info,
+    const armnn::ElementwiseUnaryQueueDescriptor& descriptor)
+{
+    return workloadFactory.CreateElementwiseUnary(descriptor, info);
+}
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.hpp
new file mode 100644
index 0000000..bea4ec2
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.hpp
@@ -0,0 +1,113 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerTestResult.hpp"
+
+#include <armnn/ArmNN.hpp>
+
+#include <ResolveType.hpp>
+
+#include <armnn/backends/IBackendInternal.hpp>
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+#include <backendsCommon/test/DataTypeUtils.hpp>
+#include <backendsCommon/test/TensorCopyUtils.hpp>
+#include <backendsCommon/test/WorkloadTestUtils.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+#include <memory>
+
+std::unique_ptr<armnn::IWorkload> CreateWorkload(
+    const armnn::IWorkloadFactory& workloadFactory,
+    const armnn::WorkloadInfo& info,
+    const armnn::ElementwiseUnaryQueueDescriptor& descriptor);
+
+template <std::size_t NumDims,
+          armnn::DataType ArmnnType,
+          typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, NumDims> ElementwiseUnaryTestHelper(
+    armnn::IWorkloadFactory & workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
+    armnn::UnaryOperation op,
+    const unsigned int shape[NumDims],
+    std::vector<float> values,
+    float quantScale,
+    int quantOffset,
+    const unsigned int outShape[NumDims],
+    std::vector<float> outValues,
+    float outQuantScale,
+    int outQuantOffset)
+{
+    armnn::TensorInfo inputTensorInfo{NumDims, shape, ArmnnType};
+    armnn::TensorInfo outputTensorInfo{NumDims, outShape, ArmnnType};
+
+    inputTensorInfo.SetQuantizationScale(quantScale);
+    inputTensorInfo.SetQuantizationOffset(quantOffset);
+
+    outputTensorInfo.SetQuantizationScale(outQuantScale);
+    outputTensorInfo.SetQuantizationOffset(outQuantOffset);
+
+    auto input = MakeTensor<T, NumDims>(inputTensorInfo, ConvertToDataType<ArmnnType>(values, inputTensorInfo));
+
+    LayerTestResult<T, NumDims> ret(outputTensorInfo);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ElementwiseUnaryDescriptor desc(op);
+    armnn::ElementwiseUnaryQueueDescriptor qDesc;
+    qDesc.m_Parameters = desc;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(qDesc, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(qDesc, info, outputTensorInfo, outputHandle.get());
+    auto workload = CreateWorkload(workloadFactory, info, qDesc);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), input.origin());
+
+    workload->PostAllocationConfigure();
+    ExecuteWorkload(*workload, memoryManager);
+
+    CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
+
+    ret.outputExpected = MakeTensor<T, NumDims>(outputTensorInfo, ConvertToDataType<ArmnnType>(outValues,
+        outputTensorInfo));
+    return ret;
+}
+
+template <std::size_t NumDims,
+          armnn::DataType ArmnnType,
+          typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, NumDims> ElementwiseUnaryTestHelper(
+    armnn::IWorkloadFactory & workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
+    armnn::UnaryOperation op,
+    const unsigned int shape[NumDims],
+    std::vector<float> values,
+    const unsigned int outShape[NumDims],
+    std::vector<float> outValues,
+    float quantScale = 1.0f,
+    int quantOffset = 0)
+{
+    return ElementwiseUnaryTestHelper<NumDims, ArmnnType>(
+        workloadFactory,
+        memoryManager,
+        op,
+        shape,
+        values,
+        quantScale,
+        quantOffset,
+        outShape,
+        outValues,
+        quantScale,
+        quantOffset);
+}
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp
index db928cf..ca42383 100644
--- a/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp
@@ -4,76 +4,15 @@
 //
 
 #include "ReshapeTestImpl.hpp"
+#include "ElementwiseUnaryTestImpl.hpp"
 
 
-#include <backendsCommon/test/DataTypeUtils.hpp>
-#include <backendsCommon/test/TensorCopyUtils.hpp>
-#include <backendsCommon/test/WorkloadTestUtils.hpp>
-
-#include <test/TensorHelpers.hpp>
-
-namespace
-{
-
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 2> Rsqrt2dTestCommon(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::TensorInfo inputTensorInfo,
-    const armnn::TensorInfo outputTensorInfo,
-    const std::vector<float>& inputValues,
-    const std::vector<float>& expectedOutputValues)
-{
-    boost::ignore_unused(memoryManager);
-    auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputValues,inputTensorInfo));
-
-    LayerTestResult<T, 2> result(outputTensorInfo);
-
-    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
-                                             ConvertToDataType<ArmnnType>(expectedOutputValues,outputTensorInfo));
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::RsqrtQueueDescriptor descriptor;
-
-    armnn::WorkloadInfo info;
-
-    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateRsqrt(descriptor, info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-
-    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
-
-    workload->PostAllocationConfigure();
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
-
-    return result;
-}
-
-} // anonymous namespace
-
 template<armnn::DataType ArmnnType, typename T>
 LayerTestResult<T, 2> Rsqrt2dTest(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    const armnn::TensorShape inputShape{ 2, 2 };
-    const armnn::TensorShape outputShape{ 2, 2 };
-
-    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
-    inputTensorInfo.SetQuantizationScale(0.1f);
-    inputTensorInfo.SetQuantizationOffset(0);
-
-    armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
-    outputTensorInfo.SetQuantizationScale(0.1f);
-    outputTensorInfo.SetQuantizationOffset(0);
+    const unsigned int inputShape[] = { 2, 2 };
 
     std::vector<float> inputValues
     {
@@ -87,9 +26,14 @@
         0.25f, 0.2f
     };
 
-    return Rsqrt2dTestCommon<ArmnnType>(workloadFactory, memoryManager,
-                                inputTensorInfo, outputTensorInfo,
-                                inputValues, expectedOutputValues);
+    return ElementwiseUnaryTestHelper<2, ArmnnType>(
+        workloadFactory,
+        memoryManager,
+        armnn::UnaryOperation::Rsqrt,
+        inputShape,
+        inputValues,
+        inputShape,
+        expectedOutputValues);
 }
 
 template<armnn::DataType ArmnnType, typename T>
@@ -97,17 +41,7 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    boost::ignore_unused(memoryManager);
-    const armnn::TensorShape inputShape{ 3, 1, 2 };
-    const armnn::TensorShape outputShape{ 3, 1, 2 };
-
-    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
-    inputTensorInfo.SetQuantizationScale(0.1f);
-    inputTensorInfo.SetQuantizationOffset(0);
-
-    armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
-    outputTensorInfo.SetQuantizationScale(0.1f);
-    outputTensorInfo.SetQuantizationOffset(0);
+    const unsigned int inputShape[] = { 3, 1, 2 };
 
     std::vector<float> inputValues
     {
@@ -121,35 +55,14 @@
         0.2f, 0.125f, 0.1f
     };
 
-    auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputValues,inputTensorInfo));
-
-    LayerTestResult<T, 3> result(outputTensorInfo);
-    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo,
-                                             ConvertToDataType<ArmnnType>(expectedOutputValues,outputTensorInfo));
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::RsqrtQueueDescriptor descriptor;
-
-    armnn::WorkloadInfo info;
-
-    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateRsqrt(descriptor, info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-
-    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
-
-    workload->PostAllocationConfigure();
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
-
-    return result;
+    return ElementwiseUnaryTestHelper<3, ArmnnType>(
+        workloadFactory,
+        memoryManager,
+        armnn::UnaryOperation::Rsqrt,
+        inputShape,
+        inputValues,
+        inputShape,
+        expectedOutputValues);
 }
 
 template<armnn::DataType ArmnnType, typename T>
@@ -157,14 +70,7 @@
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    const armnn::TensorShape inputShape{ 1, 2 };
-    const armnn::TensorShape outputShape{ 1, 2 };
-
-    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
-    inputTensorInfo.SetQuantizationScale(0.1f);
-
-    armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
-    outputTensorInfo.SetQuantizationScale(0.1f);
+    const unsigned int inputShape[] = { 1, 2 };
 
     std::vector<float> inputValues
     {
@@ -176,9 +82,14 @@
         INFINITY, -INFINITY
     };
 
-    return Rsqrt2dTestCommon<ArmnnType>(workloadFactory, memoryManager,
-                                inputTensorInfo, outputTensorInfo,
-                                inputValues, expectedOutputValues);
+    return ElementwiseUnaryTestHelper<2, ArmnnType>(
+        workloadFactory,
+        memoryManager,
+        armnn::UnaryOperation::Rsqrt,
+        inputShape,
+        inputValues,
+        inputShape,
+        expectedOutputValues);
 }
 
 template<armnn::DataType ArmnnType, typename T>
@@ -186,16 +97,7 @@
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    const armnn::TensorShape inputShape{ 1, 2 };
-    const armnn::TensorShape outputShape{ 1, 2 };
-
-    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
-    inputTensorInfo.SetQuantizationScale(0.1f);
-    inputTensorInfo.SetQuantizationOffset(0);
-
-    armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
-    outputTensorInfo.SetQuantizationScale(0.1f);
-    outputTensorInfo.SetQuantizationOffset(0);
+    const unsigned int inputShape[] = { 1, 2 };
 
     std::vector<float> inputValues
     {
@@ -207,9 +109,14 @@
         -NAN, -NAN
     };
 
-    return Rsqrt2dTestCommon<ArmnnType>(workloadFactory, memoryManager,
-                                inputTensorInfo, outputTensorInfo,
-                                inputValues, expectedOutputValues);
+    return ElementwiseUnaryTestHelper<2, ArmnnType>(
+        workloadFactory,
+        memoryManager,
+        armnn::UnaryOperation::Rsqrt,
+        inputShape,
+        inputValues,
+        inputShape,
+        expectedOutputValues);
 }
 
 //
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index f7129d6..f8cc507 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -160,10 +160,8 @@
                                     const TensorInfo& output,
                                     Optional<std::string&> reasonIfUnsupported) const
 {
-    FORWARD_WORKLOAD_VALIDATE_FUNC(ClAbsWorkloadValidate,
-                                   reasonIfUnsupported,
-                                   input,
-                                   output);
+    ElementwiseUnaryDescriptor descriptor(UnaryOperation::Abs);
+    return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
 }
 
 bool ClLayerSupport::IsActivationSupported(const TensorInfo& input,
@@ -425,6 +423,29 @@
                                    output);
 }
 
+bool ClLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
+                                                 const TensorInfo& output,
+                                                 const ElementwiseUnaryDescriptor& descriptor,
+                                                 Optional<std::string&> reasonIfUnsupported) const
+{
+    if (descriptor.m_Operation == UnaryOperation::Abs)
+    {
+        FORWARD_WORKLOAD_VALIDATE_FUNC(ClAbsWorkloadValidate,
+                                       reasonIfUnsupported,
+                                       input,
+                                       output);
+    }
+    else if (descriptor.m_Operation == UnaryOperation::Rsqrt)
+    {
+        FORWARD_WORKLOAD_VALIDATE_FUNC(ClRsqrtWorkloadValidate,
+                                       reasonIfUnsupported,
+                                       input,
+                                       output);
+    }
+
+    return false;
+}
+
 bool ClLayerSupport::IsFloorSupported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       Optional<std::string&> reasonIfUnsupported) const
@@ -685,7 +706,8 @@
                                       const TensorInfo& output,
                                       Optional<std::string&> reasonIfUnsupported) const
 {
-    FORWARD_WORKLOAD_VALIDATE_FUNC(ClRsqrtWorkloadValidate, reasonIfUnsupported, input, output);
+    ElementwiseUnaryDescriptor descriptor(UnaryOperation::Rsqrt);
+    return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
 }
 
 bool ClLayerSupport::IsSliceSupported(const TensorInfo& input,
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index a21589d..9371717 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -12,6 +12,7 @@
 class ClLayerSupport : public LayerSupportBase
 {
 public:
+    ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
     bool IsAbsSupported(const TensorInfo& input,
                         const TensorInfo& output,
                         Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
@@ -102,6 +103,11 @@
                              const TensorInfo& output,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsElementwiseUnarySupported(const TensorInfo& input,
+                                     const TensorInfo& output,
+                                     const ElementwiseUnaryDescriptor& descriptor,
+                                     Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsFloorSupported(const TensorInfo& input,
                           const TensorInfo& output,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
@@ -223,6 +229,7 @@
                                    const TensorInfo& output,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
     bool IsRsqrtSupported(const TensorInfo& input,
                           const TensorInfo& output,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index f9e6632..4bb2e2a 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -131,7 +131,12 @@
 std::unique_ptr<IWorkload> ClWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor,
                                                         const WorkloadInfo& info) const
 {
-    return MakeWorkload<ClAbsWorkload>(descriptor, info);
+    boost::ignore_unused(descriptor);
+
+    ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
+    elementwiseUnaryDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Abs);
+
+    return CreateElementwiseUnary(elementwiseUnaryDescriptor, info);
 }
 
 std::unique_ptr<IWorkload> ClWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
@@ -249,6 +254,28 @@
     return MakeWorkload<ClDivisionFloatWorkload, NullWorkload>(descriptor, info);
 }
 
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
+                                                                     const WorkloadInfo& info) const
+{
+    if (descriptor.m_Parameters.m_Operation == UnaryOperation::Abs)
+    {
+        AbsQueueDescriptor absQueueDescriptor;
+        absQueueDescriptor.m_Inputs  = descriptor.m_Inputs;
+        absQueueDescriptor.m_Outputs = descriptor.m_Outputs;
+
+        return MakeWorkload<ClAbsWorkload>(absQueueDescriptor, info);
+    }
+    else if (descriptor.m_Parameters.m_Operation == UnaryOperation::Rsqrt)
+    {
+        RsqrtQueueDescriptor rsqrtQueueDescriptor;
+        rsqrtQueueDescriptor.m_Inputs  = descriptor.m_Inputs;
+        rsqrtQueueDescriptor.m_Outputs = descriptor.m_Outputs;
+
+        return MakeWorkload<ClRsqrtWorkload>(rsqrtQueueDescriptor, info);
+    }
+    return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+}
+
 std::unique_ptr<IWorkload> ClWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
 {
@@ -450,7 +477,12 @@
 std::unique_ptr<IWorkload> ClWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
 {
-    return MakeWorkload<ClRsqrtWorkload>(descriptor, info);
+    boost::ignore_unused(descriptor);
+
+    ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
+    elementwiseUnaryDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt);
+
+    return CreateElementwiseUnary(elementwiseUnaryDescriptor, info);
 }
 
 std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSlice(const SliceQueueDescriptor& descriptor,
diff --git a/src/backends/cl/ClWorkloadFactory.hpp b/src/backends/cl/ClWorkloadFactory.hpp
index 8f377e9..980be91 100644
--- a/src/backends/cl/ClWorkloadFactory.hpp
+++ b/src/backends/cl/ClWorkloadFactory.hpp
@@ -38,6 +38,7 @@
                                                       DataLayout dataLayout,
                                                       const bool IsMemoryManaged = true) const override;
 
+    ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead")
     std::unique_ptr<IWorkload> CreateAbs(const AbsQueueDescriptor& descriptor,
                                          const WorkloadInfo& info) const override;
 
@@ -92,6 +93,9 @@
     std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& descriptor,
                                               const WorkloadInfo& info) const override;
 
+    std::unique_ptr<IWorkload> CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
+                                                      const WorkloadInfo& info) const override;
+
     std::unique_ptr<IWorkload> CreateEqual(const EqualQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
@@ -178,6 +182,7 @@
     std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
                                                     const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead")
     std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp
index d79745c..92e7717 100644
--- a/src/backends/cl/test/ClCreateWorkloadTests.cpp
+++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp
@@ -146,18 +146,16 @@
                                     armnn::DataType::Float16>();
 }
 
-template <typename WorkloadType,
+template <typename WorkloadType,
           typename DescriptorType,
-          typename LayerType,
           armnn::DataType DataType>
-static void ClCreateElementwiseUnaryWorkloadTest()
+static void ClCreateElementwiseUnaryWorkloadTest(armnn::UnaryOperation op)
 {
     Graph graph;
     ClWorkloadFactory factory =
         ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());
 
-    auto workload = CreateElementwiseUnaryWorkloadTest
-        <WorkloadType, DescriptorType, LayerType, DataType>(factory, graph);
+    auto workload = CreateElementwiseUnaryWorkloadTest<WorkloadType, DescriptorType, DataType>(factory, graph, op);
 
     DescriptorType queueDescriptor = workload->GetData();
 
@@ -170,10 +168,8 @@
 
 BOOST_AUTO_TEST_CASE(CreateRsqrtFloat32WorkloadTest)
 {
-    ClCreateElementwiseUnaryWorkloadTest<ClRsqrtWorkload,
-                                         RsqrtQueueDescriptor,
-                                         RsqrtLayer,
-                                         armnn::DataType::Float32>();
+    ClCreateElementwiseUnaryWorkloadTest<ClRsqrtWorkload, RsqrtQueueDescriptor, armnn::DataType::Float32>(
+        UnaryOperation::Rsqrt);
 }
 
 template <typename BatchNormalizationWorkloadType, armnn::DataType DataType>
diff --git a/src/backends/cl/test/ClEndToEndTests.cpp b/src/backends/cl/test/ClEndToEndTests.cpp
index 260f8f6..eafdb7c 100644
--- a/src/backends/cl/test/ClEndToEndTests.cpp
+++ b/src/backends/cl/test/ClEndToEndTests.cpp
@@ -5,12 +5,12 @@
 
 #include <backendsCommon/test/EndToEndTestImpl.hpp>
 
-#include <backendsCommon/test/AbsEndToEndTestImpl.hpp>
 #include <backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp>
 #include <backendsCommon/test/ComparisonEndToEndTestImpl.hpp>
 #include <backendsCommon/test/ConcatEndToEndTestImpl.hpp>
 #include <backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp>
 #include <backendsCommon/test/DequantizeEndToEndTestImpl.hpp>
+#include <backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp>
 #include <backendsCommon/test/InstanceNormalizationEndToEndTestImpl.hpp>
 #include <backendsCommon/test/PreluEndToEndTestImpl.hpp>
 #include <backendsCommon/test/QuantizedLstmEndToEndTestImpl.hpp>
@@ -27,7 +27,15 @@
 // Abs
 BOOST_AUTO_TEST_CASE(ClAbsEndToEndTestFloat32)
 {
-    AbsEndToEnd<armnn::DataType::Float32>(defaultBackends);
+    std::vector<float> expectedOutput =
+    {
+        1.f, 1.f, 1.f, 1.f, 5.f, 5.f, 5.f, 5.f,
+        3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f
+    };
+
+    ElementwiseUnarySimpleEndToEnd<armnn::DataType::Float32>(defaultBackends,
+                                                             UnaryOperation::Abs,
+                                                             expectedOutput);
 }
 
 // Constant
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index a73837b..fe9bffb 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -129,10 +129,8 @@
                                       const TensorInfo& output,
                                       Optional<std::string&> reasonIfUnsupported) const
 {
-    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAbsWorkloadValidate,
-                                   reasonIfUnsupported,
-                                   input,
-                                   output);
+    ElementwiseUnaryDescriptor descriptor(UnaryOperation::Abs);
+    return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
 }
 
 bool NeonLayerSupport::IsActivationSupported(const TensorInfo& input,
@@ -386,6 +384,29 @@
                                    biases);
 }
 
+bool NeonLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
+                                                   const TensorInfo& output,
+                                                   const ElementwiseUnaryDescriptor& descriptor,
+                                                   Optional<std::string&> reasonIfUnsupported) const
+{
+    if (descriptor.m_Operation == UnaryOperation::Abs)
+    {
+        FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAbsWorkloadValidate,
+                                       reasonIfUnsupported,
+                                       input,
+                                       output);
+    }
+    else if (descriptor.m_Operation == UnaryOperation::Rsqrt)
+    {
+        FORWARD_WORKLOAD_VALIDATE_FUNC(NeonRsqrtWorkloadValidate,
+                                       reasonIfUnsupported,
+                                       input,
+                                       output);
+    }
+
+    return false;
+}
+
 bool NeonLayerSupport::IsFloorSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
@@ -656,7 +677,8 @@
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
 {
-    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonRsqrtWorkloadValidate, reasonIfUnsupported, input, output);
+    ElementwiseUnaryDescriptor descriptor(UnaryOperation::Rsqrt);
+    return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
 }
 
 bool NeonLayerSupport::IsSliceSupported(const TensorInfo& input,
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index 8e6cd6a..d429aec 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -12,6 +12,7 @@
 class NeonLayerSupport : public LayerSupportBase
 {
 public:
+    ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
     bool IsAbsSupported(const TensorInfo& input,
                         const TensorInfo& output,
                         Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
@@ -103,6 +104,11 @@
                                                 const Optional<TensorInfo>& biases,
                                                 Optional<std::string&> reason = EmptyOptional()) const override;
 
+    bool IsElementwiseUnarySupported(const TensorInfo& input,
+                                     const TensorInfo& output,
+                                     const ElementwiseUnaryDescriptor& descriptor,
+                                     Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsFloorSupported(const TensorInfo& input,
                           const TensorInfo& output,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
@@ -224,6 +230,7 @@
                                    const TensorInfo& output,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
     bool IsRsqrtSupported(const TensorInfo& input,
                           const TensorInfo& output,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 1cc9e50..82f9bdb 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -98,7 +98,12 @@
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
 {
-    return std::make_unique<NeonAbsWorkload>(descriptor, info);
+    boost::ignore_unused(descriptor);
+
+    ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
+    elementwiseUnaryDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Abs);
+
+    return CreateElementwiseUnary(elementwiseUnaryDescriptor, info);
 }
 
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
@@ -214,6 +219,29 @@
     return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
 }
 
+std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor&
+                                                                       descriptor,
+                                                                       const WorkloadInfo& info) const
+{
+    if (descriptor.m_Parameters.m_Operation == UnaryOperation::Abs)
+    {
+        AbsQueueDescriptor absQueueDescriptor;
+        absQueueDescriptor.m_Inputs  = descriptor.m_Inputs;
+        absQueueDescriptor.m_Outputs = descriptor.m_Outputs;
+
+        return std::make_unique<NeonAbsWorkload>(absQueueDescriptor, info);
+    }
+    else if (descriptor.m_Parameters.m_Operation == UnaryOperation::Rsqrt)
+    {
+        RsqrtQueueDescriptor rsqrtQueueDescriptor;
+        rsqrtQueueDescriptor.m_Inputs  = descriptor.m_Inputs;
+        rsqrtQueueDescriptor.m_Outputs = descriptor.m_Outputs;
+
+        return std::make_unique<NeonRsqrtWorkload>(rsqrtQueueDescriptor, info);
+    }
+    return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
+}
+
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
 {
@@ -418,7 +446,12 @@
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor &descriptor,
                                                             const WorkloadInfo &info) const
 {
-    return std::make_unique<NeonRsqrtWorkload>(descriptor, info);
+    boost::ignore_unused(descriptor);
+
+    ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
+    elementwiseUnaryDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt);
+
+    return CreateElementwiseUnary(elementwiseUnaryDescriptor, info);
 }
 
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSlice(const SliceQueueDescriptor& descriptor,
diff --git a/src/backends/neon/NeonWorkloadFactory.hpp b/src/backends/neon/NeonWorkloadFactory.hpp
index b76a3a3..44c0629 100644
--- a/src/backends/neon/NeonWorkloadFactory.hpp
+++ b/src/backends/neon/NeonWorkloadFactory.hpp
@@ -39,6 +39,7 @@
                                                       DataLayout dataLayout,
                                                       const bool IsMemoryManaged = true) const override;
 
+    ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead")
     std::unique_ptr<IWorkload> CreateAbs(const AbsQueueDescriptor& descriptor,
                                          const WorkloadInfo& info) const override;
 
@@ -92,6 +93,9 @@
 
     std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& descriptor,
                                               const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
+                                                      const WorkloadInfo& info) const override;
 
     ARMNN_DEPRECATED_MSG("Use CreateComparison instead")
     std::unique_ptr<IWorkload> CreateEqual(const EqualQueueDescriptor& descriptor,
@@ -181,6 +185,7 @@
     std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
                                                     const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead")
     std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
index a08c8f7..400a5a3 100644
--- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp
+++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
@@ -181,36 +181,6 @@
                                       DataType::QAsymmU8>();
 }
 
-template <typename WorkloadType,
-          typename DescriptorType,
-          typename LayerType,
-          armnn::DataType DataType>
-static void NeonCreateElementwiseUnaryWorkloadTest()
-{
-    Graph graph;
-    NeonWorkloadFactory factory =
-        NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
-
-    auto workload = CreateElementwiseUnaryWorkloadTest
-        <WorkloadType, DescriptorType, LayerType, DataType>(factory, graph);
-
-    DescriptorType queueDescriptor = workload->GetData();
-
-    auto inputHandle  = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
-
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({2, 3}, DataType)));
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({2, 3}, DataType)));
-}
-
-BOOST_AUTO_TEST_CASE(CreateRsqrtFloat32Workload)
-{
-    NeonCreateElementwiseUnaryWorkloadTest<NeonRsqrtWorkload,
-                                           RsqrtQueueDescriptor,
-                                           RsqrtLayer,
-                                           DataType::Float32>();
-}
-
 template <typename BatchNormalizationWorkloadType, typename armnn::DataType DataType>
 static void NeonCreateBatchNormalizationWorkloadTest(DataLayout dataLayout)
 {
diff --git a/src/backends/neon/test/NeonEndToEndTests.cpp b/src/backends/neon/test/NeonEndToEndTests.cpp
index e1c929b..4e9fe0f 100644
--- a/src/backends/neon/test/NeonEndToEndTests.cpp
+++ b/src/backends/neon/test/NeonEndToEndTests.cpp
@@ -5,13 +5,13 @@
 
 #include <backendsCommon/test/EndToEndTestImpl.hpp>
 
-#include <backendsCommon/test/AbsEndToEndTestImpl.hpp>
 #include <backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp>
 #include <backendsCommon/test/ComparisonEndToEndTestImpl.hpp>
 #include <backendsCommon/test/ConcatEndToEndTestImpl.hpp>
 #include <backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp>
 #include <backendsCommon/test/DequantizeEndToEndTestImpl.hpp>
 #include <backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp>
+#include <backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp>
 #include <backendsCommon/test/InstanceNormalizationEndToEndTestImpl.hpp>
 #include <backendsCommon/test/PreluEndToEndTestImpl.hpp>
 #include <backendsCommon/test/QuantizedLstmEndToEndTestImpl.hpp>
@@ -28,7 +28,15 @@
 // Abs
 BOOST_AUTO_TEST_CASE(NeonAbsEndToEndTestFloat32)
 {
-    AbsEndToEnd<armnn::DataType::Float32>(defaultBackends);
+    std::vector<float> expectedOutput =
+    {
+        1.f, 1.f, 1.f, 1.f, 5.f, 5.f, 5.f, 5.f,
+        3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f
+    };
+
+    ElementwiseUnarySimpleEndToEnd<armnn::DataType::Float32>(defaultBackends,
+                                                             UnaryOperation::Abs,
+                                                             expectedOutput);
 }
 
 // Constant
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 26a61d4..491081d 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -70,28 +70,10 @@
 bool RefLayerSupport::IsAbsSupported(const TensorInfo& input, const TensorInfo& output,
                                      Optional<std::string&> reasonIfUnsupported) const
 {
-    bool supported = true;
-    std::array<DataType,4> supportedTypes =
-        {
-            DataType::Float32,
-            DataType::Float16,
-            DataType::QAsymmU8,
-            DataType::QSymmS16
-        };
-
-    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
-                                  "Reference abs: input type not supported");
-
-    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
-                                  "Reference abs: output type not supported");
-
-    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
-                                  "Reference abs: input and output types not matching");
-
-    supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
-                                  "Reference abs: input and output shapes have different number of total elements");
-
-    return supported;
+    return IsElementwiseUnarySupported(input,
+                                       output,
+                                       ElementwiseUnaryDescriptor(UnaryOperation::Abs),
+                                       reasonIfUnsupported);
 }
 
 bool RefLayerSupport::IsActivationSupported(const TensorInfo& input,
@@ -714,6 +696,39 @@
     return supported;
 }
 
+bool RefLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
+                                                  const TensorInfo& output,
+                                                  const ElementwiseUnaryDescriptor& descriptor,
+                                                  Optional<std::string&> reasonIfUnsupported) const
+{
+    boost::ignore_unused(descriptor);
+
+    std::array<DataType, 4> supportedTypes =
+    {
+        DataType::Float32,
+        DataType::Float16,
+        DataType::QAsymmU8,
+        DataType::QSymmS16
+    };
+
+    bool supported = true;
+
+    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
+                                  "Reference elementwise unary: input type not supported");
+
+    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
+                                  "Reference elementwise unary: output type not supported");
+
+    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
+                                  "Reference elementwise unary: input and output types not matching");
+
+    supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
+                                  "Reference elementwise unary: input and output shapes"
+                                  " have different number of total elements");
+
+    return supported;
+}
+
 bool RefLayerSupport::IsEqualSupported(const TensorInfo& input0,
                                        const TensorInfo& input1,
                                        const TensorInfo& output,
@@ -1499,28 +1514,10 @@
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
 {
-    bool supported = true;
-    std::array<DataType,4> supportedTypes =
-    {
-            DataType::Float32,
-            DataType::Float16,
-            DataType::QAsymmU8,
-            DataType::QSymmS16
-    };
-
-    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
-                                  "Reference rsqrt: input type not supported");
-
-    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
-                                  "Reference rsqrt: output type not supported");
-
-    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
-                                  "Reference rsqrt: input and output types not matching");
-
-    supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
-                                  "Reference Rsqrt: input and output shapes have different number of total elements");
-
-    return supported;
+    return IsElementwiseUnarySupported(input,
+                                       output,
+                                       ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt),
+                                       reasonIfUnsupported);
 }
 
 bool RefLayerSupport::IsSliceSupported(const TensorInfo& input,
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index a7d6303..123c264 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -12,6 +12,7 @@
 class RefLayerSupport : public LayerSupportBase
 {
 public:
+    ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
     bool IsAbsSupported(const TensorInfo& input,
                         const TensorInfo& output,
                         Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
@@ -117,6 +118,11 @@
                              const TensorInfo& output,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsElementwiseUnarySupported(const TensorInfo& input,
+                                     const TensorInfo& output,
+                                     const ElementwiseUnaryDescriptor& descriptor,
+                                     Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead")
     bool IsEqualSupported(const TensorInfo& input0,
                           const TensorInfo& input1,
@@ -247,7 +253,8 @@
                            const TensorInfo& output,
                            const ResizeDescriptor& descriptor,
                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
+
+    ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
     bool IsRsqrtSupported(const TensorInfo& input,
                           const TensorInfo& output,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 2db47d3..e7a9c19 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -98,7 +98,11 @@
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
 {
-    return std::make_unique<RefAbsWorkload>(descriptor, info);
+    boost::ignore_unused(descriptor);
+    ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
+    elementwiseUnaryDescriptor.m_Parameters.m_Operation = UnaryOperation::Abs;
+
+    return CreateElementwiseUnary(elementwiseUnaryDescriptor, info);
 }
 
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
@@ -221,6 +225,12 @@
     return std::make_unique<RefDivisionWorkload>(descriptor, info);
 }
 
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
+                                                                      const WorkloadInfo& info) const
+{
+    return std::make_unique<RefElementwiseUnaryWorkload>(descriptor, info);
+}
+
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
 {
@@ -463,7 +473,11 @@
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
 {
-    return std::make_unique<RefRsqrtWorkload>(descriptor, info);
+    boost::ignore_unused(descriptor);
+    ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
+    elementwiseUnaryDescriptor.m_Parameters.m_Operation = UnaryOperation::Rsqrt;
+
+    return CreateElementwiseUnary(elementwiseUnaryDescriptor, info);
 }
 
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSlice(const SliceQueueDescriptor& descriptor,
diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp
index 80393c3..b5b9b0f 100644
--- a/src/backends/reference/RefWorkloadFactory.hpp
+++ b/src/backends/reference/RefWorkloadFactory.hpp
@@ -59,6 +59,7 @@
                                                       DataLayout dataLayout,
                                                       const bool IsMemoryManaged = true) const override;
 
+    ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead")
     std::unique_ptr<IWorkload> CreateAbs(const AbsQueueDescriptor& descriptor,
                                          const WorkloadInfo& info) const override;
 
@@ -113,6 +114,9 @@
     std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& descriptor,
                                               const WorkloadInfo& info) const override;
 
+    std::unique_ptr<IWorkload> CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
+                                                      const WorkloadInfo& info) const override;
+
     ARMNN_DEPRECATED_MSG("Use CreateComparison instead")
     std::unique_ptr<IWorkload> CreateEqual(const EqualQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
@@ -204,6 +208,7 @@
     std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
                                                     const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead")
     std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index 5f9af59..412dc94 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -21,7 +21,6 @@
         RefWorkloadFactory.cpp \
         RefRegistryInitializer.cpp \
         RefTensorHandleFactory.cpp \
-        workloads/Abs.cpp \
         workloads/Activation.cpp \
         workloads/ArgMinMax.cpp \
         workloads/BatchNormImpl.cpp \
@@ -43,7 +42,6 @@
         workloads/Pad.cpp \
         workloads/Pooling2d.cpp \
         workloads/PreluImpl.cpp \
-        workloads/RefAbsWorkload.cpp \
         workloads/RefActivationWorkload.cpp \
         workloads/RefArgMinMaxWorkload.cpp \
         workloads/RefBatchNormalizationWorkload.cpp \
@@ -60,6 +58,7 @@
         workloads/RefDequantizeWorkload.cpp \
         workloads/RefDetectionPostProcessWorkload.cpp \
         workloads/RefElementwiseWorkload.cpp \
+        workloads/RefElementwiseUnaryWorkload.cpp \
         workloads/RefFakeQuantizationFloat32Workload.cpp \
         workloads/RefFloorWorkload.cpp \
         workloads/RefFullyConnectedWorkload.cpp \
@@ -78,7 +77,6 @@
         workloads/RefReshapeWorkload.cpp \
         workloads/RefResizeBilinearWorkload.cpp \
         workloads/RefResizeWorkload.cpp \
-        workloads/RefRsqrtWorkload.cpp \
         workloads/RefSliceWorkload.cpp \
         workloads/RefSoftmaxWorkload.cpp \
         workloads/RefSpaceToBatchNdWorkload.cpp \
@@ -88,7 +86,6 @@
         workloads/RefSplitterWorkload.cpp \
         workloads/RefTransposeConvolution2dWorkload.cpp \
         workloads/Resize.cpp \
-        workloads/Rsqrt.cpp \
         workloads/Slice.cpp \
         workloads/SpaceToBatchNd.cpp \
         workloads/SpaceToDepth.cpp \
diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp
index 23a8e9b..b83d205 100644
--- a/src/backends/reference/test/RefCreateWorkloadTests.cpp
+++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp
@@ -717,41 +717,6 @@
     RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
 }
 
-template <typename RsqrtWorkloadType, armnn::DataType DataType>
-static void RefCreateRsqrtTest()
-{
-    Graph graph;
-    RefWorkloadFactory factory = GetFactory();
-
-    auto workload = CreateRsqrtWorkloadTest<RsqrtWorkloadType, DataType>(factory, graph);
-
-    // Checks that outputs are as we expect them (see definition of CreateRsqrtWorkloadTest).
-    CheckInputOutput(std::move(workload),
-                     TensorInfo({ 1, 1 }, DataType),
-                     TensorInfo({ 1, 1 }, DataType));
-
-}
-
-BOOST_AUTO_TEST_CASE(CreateRsqrtFloat32)
-{
-    RefCreateRsqrtTest<RefRsqrtWorkload, armnn::DataType::Float32>();
-}
-
-BOOST_AUTO_TEST_CASE(CreateRsqrtFloat16)
-{
-    RefCreateRsqrtTest<RefRsqrtWorkload, armnn::DataType::Float16>();
-}
-
-BOOST_AUTO_TEST_CASE(CreateRsqrtUint8)
-{
-    RefCreateRsqrtTest<RefRsqrtWorkload, armnn::DataType::QAsymmU8>();
-}
-
-BOOST_AUTO_TEST_CASE(CreateRsqrtQsymm16)
-{
-    RefCreateRsqrtTest<RefRsqrtWorkload, armnn::DataType::QSymmS16>();
-}
-
 template <typename BatchToSpaceNdWorkloadType, armnn::DataType DataType>
 static void RefCreateBatchToSpaceNdTest()
 {
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index 75eccde..54a6881 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -5,7 +5,6 @@
 
 #include <backendsCommon/test/EndToEndTestImpl.hpp>
 
-#include <backendsCommon/test/AbsEndToEndTestImpl.hpp>
 #include <backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp>
 #include <backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp>
 #include <backendsCommon/test/ComparisonEndToEndTestImpl.hpp>
@@ -13,6 +12,7 @@
 #include <backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp>
 #include <backendsCommon/test/DequantizeEndToEndTestImpl.hpp>
 #include <backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp>
+#include <backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp>
 #include <backendsCommon/test/GatherEndToEndTestImpl.hpp>
 #include <backendsCommon/test/InstanceNormalizationEndToEndTestImpl.hpp>
 #include <backendsCommon/test/LogSoftmaxEndToEndTestImpl.hpp>
@@ -32,17 +32,43 @@
 // Abs
 BOOST_AUTO_TEST_CASE(RefAbsEndToEndTestFloat32)
 {
-    AbsEndToEnd<armnn::DataType::Float32>(defaultBackends);
+    std::vector<float> expectedOutput =
+    {
+        1.f, 1.f, 1.f, 1.f, 5.f, 5.f, 5.f, 5.f,
+        3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f
+    };
+
+    ElementwiseUnarySimpleEndToEnd<armnn::DataType::Float32>(defaultBackends,
+                                                             UnaryOperation::Abs,
+                                                             expectedOutput);
 }
 
 BOOST_AUTO_TEST_CASE(RefAbsEndToEndTestUint8)
 {
-    AbsEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    // Note the expected output will be implicitly quantized by the below test function
+    std::vector<float> expectedOutput =
+    {
+        1.f, 1.f, 1.f, 1.f, 5.f, 5.f, 5.f, 5.f,
+        3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f
+    };
+
+    ElementwiseUnarySimpleEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
+                                                              UnaryOperation::Abs,
+                                                              expectedOutput);
 }
 
 BOOST_AUTO_TEST_CASE(RefAbsEndToEndTestInt16)
 {
-    AbsEndToEnd<armnn::DataType::QSymmS16>(defaultBackends);
+    // Note the expected output will be implicitly quantized by the below test function
+    std::vector<float> expectedOutput =
+    {
+        1.f, 1.f, 1.f, 1.f, 5.f, 5.f, 5.f, 5.f,
+        3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f
+    };
+
+    ElementwiseUnarySimpleEndToEnd<armnn::DataType::QSymmS16>(defaultBackends,
+                                                              UnaryOperation::Abs,
+                                                              expectedOutput);
 }
 
 // Constant
diff --git a/src/backends/reference/workloads/Abs.cpp b/src/backends/reference/workloads/Abs.cpp
deleted file mode 100644
index 6a6a79c..0000000
--- a/src/backends/reference/workloads/Abs.cpp
+++ /dev/null
@@ -1,23 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "Abs.hpp"
-
-namespace armnn
-{
-
-void Abs(Decoder<float>& in,
-         Encoder<float>& out,
-         const TensorInfo& tensorInfo)
-{
-    for (unsigned int i = 0u; i < tensorInfo.GetNumElements(); ++i)
-    {
-        out[i];
-        in[i];
-        out.Set(std::abs(in.Get()));
-    }
-}
-
-} //namespace armnn
diff --git a/src/backends/reference/workloads/Abs.hpp b/src/backends/reference/workloads/Abs.hpp
index b1165d2..b05f2e3 100644
--- a/src/backends/reference/workloads/Abs.hpp
+++ b/src/backends/reference/workloads/Abs.hpp
@@ -1,19 +1,24 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2019 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
-#include "BaseIterator.hpp"
-#include <armnn/Tensor.hpp>
-#include <armnn/Types.hpp>
+#pragma once
+
+#include <cmath>
+#include <functional>
 
 namespace armnn
 {
-
-/// Performs the absolute function elementwise
-/// on the inputs to give the outputs.
-void Abs(Decoder<float>& in,
-         Encoder<float>& out,
-         const TensorInfo& tensorInfo);
+/// Unary functor computing the elementwise absolute value.
+/// NOTE(review): std::unary_function is removed in C++17; kept here for argument_type/result_type.
+template<typename T>
+struct abs : public std::unary_function<T, T>
+{
+    T operator () (const T& inputData) const
+    {
+        return std::abs(inputData);
+    }
+};
 
 } //namespace armnn
diff --git a/src/backends/reference/workloads/Broadcast.cpp b/src/backends/reference/workloads/Broadcast.cpp
index 8421a0a..24af0fc 100644
--- a/src/backends/reference/workloads/Broadcast.cpp
+++ b/src/backends/reference/workloads/Broadcast.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2019 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -30,4 +30,23 @@
     }
 }
 
+BroadcastLoop::BroadcastLoop(const TensorShape& inShape, const TensorShape& outShape)
+: m_DimData(outShape.GetNumDimensions())
+{
+    const unsigned int numDims = GetNumDimensions();
+
+    unsigned int sIn = 1;
+    unsigned int sOut = 1;
+
+    for (unsigned int j = numDims - 1, k = 0; k < numDims ; k++, j--)
+    {
+        m_DimData[j].m_DimSize = outShape[j];
+        m_DimData[j].m_Stride1 = (inShape[j] > 1) ? sIn : 0;
+        m_DimData[j].m_StrideOut = sOut;
+
+        sIn *= inShape[j];
+        sOut *= outShape[j];
+    }
+}
+
 } // namespace armnn
diff --git a/src/backends/reference/workloads/Broadcast.hpp b/src/backends/reference/workloads/Broadcast.hpp
index 5bf6be8..a3d944a 100644
--- a/src/backends/reference/workloads/Broadcast.hpp
+++ b/src/backends/reference/workloads/Broadcast.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2019 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -15,6 +15,8 @@
 {
     BroadcastLoop(const TensorShape& inShape0, const TensorShape& inShape1, const TensorShape& outShape);
 
+    BroadcastLoop(const TensorShape& inShape, const TensorShape& outShape);
+
     unsigned int GetNumDimensions()
     {
         return static_cast<unsigned int>(m_DimData.size());
@@ -56,6 +58,37 @@
         outData -= outDataMovement;
     }
 
+    template <typename Func, typename DecoderOp, typename EncoderOp>
+    void Unroll(Func operationFunc,
+                unsigned int dimension,
+                DecoderOp& inData,
+                EncoderOp& outData)
+    {
+        if (dimension >= GetNumDimensions())
+        {
+            outData.Set(operationFunc(inData.Get()));
+            return;
+        }
+
+        unsigned int inDataMovement = 0;
+        unsigned int outDataMovement = 0;
+
+        for (unsigned int i = 0; i < m_DimData[dimension].m_DimSize; i++)
+        {
+            Unroll(operationFunc, dimension + 1, inData, outData);
+
+            inData += m_DimData[dimension].m_Stride1;
+            outData += m_DimData[dimension].m_StrideOut;
+
+            inDataMovement += m_DimData[dimension].m_Stride1;
+            outDataMovement += m_DimData[dimension].m_StrideOut;
+        }
+
+        // move iterator back to the start
+        inData -= inDataMovement;
+        outData -= outDataMovement;
+    }
+
 private:
     // Struct to hold the dimension data.
     struct BroadcastDimensionData
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index dbbdd89..6795204 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -4,7 +4,6 @@
 #
 
 list(APPEND armnnRefBackendWorkloads_sources
-    Abs.cpp
     Abs.hpp
     ArgMinMax.cpp
     ArgMinMax.hpp
@@ -33,6 +32,7 @@
     ElementwiseFunction.cpp
     ElementwiseFunction.hpp
     Encoders.hpp
+    Exp.hpp
     FullyConnected.cpp
     FullyConnected.hpp
     Gather.cpp
@@ -55,8 +55,6 @@
     Pooling2d.hpp
     PreluImpl.cpp
     PreluImpl.hpp
-    RefAbsWorkload.cpp
-    RefAbsWorkload.hpp
     RefActivationWorkload.cpp
     RefActivationWorkload.hpp
     RefArgMinMaxWorkload.cpp
@@ -89,6 +87,8 @@
     RefDequantizeWorkload.hpp
     RefDetectionPostProcessWorkload.cpp
     RefDetectionPostProcessWorkload.hpp
+    RefElementwiseUnaryWorkload.cpp
+    RefElementwiseUnaryWorkload.hpp
     RefFakeQuantizationFloat32Workload.cpp
     RefFakeQuantizationFloat32Workload.hpp
     RefFloorWorkload.cpp
@@ -125,8 +125,6 @@
     RefResizeBilinearWorkload.hpp
     RefResizeWorkload.cpp
     RefResizeWorkload.hpp
-    RefRsqrtWorkload.cpp
-    RefRsqrtWorkload.hpp
     RefSliceWorkload.cpp
     RefSliceWorkload.hpp
     RefSoftmaxWorkload.cpp
@@ -147,7 +145,6 @@
     RefWorkloadUtils.hpp
     Resize.cpp
     Resize.hpp
-    Rsqrt.cpp
     Rsqrt.hpp
     Slice.cpp
     Slice.hpp
@@ -159,6 +156,7 @@
     SpaceToDepth.cpp
     Splitter.hpp
     Splitter.cpp
+    Sqrt.hpp
     Stack.cpp
     Stack.hpp
     StridedSlice.hpp
diff --git a/src/backends/reference/workloads/ElementwiseFunction.cpp b/src/backends/reference/workloads/ElementwiseFunction.cpp
index 888037f..5687cf5 100644
--- a/src/backends/reference/workloads/ElementwiseFunction.cpp
+++ b/src/backends/reference/workloads/ElementwiseFunction.cpp
@@ -7,36 +7,56 @@
 #include "Broadcast.hpp"
 #include <functional>
 #include "Minimum.hpp"
-
 #include "Maximum.hpp"
+#include "Abs.hpp"
+#include "Exp.hpp"
+#include "Rsqrt.hpp"
+#include "Sqrt.hpp"
+
 
 namespace armnn
 {
 
 template <typename Functor>
-ElementwiseFunction<Functor>::ElementwiseFunction(const TensorShape& inShape0,
-                                                   const TensorShape& inShape1,
-                                                   const TensorShape& outShape,
-                                                   armnn::Decoder<InType>& inData0,
-                                                   armnn::Decoder<InType>& inData1,
-                                                   armnn::Encoder<OutType>& outData)
+ElementwiseBinaryFunction<Functor>::ElementwiseBinaryFunction(const TensorShape& inShape0,
+                                                              const TensorShape& inShape1,
+                                                              const TensorShape& outShape,
+                                                              Decoder<InType>& inData0,
+                                                              Decoder<InType>& inData1,
+                                                              Encoder<OutType>& outData)
 {
     BroadcastLoop(inShape0, inShape1, outShape).Unroll(Functor(), 0, inData0, inData1, outData);
 }
 
+template <typename Functor>
+ElementwiseUnaryFunction<Functor>::ElementwiseUnaryFunction(const TensorShape& inShape,
+                                                            const TensorShape& outShape,
+                                                            Decoder<InType>& inData,
+                                                            Encoder<OutType>& outData)
+{
+    BroadcastLoop(inShape, outShape).Unroll(Functor(), 0, inData, outData);
+}
+
 } //namespace armnn
 
-template struct armnn::ElementwiseFunction<std::plus<float>>;
-template struct armnn::ElementwiseFunction<std::minus<float>>;
-template struct armnn::ElementwiseFunction<std::multiplies<float>>;
-template struct armnn::ElementwiseFunction<std::divides<float>>;
-template struct armnn::ElementwiseFunction<armnn::maximum<float>>;
-template struct armnn::ElementwiseFunction<armnn::minimum<float>>;
+template struct armnn::ElementwiseBinaryFunction<std::plus<float>>;
+template struct armnn::ElementwiseBinaryFunction<std::minus<float>>;
+template struct armnn::ElementwiseBinaryFunction<std::multiplies<float>>;
+template struct armnn::ElementwiseBinaryFunction<std::divides<float>>;
+template struct armnn::ElementwiseBinaryFunction<armnn::maximum<float>>;
+template struct armnn::ElementwiseBinaryFunction<armnn::minimum<float>>;
 
 // Comparison
-template struct armnn::ElementwiseFunction<std::equal_to<float>>;
-template struct armnn::ElementwiseFunction<std::greater<float>>;
-template struct armnn::ElementwiseFunction<std::greater_equal<float>>;
-template struct armnn::ElementwiseFunction<std::less<float>>;
-template struct armnn::ElementwiseFunction<std::less_equal<float>>;
-template struct armnn::ElementwiseFunction<std::not_equal_to<float>>;
+template struct armnn::ElementwiseBinaryFunction<std::equal_to<float>>;
+template struct armnn::ElementwiseBinaryFunction<std::greater<float>>;
+template struct armnn::ElementwiseBinaryFunction<std::greater_equal<float>>;
+template struct armnn::ElementwiseBinaryFunction<std::less<float>>;
+template struct armnn::ElementwiseBinaryFunction<std::less_equal<float>>;
+template struct armnn::ElementwiseBinaryFunction<std::not_equal_to<float>>;
+
+// Unary
+template struct armnn::ElementwiseUnaryFunction<armnn::abs<float>>;
+template struct armnn::ElementwiseUnaryFunction<armnn::exp<float>>;
+template struct armnn::ElementwiseUnaryFunction<std::negate<float>>;
+template struct armnn::ElementwiseUnaryFunction<armnn::rsqrt<float>>;
+template struct armnn::ElementwiseUnaryFunction<armnn::sqrt<float>>;
diff --git a/src/backends/reference/workloads/ElementwiseFunction.hpp b/src/backends/reference/workloads/ElementwiseFunction.hpp
index fd1fab0..8259ba5 100644
--- a/src/backends/reference/workloads/ElementwiseFunction.hpp
+++ b/src/backends/reference/workloads/ElementwiseFunction.hpp
@@ -12,17 +12,29 @@
 {
 
 template <typename Functor>
-struct ElementwiseFunction
+struct ElementwiseBinaryFunction
 {
     using OutType = typename Functor::result_type;
     using InType = typename Functor::first_argument_type;
 
-    ElementwiseFunction(const TensorShape& inShape0,
-                        const TensorShape& inShape1,
-                        const TensorShape& outShape,
-                        armnn::Decoder<InType>& inData0,
-                        armnn::Decoder<InType>& inData1,
-                        armnn::Encoder<OutType>& outData);
+    ElementwiseBinaryFunction(const TensorShape& inShape0,
+                              const TensorShape& inShape1,
+                              const TensorShape& outShape,
+                              Decoder<InType>& inData0,
+                              Decoder<InType>& inData1,
+                              Encoder<OutType>& outData);
+};
+
+template <typename Functor>
+struct ElementwiseUnaryFunction
+{
+    using OutType = typename Functor::result_type;
+    using InType = typename Functor::argument_type;
+
+    ElementwiseUnaryFunction(const TensorShape& inShape,
+                             const TensorShape& outShape,
+                             Decoder<InType>& inData,
+                             Encoder<OutType>& outData);
 };
 
 } //namespace armnn
diff --git a/src/backends/reference/workloads/Exp.hpp b/src/backends/reference/workloads/Exp.hpp
new file mode 100644
index 0000000..1a04672
--- /dev/null
+++ b/src/backends/reference/workloads/Exp.hpp
@@ -0,0 +1,24 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <cmath>
+#include <functional>
+
+namespace armnn
+{
+/// Unary functor computing the elementwise natural exponential.
+/// NOTE(review): std::unary_function is removed in C++17; kept here for argument_type/result_type.
+template<typename T>
+struct exp : public std::unary_function<T, T>
+{
+    T operator () (const T& inputData) const
+    {
+        return std::exp(inputData);
+    }
+};
+
+} //namespace armnn
diff --git a/src/backends/reference/workloads/RefAbsWorkload.cpp b/src/backends/reference/workloads/RefAbsWorkload.cpp
deleted file mode 100644
index 5c1f8c0..0000000
--- a/src/backends/reference/workloads/RefAbsWorkload.cpp
+++ /dev/null
@@ -1,37 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "RefAbsWorkload.hpp"
-
-#include "Abs.hpp"
-#include "Decoders.hpp"
-#include "Encoders.hpp"
-#include "RefWorkloadUtils.hpp"
-
-#include <Profiling.hpp>
-
-namespace armnn
-{
-
-void RefAbsWorkload::Execute() const
-{
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefAbsWorkload_Execute");
-
-    const TensorInfo& inputTensorInfo = GetTensorInfo(m_Data.m_Inputs[0]);
-
-    std::unique_ptr<Decoder<float>> decoderPtr = MakeDecoder<float>(inputTensorInfo, m_Data.m_Inputs[0]->Map());
-    Decoder<float>& decoder = *decoderPtr;
-
-    const TensorInfo& outputTensorInfo = GetTensorInfo(m_Data.m_Outputs[0]);
-
-    std::unique_ptr<Encoder<float>> encoderPtr = MakeEncoder<float>(outputTensorInfo, m_Data.m_Outputs[0]->Map());
-    Encoder<float>& encoder = *encoderPtr;
-
-    Abs(decoder,
-        encoder,
-        inputTensorInfo);
-}
-
-} //namespace armnn
diff --git a/src/backends/reference/workloads/RefAbsWorkload.hpp b/src/backends/reference/workloads/RefAbsWorkload.hpp
deleted file mode 100644
index 6810555..0000000
--- a/src/backends/reference/workloads/RefAbsWorkload.hpp
+++ /dev/null
@@ -1,21 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <backendsCommon/Workload.hpp>
-#include <backendsCommon/WorkloadData.hpp>
-
-namespace armnn
-{
-
-class RefAbsWorkload : public BaseWorkload<AbsQueueDescriptor>
-{
-public:
-    using BaseWorkload<AbsQueueDescriptor>::BaseWorkload;
-    virtual void Execute() const override;
-};
-
-} //namespace armnn
diff --git a/src/backends/reference/workloads/RefComparisonWorkload.cpp b/src/backends/reference/workloads/RefComparisonWorkload.cpp
index 6044622..52ad9a2 100644
--- a/src/backends/reference/workloads/RefComparisonWorkload.cpp
+++ b/src/backends/reference/workloads/RefComparisonWorkload.cpp
@@ -52,12 +52,12 @@
     m_Input1->Reset(m_Data.m_Inputs[1]->Map());
     m_Output->Reset(m_Data.m_Outputs[0]->Map());
 
-    using EqualFunction          = ElementwiseFunction<std::equal_to<InType>>;
-    using GreaterFunction        = ElementwiseFunction<std::greater<InType>>;
-    using GreaterOrEqualFunction = ElementwiseFunction<std::greater_equal<InType>>;
-    using LessFunction           = ElementwiseFunction<std::less<InType>>;
-    using LessOrEqualFunction    = ElementwiseFunction<std::less_equal<InType>>;
-    using NotEqualFunction       = ElementwiseFunction<std::not_equal_to<InType>>;
+    using EqualFunction          = ElementwiseBinaryFunction<std::equal_to<InType>>;
+    using GreaterFunction        = ElementwiseBinaryFunction<std::greater<InType>>;
+    using GreaterOrEqualFunction = ElementwiseBinaryFunction<std::greater_equal<InType>>;
+    using LessFunction           = ElementwiseBinaryFunction<std::less<InType>>;
+    using LessOrEqualFunction    = ElementwiseBinaryFunction<std::less_equal<InType>>;
+    using NotEqualFunction       = ElementwiseBinaryFunction<std::not_equal_to<InType>>;
 
     switch (m_Data.m_Parameters.m_Operation)
     {
diff --git a/src/backends/reference/workloads/RefElementwiseUnaryWorkload.cpp b/src/backends/reference/workloads/RefElementwiseUnaryWorkload.cpp
new file mode 100644
index 0000000..4fbb0d1
--- /dev/null
+++ b/src/backends/reference/workloads/RefElementwiseUnaryWorkload.cpp
@@ -0,0 +1,95 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefElementwiseUnaryWorkload.hpp"
+
+#include "Decoders.hpp"
+#include "ElementwiseFunction.hpp"
+#include "Encoders.hpp"
+#include "RefWorkloadUtils.hpp"
+#include "Abs.hpp"
+#include "Exp.hpp"
+#include "Rsqrt.hpp"
+#include "Sqrt.hpp"
+
+#include <Profiling.hpp>
+
+#include <armnn/TypesUtils.hpp>
+
+#include <functional>
+
+namespace armnn
+{
+
+RefElementwiseUnaryWorkload::RefElementwiseUnaryWorkload(const ElementwiseUnaryQueueDescriptor& desc,
+                                                         const WorkloadInfo& info)
+    : BaseWorkload<ElementwiseUnaryQueueDescriptor>(desc, info)
+{}
+
+void RefElementwiseUnaryWorkload::PostAllocationConfigure()
+{
+    const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
+    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+
+    m_Input = MakeDecoder<InType>(inputInfo);
+
+    m_Output = MakeEncoder<OutType>(outputInfo);
+}
+
+void RefElementwiseUnaryWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefElementwiseUnaryWorkload_Execute");
+
+    const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
+    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+
+    const TensorShape& inShape = inputInfo.GetShape();
+    const TensorShape& outShape = outputInfo.GetShape();
+
+    m_Input->Reset(m_Data.m_Inputs[0]->Map());
+    m_Output->Reset(m_Data.m_Outputs[0]->Map());
+
+    using AbsFunction   = ElementwiseUnaryFunction<abs<InType>>;
+    using ExpFunction   = ElementwiseUnaryFunction<exp<InType>>;
+    using NegFunction   = ElementwiseUnaryFunction<std::negate<InType>>;
+    using RsqrtFunction = ElementwiseUnaryFunction<rsqrt<InType>>;
+    using SqrtFunction  = ElementwiseUnaryFunction<sqrt<InType>>;
+
+    switch (m_Data.m_Parameters.m_Operation)
+    {
+        case UnaryOperation::Abs:
+        {
+            AbsFunction(inShape, outShape, *m_Input, *m_Output);
+            break;
+        }
+        case UnaryOperation::Exp:
+        {
+            ExpFunction(inShape, outShape, *m_Input, *m_Output);
+            break;
+        }
+        case UnaryOperation::Neg:
+        {
+            NegFunction(inShape, outShape, *m_Input, *m_Output);
+            break;
+        }
+        case UnaryOperation::Rsqrt:
+        {
+            RsqrtFunction(inShape, outShape, *m_Input, *m_Output);
+            break;
+        }
+        case UnaryOperation::Sqrt:
+        {
+            SqrtFunction(inShape, outShape, *m_Input, *m_Output);
+            break;
+        }
+        default:
+        {
+            throw InvalidArgumentException(std::string("Unsupported unary operation ") +
+                GetUnaryOperationAsCString(m_Data.m_Parameters.m_Operation), CHECK_LOCATION());
+        }
+    }
+}
+
+} // namespace armnn
diff --git a/src/backends/reference/workloads/RefElementwiseUnaryWorkload.hpp b/src/backends/reference/workloads/RefElementwiseUnaryWorkload.hpp
new file mode 100644
index 0000000..efb2865
--- /dev/null
+++ b/src/backends/reference/workloads/RefElementwiseUnaryWorkload.hpp
@@ -0,0 +1,33 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "BaseIterator.hpp"
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+
+namespace armnn
+{
+
+class RefElementwiseUnaryWorkload : public BaseWorkload<ElementwiseUnaryQueueDescriptor>
+{
+public:
+    using BaseWorkload<ElementwiseUnaryQueueDescriptor>::m_Data;
+
+    RefElementwiseUnaryWorkload(const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info);
+    void PostAllocationConfigure() override;
+    void Execute() const override;
+
+private:
+    using InType  = float;
+    using OutType = float;
+
+    std::unique_ptr<Decoder<InType>>  m_Input;
+    std::unique_ptr<Encoder<OutType>> m_Output;
+};
+
+} // namespace armnn
diff --git a/src/backends/reference/workloads/RefElementwiseWorkload.cpp b/src/backends/reference/workloads/RefElementwiseWorkload.cpp
index 7e02f03..18bf0a7 100644
--- a/src/backends/reference/workloads/RefElementwiseWorkload.cpp
+++ b/src/backends/reference/workloads/RefElementwiseWorkload.cpp
@@ -53,12 +53,12 @@
     m_Input1->Reset(m_Data.m_Inputs[1]->Map());
     m_Output->Reset(m_Data.m_Outputs[0]->Map());
 
-    ElementwiseFunction<Functor>(inShape0,
-                                 inShape1,
-                                 outShape,
-                                 *m_Input0,
-                                 *m_Input1,
-                                 *m_Output);
+    ElementwiseBinaryFunction<Functor>(inShape0,
+                                       inShape1,
+                                       outShape,
+                                       *m_Input0,
+                                       *m_Input1,
+                                       *m_Output);
 }
 
 } //namespace armnn
diff --git a/src/backends/reference/workloads/RefElementwiseWorkload.hpp b/src/backends/reference/workloads/RefElementwiseWorkload.hpp
index ee0d80b..264ddce 100644
--- a/src/backends/reference/workloads/RefElementwiseWorkload.hpp
+++ b/src/backends/reference/workloads/RefElementwiseWorkload.hpp
@@ -21,8 +21,8 @@
 class RefElementwiseWorkload : public BaseWorkload<ParentDescriptor>
 {
 public:
-    using InType = typename ElementwiseFunction<Functor>::InType;
-    using OutType = typename ElementwiseFunction<Functor>::OutType;
+    using InType = typename ElementwiseBinaryFunction<Functor>::InType;
+    using OutType = typename ElementwiseBinaryFunction<Functor>::OutType;
     using BaseWorkload<ParentDescriptor>::m_Data;
 
     RefElementwiseWorkload(const ParentDescriptor& descriptor, const WorkloadInfo& info);
diff --git a/src/backends/reference/workloads/RefRsqrtWorkload.cpp b/src/backends/reference/workloads/RefRsqrtWorkload.cpp
deleted file mode 100644
index fd6b9a3..0000000
--- a/src/backends/reference/workloads/RefRsqrtWorkload.cpp
+++ /dev/null
@@ -1,37 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "RefRsqrtWorkload.hpp"
-
-#include "Decoders.hpp"
-#include "Encoders.hpp"
-#include "RefWorkloadUtils.hpp"
-#include "Rsqrt.hpp"
-
-#include <Profiling.hpp>
-
-namespace armnn
-{
-
-void RefRsqrtWorkload::Execute() const
-{
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefRsqrtWorkload_Execute");
-
-    const TensorInfo& inputTensorInfo = GetTensorInfo(m_Data.m_Inputs[0]);
-
-    std::unique_ptr<Decoder<float>> decoderPtr = MakeDecoder<float>(inputTensorInfo, m_Data.m_Inputs[0]->Map());
-    Decoder<float>& decoder = *decoderPtr;
-
-    const TensorInfo& outputTensorInfo = GetTensorInfo(m_Data.m_Outputs[0]);
-
-    std::unique_ptr<Encoder<float>> encoderPtr = MakeEncoder<float>(outputTensorInfo, m_Data.m_Outputs[0]->Map());
-    Encoder<float>& encoder = *encoderPtr;
-
-    Rsqrt(decoder,
-          encoder,
-          GetTensorInfo(m_Data.m_Inputs[0]));
-}
-
-} //namespace armnn
diff --git a/src/backends/reference/workloads/RefRsqrtWorkload.hpp b/src/backends/reference/workloads/RefRsqrtWorkload.hpp
deleted file mode 100644
index 6c8ad5b..0000000
--- a/src/backends/reference/workloads/RefRsqrtWorkload.hpp
+++ /dev/null
@@ -1,21 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <backendsCommon/Workload.hpp>
-#include <backendsCommon/WorkloadData.hpp>
-
-namespace armnn
-{
-
-class RefRsqrtWorkload : public BaseWorkload<RsqrtQueueDescriptor>
-{
-public:
-    using BaseWorkload<RsqrtQueueDescriptor>::BaseWorkload;
-    virtual void Execute() const override;
-};
-
-} //namespace armnn
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index 1f9ad4a..7034b67 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -5,7 +5,6 @@
 
 #pragma once
 
-#include "Abs.hpp"
 #include "Activation.hpp"
 #include "ArgMinMax.hpp"
 #include "BatchNormImpl.hpp"
@@ -15,7 +14,6 @@
 #include "FullyConnected.hpp"
 #include "Gather.hpp"
 #include "Pooling2d.hpp"
-#include "RefAbsWorkload.hpp"
 #include "RefActivationWorkload.hpp"
 #include "RefArgMinMaxWorkload.hpp"
 #include "RefBatchNormalizationWorkload.hpp"
@@ -33,6 +31,7 @@
 #include "RefDetectionPostProcessWorkload.hpp"
 #include "RefDequantizeWorkload.hpp"
 #include "RefElementwiseWorkload.hpp"
+#include "RefElementwiseUnaryWorkload.hpp"
 #include "RefFullyConnectedWorkload.hpp"
 #include "RefFloorWorkload.hpp"
 #include "RefFakeQuantizationFloat32Workload.hpp"
@@ -51,7 +50,6 @@
 #include "RefReshapeWorkload.hpp"
 #include "RefResizeBilinearWorkload.hpp"
 #include "RefResizeWorkload.hpp"
-#include "RefRsqrtWorkload.hpp"
 #include "RefSliceWorkload.hpp"
 #include "RefSplitterWorkload.hpp"
 #include "RefSoftmaxWorkload.hpp"
diff --git a/src/backends/reference/workloads/Rsqrt.cpp b/src/backends/reference/workloads/Rsqrt.cpp
deleted file mode 100644
index 5abc2c8..0000000
--- a/src/backends/reference/workloads/Rsqrt.cpp
+++ /dev/null
@@ -1,25 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "Rsqrt.hpp"
-
-#include <cmath>
-
-namespace armnn
-{
-
-void Rsqrt(Decoder<float>& in,
-           Encoder<float>& out,
-           const TensorInfo& tensorInfo)
-{
-    for (unsigned int i = 0; i < tensorInfo.GetNumElements(); ++i)
-    {
-        out[i];
-        in[i];
-        out.Set(1.f / sqrtf(in.Get()));
-    }
-}
-
-} //namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/workloads/Rsqrt.hpp b/src/backends/reference/workloads/Rsqrt.hpp
index ffc6b18..47ebcf3 100644
--- a/src/backends/reference/workloads/Rsqrt.hpp
+++ b/src/backends/reference/workloads/Rsqrt.hpp
@@ -1,19 +1,24 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2019 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
-#include "BaseIterator.hpp"
-#include <armnn/Tensor.hpp>
-#include <armnn/Types.hpp>
+#pragma once
+
+#include <cmath>
+#include <functional>
 
 namespace armnn
 {
-
-/// Performs the reciprocal squareroot function elementwise
-/// on the inputs to give the outputs.
-void Rsqrt(Decoder<float>& in,
-           Encoder<float>& out,
-           const TensorInfo& tensorInfo);
+/// Unary functor computing the elementwise reciprocal square root.
+/// NOTE(review): std::unary_function is removed in C++17; kept here for argument_type/result_type.
+template<typename T>
+struct rsqrt : public std::unary_function<T, T>
+{
+    T operator () (const T& inputData) const
+    {
+        return 1 / std::sqrt(inputData);
+    }
+};
 
 } //namespace armnn
diff --git a/src/backends/reference/workloads/Sqrt.hpp b/src/backends/reference/workloads/Sqrt.hpp
new file mode 100644
index 0000000..e4ff6a4
--- /dev/null
+++ b/src/backends/reference/workloads/Sqrt.hpp
@@ -0,0 +1,24 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <cmath>
+#include <functional>
+
+namespace armnn
+{
+/// Unary functor computing the elementwise square root.
+/// NOTE(review): std::unary_function is removed in C++17; kept here for argument_type/result_type.
+template<typename T>
+struct sqrt : public std::unary_function<T, T>
+{
+    T operator () (const T& inputData) const
+    {
+        return std::sqrt(inputData);
+    }
+};
+
+} //namespace armnn