IVGCVSW-3319 Add frontend support for TransposeConvolution2d Layer
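
The snippet below is a minimal sketch of how a caller is expected to use the
new INetwork::AddTransposeConvolution2dLayer() API once backend workloads are
available; the layer names, tensor shapes and weight values are illustrative
only and are not part of this change.

    #include <armnn/ArmNN.hpp>

    #include <vector>

    int main()
    {
        using namespace armnn;

        INetworkPtr network = INetwork::Create();

        // 1x1x3x3 (NCHW) input
        IConnectableLayer* input = network->AddInputLayer(0, "input");

        // 3x3 kernel, stride 1, no padding, no bias
        TransposeConvolution2dDescriptor descriptor;
        descriptor.m_StrideX     = 1;
        descriptor.m_StrideY     = 1;
        descriptor.m_BiasEnabled = false;
        descriptor.m_DataLayout  = DataLayout::NCHW;

        std::vector<float> weightsData(1 * 1 * 3 * 3, 1.0f);
        TensorInfo weightsInfo(TensorShape({ 1, 1, 3, 3 }), DataType::Float32);
        ConstTensor weights(weightsInfo, weightsData.data());

        IConnectableLayer* transposeConv =
            network->AddTransposeConvolution2dLayer(descriptor, weights, EmptyOptional(), "transposeConv");

        IConnectableLayer* output = network->AddOutputLayer(0, "output");

        input->GetOutputSlot(0).Connect(transposeConv->GetInputSlot(0));
        transposeConv->GetOutputSlot(0).Connect(output->GetInputSlot(0));

        input->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({ 1, 1, 3, 3 }), DataType::Float32));
        transposeConv->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({ 1, 1, 5, 5 }), DataType::Float32));

        return 0;
    }

With stride 1, a 3x3 kernel and no padding, the 1x1x3x3 input above should
produce a 1x1x5x5 output from InferOutputShapes()
(out = stride * (in - 1) + kernel - padding, per spatial dimension).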

Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: Ic06f63f1eff255e697facf319e2ac4c83d782e7c
diff --git a/Android.mk b/Android.mk
index 7f3080b..b08a4ae 100644
--- a/Android.mk
+++ b/Android.mk
@@ -135,6 +135,7 @@
         src/armnn/layers/StridedSliceLayer.cpp \
         src/armnn/layers/SubtractionLayer.cpp \
         src/armnn/layers/SwitchLayer.cpp \
+        src/armnn/layers/TransposeConvolution2dLayer.cpp \
         src/armnn/Descriptors.cpp \
         src/armnn/Exceptions.cpp \
         src/armnn/Graph.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 2971b0d..f07295f 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -318,6 +318,8 @@
     src/armnn/layers/SubtractionLayer.hpp
     src/armnn/layers/SwitchLayer.cpp
     src/armnn/layers/SwitchLayer.hpp
+    src/armnn/layers/TransposeConvolution2dLayer.cpp
+    src/armnn/layers/TransposeConvolution2dLayer.hpp
     src/armnn/BackendSettings.hpp
     src/armnn/CompatibleTypes.hpp
     src/armnn/Descriptors.cpp
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index dd1991d..2fda8c1 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -684,4 +684,36 @@
     unsigned int m_NumOutputSlots;
 };
 
+/// A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
+struct TransposeConvolution2dDescriptor
+{
+    TransposeConvolution2dDescriptor() :
+        m_PadLeft(0),
+        m_PadRight(0),
+        m_PadTop(0),
+        m_PadBottom(0),
+        m_StrideX(0),
+        m_StrideY(0),
+        m_BiasEnabled(false),
+        m_DataLayout(DataLayout::NCHW)
+    {}
+
+    /// Padding left value in the width dimension.
+    uint32_t   m_PadLeft;
+    /// Padding right value in the width dimension.
+    uint32_t   m_PadRight;
+    /// Padding top value in the height dimension.
+    uint32_t   m_PadTop;
+    /// Padding bottom value in the height dimension.
+    uint32_t   m_PadBottom;
+    /// Stride value when proceeding through input for the width dimension.
+    uint32_t   m_StrideX;
+    /// Stride value when proceeding through input for the height dimension.
+    uint32_t   m_StrideY;
+    /// Enable/disable bias.
+    bool       m_BiasEnabled;
+    /// The data layout to be used (NCHW, NHWC).
+    DataLayout m_DataLayout;
+};
+
 } // namespace armnn
\ No newline at end of file
diff --git a/include/armnn/DescriptorsFwd.hpp b/include/armnn/DescriptorsFwd.hpp
index 1c75c25..b814d48 100644
--- a/include/armnn/DescriptorsFwd.hpp
+++ b/include/armnn/DescriptorsFwd.hpp
@@ -30,6 +30,7 @@
 struct SpaceToBatchNdDescriptor;
 struct SpaceToDepthDescriptor;
 struct StridedSliceDescriptor;
+struct TransposeConvolution2dDescriptor;
 struct ViewsDescriptor;
 
 // MergerDescriptor is deprecated use ConcatDescriptor instead
diff --git a/include/armnn/ILayerSupport.hpp b/include/armnn/ILayerSupport.hpp
index 324a9f5..eb581d3 100644
--- a/include/armnn/ILayerSupport.hpp
+++ b/include/armnn/ILayerSupport.hpp
@@ -295,6 +295,15 @@
                                    const TensorInfo& output0,
                                    const TensorInfo& output1,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+
+    virtual bool IsTransposeConvolution2dSupported(
+        const TensorInfo& input,
+        const TensorInfo& output,
+        const TransposeConvolution2dDescriptor& descriptor,
+        const TensorInfo& weights,
+        const Optional<TensorInfo>& biases,
+        Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+
 }; // class ILayerSupport
 
 using ILayerSupportSharedPtr = std::shared_ptr<ILayerSupport>;
diff --git a/include/armnn/ILayerVisitor.hpp b/include/armnn/ILayerVisitor.hpp
index 9519c8b..c98760c 100644
--- a/include/armnn/ILayerVisitor.hpp
+++ b/include/armnn/ILayerVisitor.hpp
@@ -363,7 +363,7 @@
 
     /// Function a strided slice layer should call back to when its Accept(ILayerVisitor&) function is invoked.
     /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param StridedSliceDescriptor - Parameters for the strided slice operation.
+    /// @param stridedSliceDescriptor - Parameters for the strided slice operation.
     /// @param name - Optional name for the layer.
     virtual void VisitStridedSliceLayer(const IConnectableLayer* layer,
                                         const StridedSliceDescriptor& stridedSliceDescriptor,
@@ -381,6 +381,19 @@
     virtual void VisitSwitchLayer(const IConnectableLayer* layer,
                                   const char* name = nullptr) = 0;
 
+    /// Function that a 2D transpose convolution layer should call back to when its Accept(ILayerVisitor&)
+    /// function is invoked.
+    /// @param layer - pointer to the layer which is calling back to this visit function.
+    /// @param descriptor - Description of the 2D transpose convolution layer.
+    /// @param weights - Tensor for the weights data.
+    /// @param biases - Optional tensor for the bias data.
+    /// @param name - Optional name for the layer.
+    virtual void VisitTransposeConvolution2dLayer(const IConnectableLayer* layer,
+                                                  const TransposeConvolution2dDescriptor& descriptor,
+                                                  const ConstTensor& weights,
+                                                  const Optional<ConstTensor>& biases,
+                                                  const char* name = nullptr) = 0;
+
     virtual void StartVisit() {}
     virtual void FinishVisit() {}
 
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index cacca33..af67764 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -432,6 +432,17 @@
     /// @return - Interface for configuring the layer.
     virtual IConnectableLayer* AddPreluLayer(const char* name = nullptr) = 0;
 
+    /// Adds a 2D transpose convolution layer to the network.
+    /// @param descriptor - Description of the 2D transpose convolution layer.
+    /// @param weights - Tensor for the weights data.
+    /// @param biases - Optional tensor for the bias data.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    virtual IConnectableLayer* AddTransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& descriptor,
+                                                              const ConstTensor& weights,
+                                                              const Optional<ConstTensor>& biases,
+                                                              const char* name = nullptr) = 0;
+
     virtual void Accept(ILayerVisitor& visitor) const = 0;
 
 protected:
diff --git a/include/armnn/LayerSupport.hpp b/include/armnn/LayerSupport.hpp
index 673193f..f0dca77 100644
--- a/include/armnn/LayerSupport.hpp
+++ b/include/armnn/LayerSupport.hpp
@@ -381,4 +381,14 @@
                        const TensorInfo& output1,
                        char* reasonIfUnsupported = nullptr,
                        size_t reasonIfUnsupportedMaxLength = 1024);
+
+/// Deprecated in favor of IBackend and ILayerSupport interfaces
+bool IsTransposeConvolution2dSupported(const BackendId& backend,
+                                       const TensorInfo& input,
+                                       const TensorInfo& output,
+                                       const TransposeConvolution2dDescriptor& descriptor,
+                                       const TensorInfo& weights,
+                                       const Optional<TensorInfo>& biases,
+                                       char* reasonIfUnsupported = nullptr,
+                                       size_t reasonIfUnsupportedMaxLength = 1024);
 }
diff --git a/include/armnn/LayerVisitorBase.hpp b/include/armnn/LayerVisitorBase.hpp
index 48fc2bb..8406efe 100644
--- a/include/armnn/LayerVisitorBase.hpp
+++ b/include/armnn/LayerVisitorBase.hpp
@@ -193,6 +193,13 @@
 
     void VisitSwitchLayer(const IConnectableLayer*,
                           const char*) override { DefaultPolicy::Apply(__func__); }
+
+    void VisitTransposeConvolution2dLayer(const IConnectableLayer*,
+                                          const TransposeConvolution2dDescriptor&,
+                                          const ConstTensor&,
+                                          const Optional<ConstTensor>&,
+                                          const char*) override { DefaultPolicy::Apply(__func__); }
+
 };
 
-} //namespace armnn
+} // namespace armnn
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index a1434ea..dc3dc17 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -60,9 +60,10 @@
     Splitter,
     StridedSlice,
     Subtraction,
+    Switch,
     // Last layer goes here.
     LastLayer,
-    Switch = LastLayer
+    TransposeConvolution2d = LastLayer
 };
 
 const char* GetLayerTypeAsCString(LayerType type);
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index a801431..9837cd3 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -53,6 +53,7 @@
 #include "layers/StridedSliceLayer.hpp"
 #include "layers/SubtractionLayer.hpp"
 #include "layers/SwitchLayer.hpp"
+#include "layers/TransposeConvolution2dLayer.hpp"
 
 namespace armnn
 {
@@ -128,5 +129,6 @@
 DECLARE_LAYER(StridedSlice)
 DECLARE_LAYER(Subtraction)
 DECLARE_LAYER(Switch)
+DECLARE_LAYER(TransposeConvolution2d)
 
 }
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 75b63e4..9436fc6 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1008,6 +1008,28 @@
     return m_Graph->AddLayer<PreluLayer>(name);
 }
 
+IConnectableLayer* Network::AddTransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& descriptor,
+                                                           const ConstTensor& weights,
+                                                           const Optional<ConstTensor>& biases,
+                                                           const char* name)
+{
+    if (descriptor.m_BiasEnabled && !biases.has_value())
+    {
+        throw InvalidArgumentException("AddTransposeConvolution2dLayer: Biases cannot be empty");
+    }
+
+    const auto layer = m_Graph->AddLayer<TransposeConvolution2dLayer>(descriptor, name);
+
+    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
+
+    if (descriptor.m_BiasEnabled)
+    {
+        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.value());
+    }
+
+    return layer;
+}
+
 void Network::Accept(ILayerVisitor& visitor) const
 {
     for (auto layer : GetGraph())
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index e1379d0..b90e3c2 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -187,6 +187,11 @@
 
     IConnectableLayer* AddPreluLayer(const char* name = nullptr) override;
 
+    IConnectableLayer* AddTransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& descriptor,
+                                                      const ConstTensor& weights,
+                                                      const Optional<ConstTensor>& biases,
+                                                      const char* name = nullptr) override;
+
     void Accept(ILayerVisitor& visitor) const override;
 
 private:
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.cpp b/src/armnn/layers/TransposeConvolution2dLayer.cpp
new file mode 100644
index 0000000..69f598d
--- /dev/null
+++ b/src/armnn/layers/TransposeConvolution2dLayer.cpp
@@ -0,0 +1,132 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "TransposeConvolution2dLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <armnn/TypesUtils.hpp>
+
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+#include <DataLayoutIndexed.hpp>
+
+using namespace armnnUtils;
+
+namespace armnn
+{
+
+TransposeConvolution2dLayer::TransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& param,
+                                                         const char* name)
+    : LayerWithParameters(1, 1, LayerType::TransposeConvolution2d, param, name)
+{
+}
+
+std::unique_ptr<IWorkload> TransposeConvolution2dLayer::CreateWorkload(const Graph& graph,
+                                                                       const IWorkloadFactory& factory) const
+{
+    BOOST_ASSERT_MSG(m_Weight != nullptr, "TransposeConvolution2dLayer: Weights data should not be null.");
+
+    TransposeConvolution2dQueueDescriptor descriptor;
+    descriptor.m_Weight = m_Weight.get();
+
+    if (m_Param.m_BiasEnabled)
+    {
+        BOOST_ASSERT_MSG(m_Bias != nullptr, "TransposeConvolution2dLayer: Bias data should not be null.");
+        descriptor.m_Bias = m_Bias.get();
+    }
+
+    return factory.CreateTransposeConvolution2d(descriptor, PrepInfoAndDesc(descriptor, graph));
+}
+
+TransposeConvolution2dLayer* TransposeConvolution2dLayer::Clone(Graph& graph) const
+{
+    auto layer = CloneBase<TransposeConvolution2dLayer>(graph, m_Param, GetName());
+
+    layer->m_Weight = m_Weight ? std::make_unique<ScopedCpuTensorHandle>(*m_Weight) : nullptr;
+
+    if (layer->m_Param.m_BiasEnabled)
+    {
+        layer->m_Bias = m_Bias ? std::make_unique<ScopedCpuTensorHandle>(*m_Bias) : nullptr;
+    }
+
+    return std::move(layer);
+}
+
+std::vector<TensorShape> TransposeConvolution2dLayer::InferOutputShapes(
+    const std::vector<TensorShape>& inputShapes) const
+{
+    BOOST_ASSERT(inputShapes.size() == 2);
+    const TensorShape& inputShape  = inputShapes[0];
+    const TensorShape& kernelShape = inputShapes[1];
+
+    BOOST_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Transpose convolutions will always have 4D input");
+
+    DataLayoutIndexed dataLayoutIndex(m_Param.m_DataLayout);
+
+    unsigned int inBatchSize = inputShape[0];
+    unsigned int inWidth     = inputShape[dataLayoutIndex.GetWidthIndex()];
+    unsigned int inHeight    = inputShape[dataLayoutIndex.GetHeightIndex()];
+    unsigned int inChannels  = inputShape[dataLayoutIndex.GetChannelsIndex()];
+
+    unsigned int kernelWidth  = kernelShape[dataLayoutIndex.GetWidthIndex()];
+    unsigned int kernelHeight = kernelShape[dataLayoutIndex.GetHeightIndex()];
+
+    unsigned int totalPaddingX = m_Param.m_PadLeft + m_Param.m_PadRight;
+    unsigned int totalPaddingY = m_Param.m_PadTop + m_Param.m_PadBottom;
+
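+    // Standard transpose convolution output size, per spatial dimension:
+    //     out = stride * (in - 1) + kernel - (padBegin + padEnd)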
+    unsigned int outWidth  = m_Param.m_StrideX * (inWidth  - 1) + kernelWidth  - totalPaddingX;
+    unsigned int outHeight = m_Param.m_StrideY * (inHeight - 1) + kernelHeight - totalPaddingY;
+
+    unsigned int outChannels  = inChannels;
+    unsigned int outBatchSize = inBatchSize;
+
+    TensorShape tensorShape = m_Param.m_DataLayout == armnn::DataLayout::NHWC ?
+         TensorShape( { outBatchSize, outHeight, outWidth, outChannels } ) :
+         TensorShape( { outBatchSize, outChannels, outHeight, outWidth });
+
+    return std::vector<TensorShape>({ tensorShape });
+}
+
+void TransposeConvolution2dLayer::ValidateTensorShapesFromInputs()
+{
+    VerifyLayerConnections(1, CHECK_LOCATION());
+
+    BOOST_ASSERT_MSG(m_Weight != nullptr, "TransposeConvolution2dLayer: Weight data cannot be null.");
+
+    auto inferredShapes = InferOutputShapes({
+         GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
+         m_Weight->GetTensorInfo().GetShape() });
+
+    BOOST_ASSERT(inferredShapes.size() == 1);
+
+    ConditionalThrowIfNotEqual<LayerValidationException>(
+        "TransposeConvolution2dLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+        GetOutputSlot(0).GetTensorInfo().GetShape(),
+        inferredShapes[0]);
+}
+
+Layer::ConstantTensors TransposeConvolution2dLayer::GetConstantTensorsByRef()
+{
+    return {m_Weight, m_Bias};
+}
+
+void TransposeConvolution2dLayer::Accept(ILayerVisitor& visitor) const
+{
+    ConstTensor weightsTensor(m_Weight->GetTensorInfo(), m_Weight->Map(true));
+    Optional<ConstTensor> optionalBiasTensor = EmptyOptional();
+
+    if (GetParameters().m_BiasEnabled)
+    {
+        ConstTensor biasTensor(m_Bias->GetTensorInfo(), m_Bias->Map(true));
+        optionalBiasTensor = Optional<ConstTensor>(biasTensor);
+    }
+
+    visitor.VisitTransposeConvolution2dLayer(this, GetParameters(), weightsTensor, optionalBiasTensor, GetName());
+}
+
+} // namespace armnn
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.hpp b/src/armnn/layers/TransposeConvolution2dLayer.hpp
new file mode 100644
index 0000000..4dc4644
--- /dev/null
+++ b/src/armnn/layers/TransposeConvolution2dLayer.hpp
@@ -0,0 +1,59 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+class ScopedCpuTensorHandle;
+
+/// This layer represents a 2D transpose convolution operation.
+class TransposeConvolution2dLayer : public LayerWithParameters<TransposeConvolution2dDescriptor>
+{
+public:
+    /// A unique pointer to store weight values.
+    std::unique_ptr<ScopedCpuTensorHandle> m_Weight;
+    /// A unique pointer to store bias values.
+    std::unique_ptr<ScopedCpuTensorHandle> m_Bias;
+
+    /// Makes a workload for the TransposeConvolution2d type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
+    virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+                                                      const IWorkloadFactory& factory) const override;
+
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
+    TransposeConvolution2dLayer* Clone(Graph& graph) const override;
+
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref TransposeConvolution2dLayer.
+    void ValidateTensorShapesFromInputs() override;
+
+    /// Infers the output shapes from given input shapes and layer properties.
+    /// @param [in] inputShapes The input shapes the layer has.
+    /// @return A vector of the inferred output shape.
+    std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+    void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+    /// Constructor to create a TransposeConvolution2dLayer.
+    /// @param [in] param TransposeConvolution2dDescriptor to configure the 2D transpose convolution operation.
+    /// @param [in] name Optional name for the layer.
+    TransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& param, const char* name);
+
+    /// Default destructor
+    ~TransposeConvolution2dLayer() = default;
+
+    /// Retrieve the handles to the constant values stored by the layer.
+    /// @return A vector of the constant tensors stored by this layer.
+    ConstantTensors GetConstantTensorsByRef() override;
+};
+
+} // namespace armnn
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index 81231e4..dabe977 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -943,6 +943,16 @@
     CreateAnyLayer(fbSwitchLayer.o, serializer::Layer::Layer_SwitchLayer);
 }
 
+void SerializerVisitor::VisitTransposeConvolution2dLayer(
+    const armnn::IConnectableLayer* layer,
+    const armnn::TransposeConvolution2dDescriptor& descriptor,
+    const armnn::ConstTensor& weights,
+    const armnn::Optional<armnn::ConstTensor>& biases,
+    const char* name)
+{
+    throw UnimplementedException("SerializerVisitor::VisitTransposeConvolution2dLayer is not implemented");
+}
+
 fb::Offset<serializer::LayerBase> SerializerVisitor::CreateLayerBase(const IConnectableLayer* layer,
                                                                      const serializer::LayerType layerType)
 {
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index aae8799..31f7d05 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -206,6 +206,13 @@
 
     void VisitSwitchLayer(const armnn::IConnectableLayer* layer,
                           const char* name = nullptr) override;
+
+    void VisitTransposeConvolution2dLayer(const armnn::IConnectableLayer* layer,
+                                          const armnn::TransposeConvolution2dDescriptor& descriptor,
+                                          const armnn::ConstTensor& weights,
+                                          const armnn::Optional<armnn::ConstTensor>& biases,
+                                          const char* name = nullptr) override;
+
 private:
 
     /// Creates the Input Slots and Output Slots and LayerBase for the layer.
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index 12e4ee8..2eb0e41 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -448,4 +448,14 @@
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
+bool LayerSupportBase::IsTransposeConvolution2dSupported(const TensorInfo& input,
+                                                         const TensorInfo& output,
+                                                         const TransposeConvolution2dDescriptor& descriptor,
+                                                         const TensorInfo& weights,
+                                                         const Optional<TensorInfo>& biases,
+                                                         Optional<std::string&> reasonIfUnsupported) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
 } // namespace armnn
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index d035dfc..52ba5b2 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -282,6 +282,14 @@
                            const TensorInfo& output0,
                            const TensorInfo& output1,
                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsTransposeConvolution2dSupported(
+        const TensorInfo& input,
+        const TensorInfo& output,
+        const TransposeConvolution2dDescriptor& descriptor,
+        const TensorInfo& weights,
+        const Optional<TensorInfo>& biases,
+        Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 };
 
 } // namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index adba86c..5ca4928 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -1800,4 +1800,45 @@
                                        "alpha");
 }
 
+void TransposeConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+    const std::string descriptorName{"TransposeConvolution2dQueueDescriptor"};
+
+    ValidateNumInputs(workloadInfo,  descriptorName, 1);
+    ValidateNumOutputs(workloadInfo, descriptorName, 1);
+
+    ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], descriptorName, 4, "input");
+    ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], descriptorName, 4, "output");
+
+    ValidatePointer(m_Weight, descriptorName, "weight");
+    ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), descriptorName, 4, "weight");
+
+    ValidateTensorDataType(m_Weight->GetTensorInfo(),
+                           workloadInfo.m_InputTensorInfos[0].GetDataType(),
+                           descriptorName,
+                           "weight");
+
+    if (m_Parameters.m_BiasEnabled)
+    {
+        ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), descriptorName, 1, "bias");
+
+        ValidateTensorDataType(m_Bias->GetTensorInfo(),
+                               GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
+                               descriptorName, "bias");
+
+        ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
+                                       workloadInfo.m_InputTensorInfos[0],
+                                       m_Weight->GetTensorInfo(),
+                                       descriptorName);
+    }
+
+    ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0],
+                                         m_Weight->GetTensorInfo(),
+                                         workloadInfo.m_OutputTensorInfos[0],
+                                         descriptorName,
+                                         "input",
+                                         "weights",
+                                         "output");
+}
+
 } //namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index 6a51bc3..7447583 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -445,4 +445,17 @@
     void Validate(const WorkloadInfo& workloadInfo) const;
 };
 
+struct TransposeConvolution2dQueueDescriptor : QueueDescriptorWithParameters<TransposeConvolution2dDescriptor>
+{
+    TransposeConvolution2dQueueDescriptor() :
+        m_Weight(nullptr),
+        m_Bias(nullptr)
+    {}
+
+    const ConstCpuTensorHandle* m_Weight;
+    const ConstCpuTensorHandle* m_Bias;
+
+    void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
 } //namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index cca3919..2fba3b7 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -796,6 +796,36 @@
                                                           reason);
             break;
         }
+        case LayerType::TransposeConvolution2d:
+        {
+            auto cLayer = boost::polymorphic_downcast<const TransposeConvolution2dLayer*>(&layer);
+
+            const TensorInfo input  = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
+                                                       dataType);
+            const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
+
+            const TransposeConvolution2dDescriptor& descriptor  = cLayer->GetParameters();
+
+            Optional<TensorInfo> biases;
+            if (descriptor.m_BiasEnabled)
+            {
+                BOOST_ASSERT(cLayer->m_Bias.get() != nullptr);
+                biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(),
+                                          GetBiasTypeFromWeightsType(dataType));
+            }
+
+            BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
+            const TensorInfo weights = OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType);
+
+            result = layerSupportObject->IsTransposeConvolution2dSupported(input,
+                                                                           output,
+                                                                           descriptor,
+                                                                           weights,
+                                                                           biases,
+                                                                           reason);
+
+            break;
+        }
         default:
         {
             BOOST_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
@@ -1098,4 +1128,11 @@
     return std::unique_ptr<IWorkload>();
 }
 
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateTransposeConvolution2d(
+    const TransposeConvolution2dQueueDescriptor& descriptor,
+    const WorkloadInfo& info) const
+{
+    return std::unique_ptr<IWorkload>();
 }
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index c9fbe71..978d3a3 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -190,6 +190,10 @@
 
     virtual std::unique_ptr<IWorkload> CreateSwitch(const SwitchQueueDescriptor& descriptor,
                                                     const WorkloadInfo& Info) const;
+
+    virtual std::unique_ptr<IWorkload> CreateTransposeConvolution2d(
+        const TransposeConvolution2dQueueDescriptor& descriptor,
+        const WorkloadInfo& info) const;
 };
 
-} //namespace armnn
+} // namespace armnn
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 111cf8f..7c9d0f5 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -212,6 +212,12 @@
 {
 };
 
+template<>
+struct DummyLayer<armnn::TransposeConvolution2dLayer>
+    : public DummyConvolutionLayer<armnn::TransposeConvolution2dLayer>
+{
+};
+
 template <typename LstmLayerType>
 struct DummyLstmLayer
 {
@@ -408,6 +414,8 @@
 
 DECLARE_LAYER_POLICY_1_PARAM(Switch)
 
+DECLARE_LAYER_POLICY_2_PARAM(TransposeConvolution2d)
+
 
 // Generic implementation to get the number of input slots for a given layer type;
 template<armnn::LayerType Type>