IVGCVSW-4988 Add handling of the output shape parameter to TransposeConvolution2d

* Add m_OutputShape and m_OutputShapeEnabled to
  TransposeConvolution2dDescriptor.
* Update the TfLite parser to populate m_OutputShape when it is found in
  the model. Handle both Signed32 data from TFLite files and QAsymmU8
  data from test fixtures.
* Update TransposeConvolution2dLayer to use m_OutputShape, when it is
  specified, instead of InferOutputShapes (see the sketch below).
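
A minimal usage sketch of the new descriptor fields (illustrative only;
the shape values and layout below are placeholders, not taken from this
change). When m_OutputShapeEnabled is set, the layer validates the
output slot against m_OutputShape rather than the result of
InferOutputShapes:

    #include <armnn/Descriptors.hpp>

    armnn::TransposeConvolution2dDescriptor descriptor;
    descriptor.m_StrideX            = 2;
    descriptor.m_StrideY            = 2;
    descriptor.m_BiasEnabled        = false;
    descriptor.m_DataLayout         = armnn::DataLayout::NHWC;

    // Explicit output shape (N, H, W, C); placeholder values for illustration.
    descriptor.m_OutputShapeEnabled = true;
    descriptor.m_OutputShape        = { 1, 8, 8, 16 };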

Signed-off-by: Colm Donelan <Colm.Donelan@arm.com>
Change-Id: Ia6933065375eb8006c916f1ca67c38dc50bc205c
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index 60aa219..241b23d 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -1203,37 +1203,44 @@
         m_StrideX(0),
         m_StrideY(0),
         m_BiasEnabled(false),
-        m_DataLayout(DataLayout::NCHW)
+        m_DataLayout(DataLayout::NCHW),
+        m_OutputShapeEnabled(false)
     {}
 
     bool operator ==(const TransposeConvolution2dDescriptor& rhs) const
     {
-        return m_PadLeft     == rhs.m_PadLeft &&
-               m_PadRight    == rhs.m_PadRight &&
-               m_PadTop      == rhs.m_PadTop &&
-               m_PadBottom   == rhs.m_PadBottom &&
-               m_StrideX     == rhs.m_StrideX &&
-               m_StrideY     == rhs.m_StrideY &&
-               m_BiasEnabled == rhs.m_BiasEnabled &&
-               m_DataLayout  == rhs.m_DataLayout;
+        return m_PadLeft            == rhs.m_PadLeft &&
+               m_PadRight           == rhs.m_PadRight &&
+               m_PadTop             == rhs.m_PadTop &&
+               m_PadBottom          == rhs.m_PadBottom &&
+               m_StrideX            == rhs.m_StrideX &&
+               m_StrideY            == rhs.m_StrideY &&
+               m_BiasEnabled        == rhs.m_BiasEnabled &&
+               m_DataLayout         == rhs.m_DataLayout &&
+               m_OutputShapeEnabled == rhs.m_OutputShapeEnabled &&
+               m_OutputShape        == rhs.m_OutputShape;
     }
 
     /// Padding left value in the width dimension.
-    uint32_t   m_PadLeft;
+    uint32_t                  m_PadLeft;
     /// Padding right value in the width dimension.
-    uint32_t   m_PadRight;
+    uint32_t                  m_PadRight;
     /// Padding top value in the height dimension.
-    uint32_t   m_PadTop;
+    uint32_t                  m_PadTop;
     /// Padding bottom value in the height dimension.
-    uint32_t   m_PadBottom;
+    uint32_t                  m_PadBottom;
     /// Stride value when proceeding through input for the width dimension.
-    uint32_t   m_StrideX;
+    uint32_t                  m_StrideX;
     /// Stride value when proceeding through input for the height dimension.
-    uint32_t   m_StrideY;
+    uint32_t                  m_StrideY;
     /// Enable/disable bias.
-    bool       m_BiasEnabled;
+    bool                      m_BiasEnabled;
     /// The data layout to be used (NCHW, NHWC).
-    DataLayout m_DataLayout;
+    DataLayout                m_DataLayout;
+    /// Set to true if an output shape has been specified for this layer.
+    bool                      m_OutputShapeEnabled;
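+    /// Output shape if it has been specified.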
+    std::vector<unsigned int> m_OutputShape;
 };
 
 /// A TransposeDescriptor for the TransposeLayer.
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.cpp b/src/armnn/layers/TransposeConvolution2dLayer.cpp
index ffe92bb..8a26425 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.cpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.cpp
@@ -111,16 +111,26 @@
 
     ARMNN_ASSERT_MSG(m_Weight != nullptr, "TransposeConvolution2dLayer: Weight data cannot be null.");
 
-    auto inferredShapes = InferOutputShapes({
-         GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
-         m_Weight->GetTensorInfo().GetShape() });
+    std::vector<TensorShape> expectedOutputShape;
+    // If an output shape was specified then use it rather than calculating an inferred output shape.
+    if (m_Param.m_OutputShapeEnabled)
+    {
+        TensorShape shapeAsTensorShape(static_cast<unsigned int>(m_Param.m_OutputShape.size()),
+                                       m_Param.m_OutputShape.data());
+        expectedOutputShape.push_back(shapeAsTensorShape);
+    }
+    else
+    {
+        expectedOutputShape = InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
+                                                 m_Weight->GetTensorInfo().GetShape() });
+    }
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(expectedOutputShape.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "TransposeConvolution2dLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
         GetOutputSlot(0).GetTensorInfo().GetShape(),
-        inferredShapes[0]);
+        expectedOutputShape[0]);
 }
 
 Layer::ConstantTensors TransposeConvolution2dLayer::GetConstantTensorsByRef()
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index bad2504..1b93aad 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -1082,6 +1082,29 @@
     auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
     CHECK_VALID_SIZE(outputs.size(), 1);
 
+    if (inputs[0])
+    {
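+        // inputs[0] holds the TransposeConv output_shape tensor; copy it into the descriptor.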
+        armnn::TensorInfo tensorInfo = ToTensorInfo(inputs[0]);
+        std::vector<int> output_shape(tensorInfo.GetNumElements());
+        if (tensorInfo.GetDataType() == DataType::Signed32)
+        {
+            ::memcpy(output_shape.data(), GetBuffer(m_Model, inputs[0]->buffer)->data.data(), tensorInfo.GetNumBytes());
+        }
+        if (tensorInfo.GetDataType() == DataType::QAsymmU8)
+        {
+            for (unsigned int i = 0; i < tensorInfo.GetNumElements(); ++i)
+            {
+                output_shape[i] = GetBuffer(m_Model, inputs[0]->buffer)->data.data()[i];
+            }
+        }
+        // Change from signed to unsigned int to store in TransposeConvolution2dDescriptor.
+        for (int dimension : output_shape)
+        {
+            desc.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
+        }
+        desc.m_OutputShapeEnabled = true;
+    }
     armnn::TensorInfo inputTensorInfo  = ToTensorInfo(inputs[2]);
     armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);