IVGCVSW-3586 Fix Skipped Space_To_Batch Hal 1.2 VTS Failures

Signed-off-by: Finn Williams <Finn.Williams@arm.com>
Change-Id: I9d0d0a75d54c25075d8d87c6265e350486157f5c
diff --git a/1.1/HalPolicy.cpp b/1.1/HalPolicy.cpp
index 2fab474..e75b5c2 100644
--- a/1.1/HalPolicy.cpp
+++ b/1.1/HalPolicy.cpp
@@ -252,94 +252,7 @@
 bool HalPolicy::ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_1::HalPolicy::ConvertSpaceToBatchNd()");
-
-    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
-    if (!input.IsValid())
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
-    unsigned int rank = inputInfo.GetNumDimensions();
-    unsigned int spatialDim = rank - 2;
-
-    if (rank != 4)
-    {
-        Fail("%s: Only inputs with rank 4 are supported", __func__);
-    }
-
-    const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
-    if (!output)
-    {
-        return Fail("%s: Could not read output 0", __func__);
-    }
-
-    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-    if (IsDynamicTensor(outputInfo))
-    {
-        return Fail("%s: Dynamic output tensors are not supported", __func__);
-    }
-
-    const Operand* blockShapeOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model);
-    const Operand* paddingsOperand   = GetInputOperand<hal_1_1::HalPolicy>(operation, 2, model);
-
-    armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
-    if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
-    {
-        return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
-    }
-
-    std::vector<int32_t> blockShape;
-    GetTensorInt32Values<hal_1_1::HalPolicy>(*blockShapeOperand, blockShape, model, data);
-    if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
-    {
-        return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
-    }
-
-    armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
-    if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
-    {
-        return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
-    }
-
-    std::vector<std::pair<unsigned int, unsigned int>> paddingList;
-    std::vector<int32_t> paddings;
-    GetTensorInt32Values<hal_1_1::HalPolicy>(*paddingsOperand, paddings, model, data);
-    for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
-    {
-        int paddingBeforeInput = paddings[i];
-        int paddingAfterInput = paddings[i + 1];
-        if (paddingBeforeInput < 0 || paddingAfterInput < 0)
-        {
-            return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
-        }
-
-        paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
-    }
-
-    armnn::SpaceToBatchNdDescriptor descriptor;
-    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
-    descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
-    descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
-
-    bool isSupported = false;
-    FORWARD_LAYER_SUPPORT_FUNC(__func__,
-                               IsSpaceToBatchNdSupported,
-                               data.m_Backends,
-                               isSupported,
-                               inputInfo,
-                               outputInfo,
-                               descriptor);
-    if (!isSupported)
-    {
-        return false;
-    }
-
-    armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
-    assert(layer != nullptr);
-    input.Connect(layer->GetInputSlot(0));
-
-    return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *layer, model, data);
+    return ::ConvertSpaceToBatchNd<hal_1_1::HalPolicy>(operation, model, data);
 }
 
 bool HalPolicy::ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data)
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index 03a6446..af310c9 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -57,7 +57,6 @@
     {
         case V1_1::OperationType::DIV:
         case V1_1::OperationType::MEAN:
-        case V1_1::OperationType::SPACE_TO_BATCH_ND:
         case V1_1::OperationType::SQUEEZE:
         case V1_1::OperationType::STRIDED_SLICE:
         case V1_1::OperationType::TRANSPOSE:
@@ -163,6 +162,8 @@
             return ConvertTransposeConv2d(operation, model, data);
         case V1_2::OperationType::SOFTMAX:
             return ConvertSoftmax(operation, model, data);
+        case V1_2::OperationType::SPACE_TO_BATCH_ND:
+            return ConvertSpaceToBatchNd(operation, model, data);
         case V1_2::OperationType::SPACE_TO_DEPTH:
             return ConvertSpaceToDepth(operation, model, data);
         case V1_2::OperationType::SUB:
@@ -938,6 +939,12 @@
     return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
 }
 
+bool HalPolicy::ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data)
+{
+    ALOGV("hal_1_2::HalPolicy::ConvertSpaceToBatchNd()");
+    return ::ConvertSpaceToBatchNd<hal_1_2::HalPolicy>(operation, model, data);
+}
+
 bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_2::HalPolicy::ConvertSpaceToDepth()");
diff --git a/1.2/HalPolicy.hpp b/1.2/HalPolicy.hpp
index 409f7b4..8b8d501 100644
--- a/1.2/HalPolicy.hpp
+++ b/1.2/HalPolicy.hpp
@@ -68,6 +68,8 @@
 
     static bool ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data);
 
+    static bool ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data);
+
     static bool ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data);
 
     static bool ConvertSub(const Operation& operation, const Model& model, ConversionData& data);
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index 2b2a8ce..0349999 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -1885,4 +1885,104 @@
     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
 }
 
+template<typename HalPolicy,
+         typename HalOperation = typename HalPolicy::Operation,
+         typename HalOperand   = typename HalPolicy::Operand,
+         typename HalModel     = typename HalPolicy::Model>
+bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
+{
+    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+    unsigned int rank = inputInfo.GetNumDimensions();
+    unsigned int spatialDim = rank - 2;
+
+    if (rank != 4)
+    {
+        return Fail("%s: Only inputs with rank 4 are supported", __func__);
+    }
+
+    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
+    const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
+    const HalOperand* paddingsOperand   = GetInputOperand<HalPolicy>(operation, 2, model);
+
+    armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
+    if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
+    {
+        return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
+    }
+
+    std::vector<int32_t> blockShape;
+    GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data);
+    if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
+    {
+        return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
+    }
+
+    armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
+    if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
+    {
+        return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
+    }
+
+    std::vector<std::pair<unsigned int, unsigned int>> paddingList;
+    std::vector<int32_t> paddings;
+    GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data);
+    for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
+    {
+        int paddingBeforeInput = paddings[i];
+        int paddingAfterInput = paddings[i + 1];
+        if (paddingBeforeInput < 0 || paddingAfterInput < 0)
+        {
+            return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
+        }
+
+        paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
+    }
+
+    armnn::SpaceToBatchNdDescriptor descriptor;
+    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+    descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
+    descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
+
+    if (Is12Operand(*output))
+    {
+        descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
+    }
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsSpaceToBatchNdSupported,
+                               data.m_Backends,
+                               isSupported,
+                               inputInfo,
+                               outputInfo,
+                               descriptor);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
+}
+
 } // namespace armnn_driver