IVGCVSW-4931 Update NN Driver to support dynamic tensors

 * Create the NN Driver's m_Network with ShapeInferenceMethod::InferAndValidate
 * Implement dynamic tensor support for:
    - ArgMinMax layer
    - Pooling2d layer
    - Activation layer
 * Skip dynamic tensor tests for any HAL other than 1.3

Change-Id: Icf66c968e49cdd4822b8c79c5f18b3f9e97dc53f
Signed-off-by: Finn Williams <Finn.Williams@Arm.com>
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
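
For context (not part of the patch): the NN HAL encodes a tensor of unknown
rank as an operand with an empty dimensions array, and an individual dimension
of unknown size as a 0 entry. The changes below translate that convention into
ArmNN dimension-specificity flags. A minimal standalone sketch of the mapping
(MakeShape is an illustrative name; the armnn types are the public API used in
Utils.cpp below):

    #include <armnn/Tensor.hpp>
    #include <vector>

    armnn::TensorShape MakeShape(const std::vector<unsigned int>& dims)
    {
        if (dims.empty())
        {
            // Rank itself is unknown.
            return armnn::TensorShape(armnn::Dimensionality::NotSpecified);
        }

        // A 0 entry marks that dimension as "size unknown" (max 5D, as in Utils.cpp).
        bool specificity[5] = { true, true, true, true, true };
        for (unsigned int i = 0; i < dims.size() && i < 5u; ++i)
        {
            specificity[i] = (dims[i] != 0);
        }
        return armnn::TensorShape(static_cast<unsigned int>(dims.size()),
                                  dims.data(), specificity);
    }
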
diff --git a/ArmnnPreparedModel.cpp b/ArmnnPreparedModel.cpp
index f990d3b..8aa28d7 100644
--- a/ArmnnPreparedModel.cpp
+++ b/ArmnnPreparedModel.cpp
@@ -48,7 +48,7 @@
 
         for (unsigned int d = 0; d < tensorInfo.GetNumDimensions(); ++d)
         {
-            if (requestArg.dimensions[d] != tensorInfo.GetShape()[d])
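+            // A request dimension of 0 means "unspecified" and is not a mismatch.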
+            if (requestArg.dimensions[d] != 0 && requestArg.dimensions[d] != tensorInfo.GetShape()[d])
             {
                 ALOGE("Mismatched size for dimension %d (request argument: %u, expected %u)",
                     d, requestArg.dimensions[d], tensorInfo.GetShape()[d]);
diff --git a/ArmnnPreparedModel_1_2.cpp b/ArmnnPreparedModel_1_2.cpp
index 76ef426..dd5bdae 100644
--- a/ArmnnPreparedModel_1_2.cpp
+++ b/ArmnnPreparedModel_1_2.cpp
@@ -80,7 +80,7 @@
 
         for (unsigned int d = 0; d < tensorInfo.GetNumDimensions(); ++d)
         {
-            if (requestArg.dimensions[d] != tensorInfo.GetShape()[d])
+            if (requestArg.dimensions[d] != 0 && requestArg.dimensions[d] != tensorInfo.GetShape()[d])
             {
                 ALOGE("Mismatched size for dimension %d (request argument: %u, expected %u)",
                       d, requestArg.dimensions[d], tensorInfo.GetShape()[d]);
@@ -259,10 +259,17 @@
         }
 
         const size_t outputSize = outputTensorInfo.GetNumBytes();
+
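+        // With dynamic outputs the client's buffer can be smaller than the
+        // inferred output; report OUTPUT_INSUFFICIENT_SIZE rather than overrun it.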
+        if (outputArg.location.length < outputSize)
+        {
+            ALOGW("ArmnnPreparedModel_1_2::Execute failed: outputArg.location.length < outputSize");
+            return V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
+        }
+
         const size_t bufferSize = memPools.at(outputArg.location.poolIndex).getHidlMemory().size();
         if (bufferSize < outputSize)
         {
-            ALOGW("ArmnnPreparedModel_1_2::Execute failed");
+            ALOGW("ArmnnPreparedModel_1_2::Execute failed: bufferSize < outputSize");
             return V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
         }
 
diff --git a/ArmnnPreparedModel_1_3.cpp b/ArmnnPreparedModel_1_3.cpp
index c7adc6c..a27c7a3 100644
--- a/ArmnnPreparedModel_1_3.cpp
+++ b/ArmnnPreparedModel_1_3.cpp
@@ -101,7 +101,7 @@
 
         for (unsigned int d = 0; d < tensorInfo.GetNumDimensions(); ++d)
         {
-            if (requestArg.dimensions[d] != tensorInfo.GetShape()[d])
+            if (requestArg.dimensions[d] != 0 && requestArg.dimensions[d] != tensorInfo.GetShape()[d])
             {
                 ALOGE("Mismatched size for dimension %d (request argument: %u, expected %u)",
                       d, requestArg.dimensions[d], tensorInfo.GetShape()[d]);
@@ -309,6 +309,13 @@
         ALOGW("ArmnnPreparedModel_1_3::executeFenced parameter loopTimeoutDuration is set but not supported.");
     }
 
+    if (!android::nn::validateRequest(request, m_Model, /*allowUnspecifiedOutput=*/false))
+    {
+        ALOGV("ArmnnPreparedModel_1_3::executeFenced outputs must be specified for fenced execution ");
+        cb(ErrorStatus::INVALID_ARGUMENT, hidl_handle(nullptr), nullptr);
+        return Void();
+    }
+
     ExecutionContext_1_3 ctx;
     if (measureTiming == MeasureTiming::YES)
     {
@@ -319,12 +326,6 @@
     ALOGV("ArmnnPreparedModel_1_3::executeFenced(): %s", GetModelSummary(m_Model).c_str());
     m_RequestCount++;
 
-    if (!android::nn::validateRequest(request, m_Model))
-    {
-        cb(ErrorStatus::INVALID_ARGUMENT, hidl_handle(nullptr), nullptr);
-        return Void();
-    }
-
     if (!m_RequestInputsAndOutputsDumpDir.empty())
     {
         ALOGD("Dumping inputs and outputs for request %" PRIuPTR, reinterpret_cast<std::uintptr_t>(&cb));
@@ -442,7 +443,7 @@
     {
         const auto& outputArg = request.outputs[i];
 
-        const armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkId, i);
+        armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkId, i);
         const armnn::Tensor outputTensor = GetTensorForRequestArgument(outputArg, outputTensorInfo, memPools);
         if (outputTensor.GetMemoryArea() == nullptr)
         {
@@ -450,16 +451,40 @@
             return V1_3::ErrorStatus::GENERAL_FAILURE;
         }
 
+        unsigned int count = 0;
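+        // Overwrite the inferred shape with any concrete dimensions supplied in
+        // the request (non-zero entries) before computing the required size.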
+        std::for_each(outputArg.dimensions.begin(), outputArg.dimensions.end(), [&](auto dim)
+        {
+            if (dim != 0)
+            {
+                outputTensorInfo.GetShape()[count] = dim;
+            }
+            else
+            {
+                outputTensorInfo.GetShape()[count] = outputArg.dimensions.size();
+            }
+
+            count++;
+        });
+
         const size_t outputSize = outputTensorInfo.GetNumBytes();
+
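+        // Record output shapes before the size checks: NNAPI requires them to
+        // be reported even when returning OUTPUT_INSUFFICIENT_SIZE.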
+        outputs.emplace_back(i, outputTensor);
+        outputShapes[i] = ComputeShape(outputTensorInfo);
+
+        if (outputArg.location.length < outputSize)
+        {
+            ALOGW("ArmnnPreparedModel_1_3::Execute failed");
+            outputShapes[i].isSufficient = false;
+            return V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
+        }
+
         const size_t bufferSize = memPools.at(outputArg.location.poolIndex).getHidlMemory().size();
         if (bufferSize < outputSize)
         {
             ALOGW("ArmnnPreparedModel_1_3::Execute failed");
+            outputShapes[i].isSufficient = false;
             return V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
         }
-
-        outputs.emplace_back(i, outputTensor);
-        outputShapes[i] = ComputeShape(outputTensorInfo);
     }
 
     return V1_3::ErrorStatus::NONE;
diff --git a/ConversionUtils.cpp b/ConversionUtils.cpp
index 4c77396..b03ffbd 100644
--- a/ConversionUtils.cpp
+++ b/ConversionUtils.cpp
@@ -38,6 +38,15 @@
     }
 }
 
+void LayerInputHandle::Disconnect(armnn::IInputSlot& inputSlot)
+{
+    ARMNN_ASSERT(IsValid());
+    if (m_OutputSlot)
+    {
+        m_OutputSlot->Disconnect(inputSlot);
+    }
+}
+
 const armnn::TensorInfo& LayerInputHandle::GetTensorInfo() const
 {
     return m_TensorInfo;
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index 5dc9993..474d1a5 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -62,6 +62,8 @@
 
     void Connect(armnn::IInputSlot& inputSlot);
 
+    void Disconnect(armnn::IInputSlot& inputSlot);
+
     const armnn::TensorInfo& GetTensorInfo() const;
 
 private:
@@ -1380,7 +1382,8 @@
                                   uint32_t layerOutputIndex,
                                   const HalModel& model,
                                   ConversionData& data,
-                                  const armnn::TensorInfo* overrideOutputInfo = nullptr)
+                                  const armnn::TensorInfo* overrideOutputInfo = nullptr,
+                                  const std::function<void(const armnn::TensorInfo&, bool&)>& validateFunc = nullptr)
 {
     using HalOperand = typename HalPolicy::Operand;
 
@@ -1392,18 +1395,39 @@
 
     armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
 
-    const uint32_t operandIndex = operation.outputs[operationOutputIndex];
-    data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
-
-    if (overrideOutputInfo == nullptr)
+    bool isSupported = false;
+    if (validateFunc &&
+        layer.GetInputSlot(0).GetConnection() &&
+        IsDynamicTensor(outputSlot.GetTensorInfo()))
     {
-        outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
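+        // IsTensorInfoSet() is queried for its side effect: with InferAndValidate
+        // enabled it runs shape inference on the slot, so validateFunc is handed
+        // a concrete output shape to check.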
+        outputSlot.IsTensorInfoSet();
+        validateFunc(outputSlot.GetTensorInfo(), isSupported);
+
+        if (!isSupported)
+        {
+            for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
+            {
+                layer.GetInputSlot(inputSlotIndex).GetConnection()->Disconnect(layer.GetInputSlot(inputSlotIndex));
+            }
+
+            return false;
+        }
     }
     else
     {
-        outputSlot.SetTensorInfo(*overrideOutputInfo);
+        if (overrideOutputInfo == nullptr)
+        {
+            outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
+        }
+        else
+        {
+            outputSlot.SetTensorInfo(*overrideOutputInfo);
+        }
     }
 
+    const uint32_t operandIndex = operation.outputs[operationOutputIndex];
+    data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
+
     return true;
 }
 
@@ -1452,7 +1476,8 @@
                                   armnn::IConnectableLayer& layer,
                                   const HalModel& model,
                                   ConversionData& data,
-                                  const armnn::TensorInfo* overrideOutputInfo = nullptr)
+                                  const armnn::TensorInfo* overrideOutputInfo = nullptr,
+                                  const std::function<void(const armnn::TensorInfo&, bool&)>& validateFunc = nullptr)
 {
     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
                                                    outputIndex,
@@ -1460,7 +1485,8 @@
                                                    outputIndex,
                                                    model,
                                                    data,
-                                                   overrideOutputInfo);
+                                                   overrideOutputInfo,
+                                                   validateFunc);
 }
 
 template<typename HalPolicy,
@@ -1487,19 +1513,29 @@
     }
 
     const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
-    if (IsDynamicTensor(outInfo))
-    {
-        return Fail("%s: Dynamic output tensors are not supported", __func__);
-    }
 
     bool isSupported = false;
-    FORWARD_LAYER_SUPPORT_FUNC(__func__,
-                               IsActivationSupported,
-                               data.m_Backends,
-                               isSupported,
-                               input.GetTensorInfo(),
-                               outInfo,
-                               activationDesc);
+
+    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsActivationSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   input.GetTensorInfo(),
+                                   outInfo,
+                                   activationDesc);
+    };
+
+    if (IsDynamicTensor(outInfo))
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+    else
+    {
+        validateFunc(outInfo, isSupported);
+    }
+
     if (!isSupported)
     {
         return false;
@@ -1509,7 +1545,7 @@
     ARMNN_ASSERT(layer != nullptr);
     input.Connect(layer->GetInputSlot(0));
 
-    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
 }
 
 template<typename HalPolicy,
@@ -1634,11 +1670,6 @@
     const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
 
-    if (IsDynamicTensor(outputInfo))
-    {
-        return Fail("%s: Dynamic output tensors are not supported", __func__);
-    }
-
     armnn::Pooling2dDescriptor desc;
     desc.m_PoolType = poolType;
     desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
@@ -1697,13 +1728,28 @@
     }
 
     bool isSupported = false;
-    FORWARD_LAYER_SUPPORT_FUNC(__func__,
-                               IsPooling2dSupported,
-                               data.m_Backends,
-                               isSupported,
-                               inputInfo,
-                               outputInfo,
-                               desc);
+
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsPooling2dSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   inputInfo,
+                                   outputInfo,
+                                   desc);
+    };
+
+    if (IsDynamicTensor(outputInfo))
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+    else
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+
     if (!isSupported)
     {
         return false;
@@ -1723,7 +1769,12 @@
 
     input.Connect(pooling2dLayer->GetInputSlot(0));
 
-    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data, nullptr, validateFunc);
 }
 
 template<typename HalPolicy,
@@ -1842,23 +1893,33 @@
     const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
 
     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-    if (IsDynamicTensor(outputInfo))
-    {
-        return Fail("%s: Dynamic output tensors are not supported", __func__);
-    }
 
     armnn::ArgMinMaxDescriptor descriptor;
     descriptor.m_Function = argMinMaxFunction;
     descriptor.m_Axis     = axis;
 
     bool isSupported = false;
-    FORWARD_LAYER_SUPPORT_FUNC(__func__,
-                               IsArgMinMaxSupported,
-                               data.m_Backends,
-                               isSupported,
-                               inputInfo0,
-                               outputInfo,
-                               descriptor);
+
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsArgMinMaxSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   inputInfo0,
+                                   outputInfo,
+                                   descriptor);
+    };
+
+    if (IsDynamicTensor(outputInfo))
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+    else
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+
     if (!isSupported)
     {
         return false;
@@ -1869,7 +1930,7 @@
 
     input0.Connect(layer->GetInputSlot(0));
 
-    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
 }
 
 template<typename HalPolicy,
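
All converters follow this same deferred-validation pattern. A standalone
distillation (not driver code: ToyTensorInfo and IsBackendSupported stand in
for armnn::TensorInfo and FORWARD_LAYER_SUPPORT_FUNC):

    #include <iostream>

    struct ToyTensorInfo { bool shapeKnown; };

    static bool IsBackendSupported(const ToyTensorInfo&) { return true; } // stand-in

    static bool ConvertOperation(const ToyTensorInfo& outInfo)
    {
        bool isSupported = false;

        // Backend validation is wrapped in a lambda so it can run either now
        // (static shape) or later, once shape inference has produced a shape.
        auto validateFunc = [&](const ToyTensorInfo& info, bool& supported)
        {
            supported = IsBackendSupported(info);
        };

        if (!outInfo.shapeKnown) // IsDynamicTensor() in the driver
        {
            // Defer: accept for now (AreDynamicTensorsSupported(), HAL 1.3 only).
            // SetupAndTrackLayerOutputSlot re-runs validateFunc after inference
            // and disconnects the layer's inputs if validation then fails.
            isSupported = true;
        }
        else
        {
            validateFunc(outInfo, isSupported);
        }
        return isSupported;
    }

    int main()
    {
        std::cout << ConvertOperation({ false }) << '\n'; // dynamic: deferred
        std::cout << ConvertOperation({ true })  << '\n'; // static: checked now
    }
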
diff --git a/ModelToINetworkConverter.cpp b/ModelToINetworkConverter.cpp
index 24fb490..0d6ddc3 100644
--- a/ModelToINetworkConverter.cpp
+++ b/ModelToINetworkConverter.cpp
@@ -59,8 +59,17 @@
         totalPoolSize += pool.size();
     }
 
+    // Ask ArmNN to infer missing (dynamic) tensor shapes as the network is
+    // built, validating any shapes that are already specified.
+    using NetworkOptions = std::vector<armnn::BackendOptions>;
+    NetworkOptions networkOptions;
+    armnn::BackendOptions shapeInferenceMethodOption("ShapeInferenceMethod",
+                                                     {
+                                                         { "InferAndValidate", true }
+                                                     });
+
+    networkOptions.push_back(shapeInferenceMethodOption);
+
     // Create armnn::INetwork
-    m_Data.m_Network = armnn::INetwork::Create();
+    m_Data.m_Network = armnn::INetwork::Create(networkOptions);
 
     // add operations to it
     // track which layer outputs each operand
diff --git a/Utils.cpp b/Utils.cpp
index 8a2812a..db1b6e6 100644
--- a/Utils.cpp
+++ b/Utils.cpp
@@ -80,7 +80,8 @@
 
 armnn::TensorInfo GetTensorInfoForOperand(const V1_0::Operand& operand)
 {
-    armnn::DataType type;
+    using namespace armnn;
+    DataType type;
 
     switch (operand.type)
     {
@@ -97,7 +98,30 @@
             throw UnsupportedOperand<V1_0::OperandType>(operand.type);
     }
 
-    armnn::TensorInfo ret(operand.dimensions.size(), operand.dimensions.data(), type);
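+    // NNAPI encodes unknown rank as an empty dimensions array and an unknown
+    // dimension size as 0; map both onto ArmNN's dimension-specificity flags.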
+    TensorInfo ret;
+    if (operand.dimensions.size() == 0)
+    {
+        TensorShape tensorShape(Dimensionality::NotSpecified);
+        ret = TensorInfo(tensorShape, type);
+    }
+    else
+    {
+        bool dimensionsSpecificity[5] = { true, true, true, true, true };
+        int count = 0;
+        std::for_each(operand.dimensions.data(),
+                      operand.dimensions.data() + operand.dimensions.size(),
+                      [&](const unsigned int val)
+                      {
+                          if (val == 0)
+                          {
+                              dimensionsSpecificity[count] = false;
+                          }
+                          count++;
+                      });
+
+        TensorShape tensorShape(operand.dimensions.size(), operand.dimensions.data(), dimensionsSpecificity);
+        ret = TensorInfo(tensorShape, type);
+    }
 
     ret.SetQuantizationScale(operand.scale);
     ret.SetQuantizationOffset(operand.zeroPoint);
@@ -143,7 +167,31 @@
             throw UnsupportedOperand<V1_2::OperandType>(operand.type);
     }
 
-    TensorInfo ret(operand.dimensions.size(), operand.dimensions.data(), type);
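+    // As above: map NNAPI's empty/zero dimensions onto specificity flags.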
+    TensorInfo ret;
+    if (operand.dimensions.size() == 0)
+    {
+        TensorShape tensorShape(Dimensionality::NotSpecified);
+        ret = TensorInfo(tensorShape, type);
+    }
+    else
+    {
+        bool dimensionsSpecificity[5] = { true, true, true, true, true };
+        int count = 0;
+        std::for_each(operand.dimensions.data(),
+                      operand.dimensions.data() + operand.dimensions.size(),
+                      [&](const unsigned int val)
+                      {
+                          if (val == 0)
+                          {
+                              dimensionsSpecificity[count] = false;
+                          }
+                          count++;
+                      });
+
+        TensorShape tensorShape(operand.dimensions.size(), operand.dimensions.data(), dimensionsSpecificity);
+        ret = TensorInfo(tensorShape, type);
+    }
+
     if (perChannel)
     {
         // ExtraParams is expected to be of type channelQuant
@@ -219,7 +267,29 @@
     }
     else
     {
-        ret = TensorInfo(operand.dimensions.size(), operand.dimensions.data(), type);
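+        // As above: map NNAPI's empty/zero dimensions onto specificity flags.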
+        if (operand.dimensions.size() == 0)
+        {
+            TensorShape tensorShape(Dimensionality::NotSpecified);
+            ret = TensorInfo(tensorShape, type);
+        }
+        else
+        {
+            bool dimensionsSpecificity[5] = { true, true, true, true, true };
+            int count = 0;
+            std::for_each(operand.dimensions.data(),
+                          operand.dimensions.data() + operand.dimensions.size(),
+                          [&](const unsigned int val)
+                          {
+                              if (val == 0)
+                              {
+                                  dimensionsSpecificity[count] = false;
+                              }
+                              count++;
+                          });
+
+            TensorShape tensorShape(operand.dimensions.size(), operand.dimensions.data(), dimensionsSpecificity);
+            ret = TensorInfo(tensorShape, type);
+        }
     }
 
     if (perChannel)
@@ -501,10 +571,22 @@
     return fileName;
 }
 
-bool IsDynamicTensor(const armnn::TensorInfo& outputInfo)
+bool IsDynamicTensor(const armnn::TensorInfo& tensorInfo)
 {
-    // Dynamic tensors have at least one 0-sized dimension
-    return outputInfo.GetNumElements() == 0u;
+    if (tensorInfo.GetShape().GetDimensionality() == armnn::Dimensionality::NotSpecified)
+    {
+        return true;
+    }
+    return !tensorInfo.GetShape().AreAllDimensionsSpecified();
+}
+
+bool AreDynamicTensorsSupported()
+{
+#if defined(ARMNN_ANDROID_NN_V1_3)
+    return true;
+#else
+    return false;
+#endif
 }
 
 std::string GetFileTimestamp()
@@ -568,7 +650,4 @@
 #endif
     }
 }
-
-
-
 } // namespace armnn_driver
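
For reference, the three shape cases IsDynamicTensor() now distinguishes can be
exercised directly against the public armnn::TensorShape API (standalone
sketch, not part of the patch):

    #include <armnn/Tensor.hpp>
    #include <iostream>

    int main()
    {
        using namespace armnn;

        // Fully specified: a static tensor.
        TensorShape fullyKnown({ 4, 4 });

        // One dimension unspecified (0 with specificity=false): dynamic.
        unsigned int dims[2] = { 4, 0 };
        bool spec[2] = { true, false };
        TensorShape partlyKnown(2, dims, spec);

        // Rank not specified at all: also dynamic.
        TensorShape unknownRank(Dimensionality::NotSpecified);

        std::cout << fullyKnown.AreAllDimensionsSpecified() << '\n';  // 1
        std::cout << partlyKnown.AreAllDimensionsSpecified() << '\n'; // 0
        std::cout << (unknownRank.GetDimensionality()
                      == Dimensionality::NotSpecified) << '\n';       // 1
    }
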
diff --git a/Utils.hpp b/Utils.hpp
index d58d273..86eb6db 100644
--- a/Utils.hpp
+++ b/Utils.hpp
@@ -143,6 +143,9 @@
 /// Checks if a tensor info represents a dynamic tensor
 bool IsDynamicTensor(const armnn::TensorInfo& outputInfo);
 
+/// Checks whether dynamic tensors are supported (HAL 1.3 builds only).
+bool AreDynamicTensorsSupported();
+
 std::string GetFileTimestamp();
 
 #if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)