MLCE-110 Propagate error from armnn EnqueueWorkload

Check the armnn::Status returned by EnqueueWorkload in
ArmnnPreparedModel and notify the execution callback with
GENERAL_FAILURE when it is not Success. ExecuteWithDummyInputs now
returns a bool, so ArmnnDriverImpl can fail model preparation if the
initial dummy inference does not run successfully.

Change-Id: Ic53b1cdbdd3a7d656932651c74911940affc09b6
Signed-off-by: Matthew Bentham <matthew.bentham@arm.com>
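
For reference, a minimal sketch of the status check being introduced
(the runtime, network id and tensor variables below are illustrative
placeholders, not code from this patch):

    armnn::Status status = runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
    if (status != armnn::Status::Success)
    {
        // Surface the failure to the caller (execution callback or
        // FailPrepareModel) instead of continuing as if it had succeeded.
    }
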
diff --git a/ArmnnDriverImpl.cpp b/ArmnnDriverImpl.cpp
index a3c2e10..f6456ee 100644
--- a/ArmnnDriverImpl.cpp
+++ b/ArmnnDriverImpl.cpp
@@ -208,7 +208,10 @@
 
     // Run a single 'dummy' inference of the model. This means that CL kernels will get compiled (and tuned if
     // this is enabled) before the first 'real' inference which removes the overhead of the first inference.
-    preparedModel->ExecuteWithDummyInputs();
+    if (!preparedModel->ExecuteWithDummyInputs())
+    {
+        return FailPrepareModel(ErrorStatus::GENERAL_FAILURE, "Network could not be executed", cb);
+    }
 
     if (clTunedParameters &&
         options.GetClTunedParametersMode() == armnn::IGpuAccTunedParameters::Mode::UpdateTunedParameters)
diff --git a/ArmnnPreparedModel.cpp b/ArmnnPreparedModel.cpp
index d7f727f..edb1c93 100644
--- a/ArmnnPreparedModel.cpp
+++ b/ArmnnPreparedModel.cpp
@@ -239,7 +239,13 @@
     // run it
     try
     {
-        m_Runtime->EnqueueWorkload(m_NetworkId, *pInputTensors, *pOutputTensors);
+        armnn::Status status = m_Runtime->EnqueueWorkload(m_NetworkId, *pInputTensors, *pOutputTensors);
+        if (status != armnn::Status::Success)
+        {
+            ALOGW("EnqueueWorkload failed");
+            NotifyCallbackAndCheck(callback, ErrorStatus::GENERAL_FAILURE, "ArmnnPreparedModel::ExecuteGraph");
+            return;
+        }
     }
     catch (armnn::Exception& e)
     {
@@ -262,7 +268,7 @@
 }
 
 template<typename HalVersion>
-void ArmnnPreparedModel<HalVersion>::ExecuteWithDummyInputs()
+bool ArmnnPreparedModel<HalVersion>::ExecuteWithDummyInputs()
 {
     std::vector<std::vector<char>> storage;
     armnn::InputTensors inputTensors;
@@ -287,12 +293,19 @@
 
     try
     {
-        m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors);
+        armnn::Status status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors);
+        if (status != armnn::Status::Success)
+        {
+            ALOGW("ExecuteWithDummyInputs: EnqueueWorkload failed");
+            return false;
+        }
     }
     catch (armnn::Exception& e)
     {
         ALOGW("ExecuteWithDummyInputs: armnn::Exception caught from EnqueueWorkload: %s", e.what());
+        return false;
     }
+    return true;
 }
 
 ///
diff --git a/ArmnnPreparedModel.hpp b/ArmnnPreparedModel.hpp
index 3c4b32b..f6008b8 100644
--- a/ArmnnPreparedModel.hpp
+++ b/ArmnnPreparedModel.hpp
@@ -42,7 +42,8 @@
                       const ::android::sp<IExecutionCallback>& callback);
 
     /// Executes this model with dummy inputs (e.g. all zeroes).
-    void ExecuteWithDummyInputs();
+    /// \return false on failure, otherwise true
+    bool ExecuteWithDummyInputs();
 
 private:
     template <typename TensorBindingCollection>
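
Both call sites now follow the same pattern: a non-Success status and a
thrown armnn::Exception are treated as the same failure condition. A
hypothetical standalone helper (not part of this patch) illustrating
that pattern, assuming the usual armnn runtime types and the driver's
ALOGW logging macro:

    // Returns true only if the inference both ran without throwing and
    // reported armnn::Status::Success.
    bool RunInference(armnn::IRuntime& runtime,
                      armnn::NetworkId networkId,
                      const armnn::InputTensors& inputs,
                      const armnn::OutputTensors& outputs)
    {
        try
        {
            return runtime.EnqueueWorkload(networkId, inputs, outputs) == armnn::Status::Success;
        }
        catch (const armnn::Exception& e)
        {
            ALOGW("EnqueueWorkload threw: %s", e.what());
            return false;
        }
    }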