IVGCVSW-5090 Add android-nn-driver Support for Logical operators

 * Add ConvertLogicalBinary
 * Add support for LOGICAL_AND, LOGICAL_NOT, LOGICAL_OR

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I4dcecbebe031915b8c237fd5b9b92c8844847fb7
diff --git a/1.3/HalPolicy.cpp b/1.3/HalPolicy.cpp
index 37deec3..5e456b8 100644
--- a/1.3/HalPolicy.cpp
+++ b/1.3/HalPolicy.cpp
@@ -81,6 +81,12 @@
             return ConvertComparison(operation, model, data, ComparisonOperation::LessOrEqual);
         case V1_3::OperationType::LOCAL_RESPONSE_NORMALIZATION:
             return ConvertLocalResponseNormalization(operation, model, data);
+        case V1_3::OperationType::LOGICAL_AND:
+            return ConvertLogicalBinary(operation, model, data, LogicalBinaryOperation::LogicalAnd);
+        case V1_3::OperationType::LOGICAL_NOT:
+            return ConvertElementwiseUnary(operation, model, data, UnaryOperation::LogicalNot);
+        case V1_3::OperationType::LOGICAL_OR:
+            return ConvertLogicalBinary(operation, model, data, LogicalBinaryOperation::LogicalOr);
         case V1_3::OperationType::LOGISTIC:
             return ConvertLogistic(operation, model, data);
         case V1_3::OperationType::LOG_SOFTMAX:
@@ -314,6 +320,15 @@
     return ::ConvertLocalResponseNormalization<hal_1_3::HalPolicy>(operation, model, data);
 }
 
+bool HalPolicy::ConvertLogicalBinary(const Operation& operation,
+                                     const Model& model,
+                                     ConversionData& data,
+                                     armnn::LogicalBinaryOperation logicalOperation)
+{
+    ALOGV("hal_1_3::HalPolicy::ConvertLogicalBinary()");
+    return ::ConvertLogicalBinary<hal_1_3::HalPolicy>(operation, model, data, logicalOperation);
+}
+
 bool HalPolicy::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_3::HalPolicy::ConvertLogistic()");
diff --git a/1.3/HalPolicy.hpp b/1.3/HalPolicy.hpp
index 0eb5f4d..f82a5ef 100644
--- a/1.3/HalPolicy.hpp
+++ b/1.3/HalPolicy.hpp
@@ -95,6 +95,11 @@
                                                   const Model& model,
                                                   ConversionData& data);
 
+    static bool ConvertLogicalBinary(const Operation& operation,
+                                     const Model& model,
+                                     ConversionData& data,
+                                     armnn::LogicalBinaryOperation logicalOperation);
+
     static bool ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data);
 
     static bool ConvertLogSoftmax(const Operation& operation, const Model& model, ConversionData& data);
diff --git a/ConversionUtils_1_3.hpp b/ConversionUtils_1_3.hpp
index a7f00fc..150735e 100644
--- a/ConversionUtils_1_3.hpp
+++ b/ConversionUtils_1_3.hpp
@@ -153,6 +153,79 @@
 template<typename HalPolicy,
          typename HalOperation = typename HalPolicy::Operation,
          typename HalModel     = typename HalPolicy::Model>
+bool ConvertLogicalBinary(const HalOperation& operation,
+                          const HalModel& model,
+                          ConversionData& data,
+                          LogicalBinaryOperation logicalOperation)
+{
+    using HalOperand = typename HalPolicy::Operand;
+
+    ALOGV("HalPolicy::ConvertLogicalBinary()");
+    ALOGV("logicalOperation = %s", GetLogicalBinaryOperationAsCString(logicalOperation));
+
+    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
+
+    if (!(input0.IsValid() && input1.IsValid()))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const TensorInfo& inputInfo0 = input0.GetTensorInfo();
+    const TensorInfo& inputInfo1 = input1.GetTensorInfo();
+    const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    LogicalBinaryDescriptor descriptor(logicalOperation);
+
+    bool isSupported = false;
+
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsLogicalBinarySupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   inputInfo0,
+                                   inputInfo1,
+                                   outputInfo,
+                                   descriptor);
+    };
+
+    if (!IsDynamicTensor(outputInfo))
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+    else
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    IConnectableLayer* layer = data.m_Network->AddLogicalBinaryLayer(descriptor);
+    assert(layer != nullptr);
+
+    bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
+    if (!isReshapeSupported)
+    {
+        return false;
+    }
+
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
+}
+
+template<typename HalPolicy,
+         typename HalOperation = typename HalPolicy::Operation,
+         typename HalModel     = typename HalPolicy::Model>
 bool ConvertQuantizedLstm(const HalOperation& operation, const HalModel& model, ConversionData& data)
 {
     using HalOperand     = typename HalPolicy::Operand;
diff --git a/NnapiSupport.txt b/NnapiSupport.txt
index 2b6eaca..e0400e1 100644
--- a/NnapiSupport.txt
+++ b/NnapiSupport.txt
@@ -43,6 +43,9 @@
 LESS                         (FLOAT32, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
 LESS_EQUAL                   (FLOAT32, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
 LOCAL_RESPONSE_NORMALIZATION (FLOAT32)
+LOGICAL_AND                  (BOOL8)
+LOGICAL_NOT                  (BOOL8)
+LOGICAL_OR                   (BOOL8)
 LOGISTIC                     (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
 LOG_SOFTMAX                  (FLOAT32)
 LSTM                         (FLOAT32)