Merge "Revert "IVGCVSW-5090 Add android-nn-driver Support for Logical operators"" into sc-dev
diff --git a/1.3/HalPolicy.cpp b/1.3/HalPolicy.cpp
index 9333759..53b67c7 100644
--- a/1.3/HalPolicy.cpp
+++ b/1.3/HalPolicy.cpp
@@ -81,12 +81,6 @@
             return ConvertComparison(operation, model, data, ComparisonOperation::LessOrEqual);
         case V1_3::OperationType::LOCAL_RESPONSE_NORMALIZATION:
             return ConvertLocalResponseNormalization(operation, model, data);
-        case V1_3::OperationType::LOGICAL_AND:
-            return ConvertLogicalBinary(operation, model, data, LogicalBinaryOperation::LogicalAnd);
-        case V1_3::OperationType::LOGICAL_NOT:
-            return ConvertElementwiseUnary(operation, model, data, UnaryOperation::LogicalNot);
-        case V1_3::OperationType::LOGICAL_OR:
-            return ConvertLogicalBinary(operation, model, data, LogicalBinaryOperation::LogicalOr);
         case V1_3::OperationType::LOGISTIC:
             return ConvertLogistic(operation, model, data);
         case V1_3::OperationType::LOG_SOFTMAX:
@@ -320,15 +314,6 @@
     return ::ConvertLocalResponseNormalization<hal_1_3::HalPolicy>(operation, model, data);
 }
 
-bool HalPolicy::ConvertLogicalBinary(const Operation& operation,
-                                     const Model& model,
-                                     ConversionData& data,
-                                     armnn::LogicalBinaryOperation logicalOperation)
-{
-    ALOGV("hal_1_3::HalPolicy::ConvertLogicalBinary()");
-    return ::ConvertLogicalBinary<hal_1_3::HalPolicy>(operation, model, data, logicalOperation);
-}
-
 bool HalPolicy::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_3::HalPolicy::ConvertLogistic()");
diff --git a/1.3/HalPolicy.hpp b/1.3/HalPolicy.hpp
index f82a5ef..0eb5f4d 100644
--- a/1.3/HalPolicy.hpp
+++ b/1.3/HalPolicy.hpp
@@ -95,11 +95,6 @@
                                                   const Model& model,
                                                   ConversionData& data);
 
-    static bool ConvertLogicalBinary(const Operation& operation,
-                                     const Model& model,
-                                     ConversionData& data,
-                                     armnn::LogicalBinaryOperation logicalOperation);
-
     static bool ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data);
 
     static bool ConvertLogSoftmax(const Operation& operation, const Model& model, ConversionData& data);
diff --git a/ConversionUtils_1_3.hpp b/ConversionUtils_1_3.hpp
index 150735e..a7f00fc 100644
--- a/ConversionUtils_1_3.hpp
+++ b/ConversionUtils_1_3.hpp
@@ -153,79 +153,6 @@
 template<typename HalPolicy,
          typename HalOperation = typename HalPolicy::Operation,
          typename HalModel     = typename HalPolicy::Model>
-bool ConvertLogicalBinary(const HalOperation& operation,
-                          const HalModel& model,
-                          ConversionData& data,
-                          LogicalBinaryOperation logicalOperation)
-{
-    using HalOperand = typename HalPolicy::Operand;
-
-    ALOGV("HalPolicy::ConvertLogicalBinary()");
-    ALOGV("logicalOperation = %s", GetLogicalBinaryOperationAsCString(logicalOperation));
-
-    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
-    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
-
-    if (!(input0.IsValid() && input1.IsValid()))
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
-    if (!output)
-    {
-        return Fail("%s: Could not read output 0", __func__);
-    }
-
-    const TensorInfo& inputInfo0 = input0.GetTensorInfo();
-    const TensorInfo& inputInfo1 = input1.GetTensorInfo();
-    const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-
-    LogicalBinaryDescriptor descriptor(logicalOperation);
-
-    bool isSupported = false;
-
-    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
-    {
-        FORWARD_LAYER_SUPPORT_FUNC(__func__,
-                                   IsLogicalBinarySupported,
-                                   data.m_Backends,
-                                   isSupported,
-                                   inputInfo0,
-                                   inputInfo1,
-                                   outputInfo,
-                                   descriptor);
-    };
-
-    if(!IsDynamicTensor(outputInfo))
-    {
-        validateFunc(outputInfo, isSupported);
-    }
-    else
-    {
-        isSupported = AreDynamicTensorsSupported();
-    }
-
-    if (!isSupported)
-    {
-        return false;
-    }
-
-    IConnectableLayer* layer = data.m_Network->AddLogicalBinaryLayer(descriptor);
-    assert(layer != nullptr);
-
-    bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
-    if (!isReshapeSupported)
-    {
-        return false;
-    }
-
-    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
-}
-
-template<typename HalPolicy,
-         typename HalOperation = typename HalPolicy::Operation,
-         typename HalModel     = typename HalPolicy::Model>
 bool ConvertQuantizedLstm(const HalOperation& operation, const HalModel& model, ConversionData& data)
 {
     using HalOperand     = typename HalPolicy::Operand;
diff --git a/NnapiSupport.txt b/NnapiSupport.txt
index e0400e1..2b6eaca 100644
--- a/NnapiSupport.txt
+++ b/NnapiSupport.txt
@@ -43,9 +43,6 @@
 LESS                         (FLOAT32, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
 LESS_EQUAL                   (FLOAT32, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
 LOCAL_RESPONSE_NORMALIZATION (FLOAT32)
-LOGICAL_AND                  (BOOL8)
-LOGICAL_NOT                  (BOOL8)
-LOGICAL_OR                   (BOOL8)
 LOGISTIC                     (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
 LOG_SOFTMAX                  (FLOAT32)
 LSTM                         (FLOAT32)
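
For reference, the conversion path removed above targeted ArmNN's
AddLogicalBinaryLayer. Below is a minimal sketch of driving that layer
directly through the public INetwork API, assuming an ArmNN build that
still provides LogicalBinaryDescriptor; the network shape and tensor
sizes are illustrative only, not taken from this change.

    #include <armnn/INetwork.hpp>
    #include <armnn/Descriptors.hpp>
    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>

    int main()
    {
        using namespace armnn;

        // Two BOOL8 inputs of matching shape, as the removed
        // NnapiSupport.txt entries advertised for LOGICAL_AND.
        TensorInfo boolInfo({ 1, 4 }, DataType::Boolean);

        INetworkPtr network = INetwork::Create();
        IConnectableLayer* input0 = network->AddInputLayer(0, "input0");
        IConnectableLayer* input1 = network->AddInputLayer(1, "input1");

        // Same descriptor the removed ConvertLogicalBinary helper
        // constructed from the HAL operation type.
        LogicalBinaryDescriptor descriptor(LogicalBinaryOperation::LogicalAnd);
        IConnectableLayer* logicalAnd =
            network->AddLogicalBinaryLayer(descriptor, "logical_and");

        IConnectableLayer* output = network->AddOutputLayer(0, "output");

        // Wire inputs -> logical layer -> output.
        input0->GetOutputSlot(0).Connect(logicalAnd->GetInputSlot(0));
        input1->GetOutputSlot(0).Connect(logicalAnd->GetInputSlot(1));
        logicalAnd->GetOutputSlot(0).Connect(output->GetInputSlot(0));

        input0->GetOutputSlot(0).SetTensorInfo(boolInfo);
        input1->GetOutputSlot(0).SetTensorInfo(boolInfo);
        logicalAnd->GetOutputSlot(0).SetTensorInfo(boolInfo);

        return 0;
    }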