IVGCVSW-2508 Add no-op factory implementations and layer for Gather operator
 * Added GatherQueueDescriptor to WorkloadData
 * Added CreateGather function in WorkloadFactory.hpp
 * Added stub implementation of the CreateGather function in workload factories
 * Added GatherLayer stub implementation
 * Added AddGatherLayer to Network
 * Added IsGatherSupported to LayerSupportBase

Change-Id: I0408fd54e88a7d4e3d9e1c2811a9323f0da52a04
diff --git a/Android.mk b/Android.mk
index c61c710..62a992d 100644
--- a/Android.mk
+++ b/Android.mk
@@ -98,6 +98,7 @@
         src/armnn/layers/FakeQuantizationLayer.cpp \
         src/armnn/layers/FloorLayer.cpp \
         src/armnn/layers/FullyConnectedLayer.cpp \
+        src/armnn/layers/GatherLayer.cpp \
         src/armnn/layers/GreaterLayer.cpp \
         src/armnn/layers/InputLayer.cpp \
         src/armnn/layers/L2NormalizationLayer.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 755f408..9651f07 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -202,6 +202,8 @@
     src/armnn/layers/FloorLayer.cpp
     src/armnn/layers/FullyConnectedLayer.hpp
     src/armnn/layers/FullyConnectedLayer.cpp
+    src/armnn/layers/GatherLayer.cpp
+    src/armnn/layers/GatherLayer.hpp
     src/armnn/layers/GreaterLayer.cpp
     src/armnn/layers/GreaterLayer.hpp
     src/armnn/layers/InputLayer.hpp
diff --git a/include/armnn/ILayerSupport.hpp b/include/armnn/ILayerSupport.hpp
index 929896d..8d800f4 100644
--- a/include/armnn/ILayerSupport.hpp
+++ b/include/armnn/ILayerSupport.hpp
@@ -137,6 +137,11 @@
                                  const TensorInfo* cellToOutputWeights,
                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
 
+    virtual bool IsGatherSupported(const TensorInfo& input0,
+                                   const TensorInfo& input1,
+                                   const TensorInfo& output,
+                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+
     virtual bool IsMaximumSupported(const TensorInfo& input0,
                                     const TensorInfo& input1,
                                     const TensorInfo& output,
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index f31176a..05962b9 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -343,6 +343,11 @@
     /// @ return - Interface for configuring the layer.
     virtual IConnectableLayer* AddRsqrtLayer(const char* name = nullptr) = 0;
 
+    /// Add Gather layer to the network.
+    /// @param name - Optional name for the layer.
+    /// @ return - Interface for configuring the layer.
+    virtual IConnectableLayer* AddGatherLayer(const char* name = nullptr) = 0;
+
 protected:
     ~INetwork() {}
 };
diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp
index 16a1972..15f4aa0 100644
--- a/src/armnn/InternalTypes.cpp
+++ b/src/armnn/InternalTypes.cpp
@@ -29,6 +29,7 @@
         case LayerType::FakeQuantization: return "FakeQuantization";
         case LayerType::Floor: return "Floor";
         case LayerType::FullyConnected: return "FullyConnected";
+        case LayerType::Gather: return "Gather";
         case LayerType::Greater: return "Greater";
         case LayerType::Input: return "Input";
         case LayerType::L2Normalization: return "L2Normalization";
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index f05ea25..704efdf 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -29,6 +29,7 @@
     FakeQuantization,
     Floor,
     FullyConnected,
+    Gather,
     Greater,
     Input,
     L2Normalization,
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index b0b3ecc..b600e4d 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -245,6 +245,16 @@
     FORWARD_LAYER_SUPPORT_FUNC(backend, IsFullyConnectedSupported, input, output, weights, biases, descriptor);
 }
 
+bool IsGatherSupported(const BackendId& backend,
+                       const TensorInfo& input0,
+                       const TensorInfo& input1,
+                       const TensorInfo& output,
+                       char* reasonIfUnsupported,
+                       size_t reasonIfUnsupportedMaxLength)
+{
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsGatherSupported, input0, input1, output);
+}
+
 bool IsGreaterSupported(const BackendId& backend,
                         const TensorInfo& input0,
                         const TensorInfo& input1,
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index 8b4ee08..27806c5 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -21,6 +21,7 @@
 #include "layers/FakeQuantizationLayer.hpp"
 #include "layers/FloorLayer.hpp"
 #include "layers/FullyConnectedLayer.hpp"
+#include "layers/GatherLayer.hpp"
 #include "layers/GreaterLayer.hpp"
 #include "layers/InputLayer.hpp"
 #include "layers/L2NormalizationLayer.hpp"
@@ -88,6 +89,7 @@
 DECLARE_LAYER(FakeQuantization)
 DECLARE_LAYER(Floor)
 DECLARE_LAYER(FullyConnected)
+DECLARE_LAYER(Gather)
 DECLARE_LAYER(Greater)
 DECLARE_LAYER(Input)
 DECLARE_LAYER(L2Normalization)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 7b9cb3d..8a1437a 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -872,6 +872,11 @@
     return m_Graph->AddLayer<RsqrtLayer>(name);
 }
 
+IConnectableLayer* Network::AddGatherLayer(const char* name)
+{
+    return m_Graph->AddLayer<GatherLayer>(name);
+}
+
 OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph)
     : m_Graph(std::move(graph))
 {
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index ba741e9..7690faf 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -66,6 +66,8 @@
         const ConstTensor& biases,
         const char* name = nullptr) override;
 
+    IConnectableLayer* AddGatherLayer(const char* name = nullptr) override;
+
     IConnectableLayer* AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
                                        const char* name = nullptr) override;
 
diff --git a/src/armnn/layers/GatherLayer.cpp b/src/armnn/layers/GatherLayer.cpp
new file mode 100644
index 0000000..2e5d011
--- /dev/null
+++ b/src/armnn/layers/GatherLayer.cpp
@@ -0,0 +1,37 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "GatherLayer.hpp"
+#include "LayerCloneBase.hpp"
+
+#include <armnn/TypesUtils.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+namespace armnn
+{
+
+GatherLayer::GatherLayer(const char* name)
+    : Layer(2, 1, LayerType::Gather, name)
+{
+}
+
+std::unique_ptr<IWorkload> GatherLayer::CreateWorkload(const armnn::Graph& graph,
+                                                       const armnn::IWorkloadFactory& factory) const
+{
+    GatherQueueDescriptor descriptor;
+    return factory.CreateGather(descriptor, PrepInfoAndDesc(descriptor, graph));
+}
+
+GatherLayer* GatherLayer::Clone(Graph& graph) const
+{
+    return CloneBase<GatherLayer>(graph, GetName());
+}
+
+void GatherLayer::ValidateTensorShapesFromInputs()
+{
+}
+
+} // namespace armnn
diff --git a/src/armnn/layers/GatherLayer.hpp b/src/armnn/layers/GatherLayer.hpp
new file mode 100644
index 0000000..7b3aebe
--- /dev/null
+++ b/src/armnn/layers/GatherLayer.hpp
@@ -0,0 +1,41 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "Layer.hpp"
+
+namespace armnn
+{
+
+/// This layer represents a Gather operator.
+class GatherLayer : public Layer
+{
+public:
+    /// Makes a workload for the Gather type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
+    virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+                                                      const IWorkloadFactory& factory) const override;
+
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
+    GatherLayer* Clone(Graph& graph) const override;
+
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref GatherLayer.
+    void ValidateTensorShapesFromInputs() override;
+
+protected:
+    /// Constructor to create a GatherLayer.
+    /// @param [in] name Optional name for the layer.
+    GatherLayer(const char* name);
+
+    /// Default destructor
+    ~GatherLayer() = default;
+};
+
+} // namespace armnn
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index 187d2f7..2e43657 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -156,6 +156,14 @@
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
+bool LayerSupportBase::IsGatherSupported(const armnn::TensorInfo& input0,
+                                         const armnn::TensorInfo& input1,
+                                         const armnn::TensorInfo& output,
+                                         armnn::Optional<std::string&> reasonIfUnsupported) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
 bool LayerSupportBase::IsInputSupported(const TensorInfo& input,
                                         Optional<std::string&> reasonIfUnsupported) const
 {
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index c6f943c..77cb302 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -92,6 +92,11 @@
                                    const FullyConnectedDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsGatherSupported(const TensorInfo& input0,
+                           const TensorInfo& input1,
+                           const TensorInfo& output,
+                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsGreaterSupported(const TensorInfo& input0,
                             const TensorInfo& input1,
                             const TensorInfo& output,
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 97981e2..072b9a9 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -1051,6 +1051,12 @@
                               "output");
 }
 
+void GatherQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+    ValidateTwoInputs(workloadInfo, "GatherQueueDescriptor");
+    ValidateSingleOutput(workloadInfo, "GatherQueueDescriptor");
+}
+
 void PreCompiledQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
 {
     // This is internally generated so it should not need validation.
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index 453896b..2d68c9f 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -378,6 +378,11 @@
     void Validate(const WorkloadInfo& workloadInfo) const;
 };
 
+struct GatherQueueDescriptor : QueueDescriptor
+{
+    void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
 struct PreCompiledQueueDescriptor : QueueDescriptorWithParameters<PreCompiledDescriptor>
 {
     PreCompiledQueueDescriptor()
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 0f015bd..a70ec7e 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -315,6 +315,17 @@
                                                reason);
             break;
         }
+        case LayerType::Gather:
+        {
+            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
+            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+            result = layerSupportObject->IsGatherSupported(OverrideDataType(input0, dataType),
+                                                           OverrideDataType(input1, dataType),
+                                                           OverrideDataType(output, dataType),
+                                                           reason);
+            break;
+        }
         case LayerType::Input:
         {
             const TensorInfo& input = layer.GetOutputSlot(0).GetTensorInfo();
@@ -477,16 +488,16 @@
                                                             reason);
             break;
         }
-    case LayerType::MemCopy:
-    {
-        const TensorInfo& input  = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
-        const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+        case LayerType::MemCopy:
+        {
+            const TensorInfo& input  = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
 
-        result = layerSupportObject->IsMemCopySupported(OverrideDataType(input, dataType),
-                                                        OverrideDataType(output, dataType),
-                                                        reason);
-        break;
-    }
+            result = layerSupportObject->IsMemCopySupported(OverrideDataType(input, dataType),
+                                                            OverrideDataType(output, dataType),
+                                                            reason);
+            break;
+        }
         case LayerType::Merger:
         {
             auto cLayer = boost::polymorphic_downcast<const MergerLayer*>(&layer);
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index d516698..dd47dd6 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -162,6 +162,9 @@
 
     virtual std::unique_ptr<IWorkload> CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const = 0;
+
+    virtual std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& descriptor,
+                                                    const WorkloadInfo& info) const = 0;
 };
 
 } //namespace armnn
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index edc58cf..43c7581 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -344,6 +344,8 @@
 
 DECLARE_LAYER_POLICY_2_PARAM(FullyConnected)
 
+DECLARE_LAYER_POLICY_1_PARAM(Gather)
+
 DECLARE_LAYER_POLICY_1_PARAM(Greater)
 
 DECLARE_LAYER_POLICY_CUSTOM_PARAM(Input, armnn::LayerBindingId)
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index 28011cf..71c1b89 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -362,4 +362,10 @@
     return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
 }
 
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateGather(const armnn::GatherQueueDescriptor& descriptor,
+                                                           const armnn::WorkloadInfo& info) const
+{
+    return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+}
+
 } // namespace armnn
diff --git a/src/backends/cl/ClWorkloadFactory.hpp b/src/backends/cl/ClWorkloadFactory.hpp
index 286e897..ba2f066 100644
--- a/src/backends/cl/ClWorkloadFactory.hpp
+++ b/src/backends/cl/ClWorkloadFactory.hpp
@@ -153,6 +153,9 @@
     virtual std::unique_ptr<IWorkload> CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const override;
 
+    virtual std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& descriptor,
+                                                    const WorkloadInfo& info) const override;
+
 private:
     template<typename FloatWorkload, typename Uint8Workload, typename QueueDescriptorType, typename... Args>
     static std::unique_ptr<IWorkload> MakeWorkload(const QueueDescriptorType& descriptor,
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 3728c86..311479a 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -330,4 +330,10 @@
     return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
 }
 
+std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateGather(const armnn::GatherQueueDescriptor& descriptor,
+                                                             const armnn::WorkloadInfo& info) const
+{
+    return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
+}
+
 } // namespace armnn
diff --git a/src/backends/neon/NeonWorkloadFactory.hpp b/src/backends/neon/NeonWorkloadFactory.hpp
index 68317ed..fe9f1b0 100644
--- a/src/backends/neon/NeonWorkloadFactory.hpp
+++ b/src/backends/neon/NeonWorkloadFactory.hpp
@@ -154,6 +154,9 @@
     virtual std::unique_ptr<IWorkload> CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const override;
 
+    virtual std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& descriptor,
+                                                    const WorkloadInfo& info) const override;
+
 private:
     mutable std::shared_ptr<NeonMemoryManager> m_MemoryManager;
 };
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 361a3f1..cb7d6ea 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -324,4 +324,10 @@
     return nullptr;
 }
 
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateGather(const armnn::GatherQueueDescriptor& descriptor,
+                                                            const armnn::WorkloadInfo& info) const
+{
+    return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+}
+
 } // namespace armnn
diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp
index 432ac72..443af76 100644
--- a/src/backends/reference/RefWorkloadFactory.hpp
+++ b/src/backends/reference/RefWorkloadFactory.hpp
@@ -171,6 +171,9 @@
     virtual std::unique_ptr<IWorkload> CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const override;
 
+    virtual std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& descriptor,
+                                                    const WorkloadInfo& info) const override;
+
 private:
 
     template <typename F32Workload, typename U8Workload, typename QueueDescriptorType>