IVGCVSW-2697 Add Serialize/Deserialize for the Merger Layer

* Force generation of schema header in every build
* Also fixed typo in OriginsDescriptor comment (Descriptors.hpp)
* Added Serialize/Deserialize check on Addition Layer
* Added Serialize/Deserialize check on Floor Layer
* Added Serialize/Deserialize check on Minimum Layer
* Added Serialize/Deserialize check on Maximum Layer
* Added Serialize/Deserialize check on Multiplication Layer
* Added Serialize/Deserialize check on Division Layer
* Added Serialize/Deserialize test for the Merger Layer

Change-Id: I1358ea4db7ca506d8bcec2ee64e1fbad6005e723
Signed-off-by: Jim Flynn <jim.flynn@arm.com>
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 1dff39f..003c9df 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -150,6 +150,7 @@
 endif()
 
 if(BUILD_ARMNN_SERIALIZER)
+    file(REMOVE ${CMAKE_CURRENT_BINARY_DIR}/src/armnnSerializer/ArmnnSchema_generated.h)
     add_custom_command (
         OUTPUT  src/armnnSerializer/ArmnnSchema_generated.h
         COMMAND ${FLATC_DIR}/flatc -o ${CMAKE_CURRENT_BINARY_DIR}/src/armnnSerializer --cpp
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index dc09cc6..2cf0974 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -69,7 +69,7 @@
 
     /// @Brief Set the view origin coordinates. The arguments are: view, dimension, value.
     /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
-    /// If the coord is greater than or equal to GetNumViews(), then the coord argument is out of range.
+    /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
     Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value);
     /// Get the number of views.
     uint32_t GetNumViews() const;
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index ed110ad..d62751d 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -201,6 +201,7 @@
     m_ParserFunctions[Layer_MaximumLayer]                = &Deserializer::ParseMaximum;
     m_ParserFunctions[Layer_MeanLayer]                   = &Deserializer::ParseMean;
     m_ParserFunctions[Layer_MinimumLayer]                = &Deserializer::ParseMinimum;
+    m_ParserFunctions[Layer_MergerLayer]                 = &Deserializer::ParseMerger;
     m_ParserFunctions[Layer_MultiplicationLayer]         = &Deserializer::ParseMultiplication;
     m_ParserFunctions[Layer_NormalizationLayer]          = &Deserializer::ParseNormalization;
     m_ParserFunctions[Layer_PadLayer]                    = &Deserializer::ParsePad;
@@ -255,6 +256,8 @@
             return graphPtr->layers()->Get(layerIndex)->layer_as_MinimumLayer()->base();
         case Layer::Layer_MaximumLayer:
             return graphPtr->layers()->Get(layerIndex)->layer_as_MaximumLayer()->base();
+        case Layer::Layer_MergerLayer:
+            return graphPtr->layers()->Get(layerIndex)->layer_as_MergerLayer()->base();
         case Layer::Layer_MultiplicationLayer:
             return graphPtr->layers()->Get(layerIndex)->layer_as_MultiplicationLayer()->base();
         case Layer::Layer_NormalizationLayer:
@@ -1111,6 +1114,45 @@
     RegisterOutputSlots(graph, layerIndex, layer);
 }
 
+void Deserializer::ParseMerger(GraphPtr graph, unsigned int layerIndex)
+{
+    CHECK_LAYERS(graph, 0, layerIndex);
+    CHECK_LOCATION();
+
+    auto outputs = GetOutputs(graph, layerIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    auto mergerLayer = graph->layers()->Get(layerIndex)->layer_as_MergerLayer();
+    auto layerName = GetLayerName(graph, layerIndex);
+    auto mergerDescriptor = mergerLayer->descriptor();
+    unsigned int numViews = mergerDescriptor->numViews();
+    unsigned int numDimensions = mergerDescriptor->numDimensions();
+
+    // Validate that the number of inputs matches the number of views declared in the descriptor.
+    auto inputs = GetInputs(graph, layerIndex);
+    CHECK_VALID_SIZE(inputs.size(), numViews);
+
+    armnn::OriginsDescriptor descriptor(numViews, numDimensions);
+    auto originsPtr = mergerDescriptor->viewOrigins();
+    for (unsigned int v = 0; v < numViews; ++v)
+    {
+        auto originPtr = originsPtr->Get(v);
+        for (unsigned int d = 0; d < numDimensions; ++d)
+        {
+            uint32_t value = originPtr->data()->Get(d);
+            descriptor.SetViewOriginCoord(v, d, value);
+        }
+    }
+    descriptor.SetConcatAxis(mergerDescriptor->concatAxis());
+
+    IConnectableLayer* layer = m_Network->AddMergerLayer(descriptor, layerName.c_str());
+    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    RegisterInputSlots(graph, layerIndex, layer);
+    RegisterOutputSlots(graph, layerIndex, layer);
+}
+
 void Deserializer::ParseMultiplication(GraphPtr graph, unsigned int layerIndex)
 {
     CHECK_LAYERS(graph, 0, layerIndex);
diff --git a/src/armnnDeserializer/Deserializer.hpp b/src/armnnDeserializer/Deserializer.hpp
index e837a08..d085946 100644
--- a/src/armnnDeserializer/Deserializer.hpp
+++ b/src/armnnDeserializer/Deserializer.hpp
@@ -89,6 +89,7 @@
     void ParseMaximum(GraphPtr graph, unsigned int layerIndex);
     void ParseMean(GraphPtr graph, unsigned int layerIndex);
     void ParseMinimum(GraphPtr graph, unsigned int layerIndex);
+    void ParseMerger(GraphPtr graph, unsigned int layerIndex);
     void ParseMultiplication(GraphPtr graph, unsigned int layerIndex);
     void ParseNormalization(GraphPtr graph, unsigned int layerIndex);
     void ParsePad(GraphPtr graph, unsigned int layerIndex);
diff --git a/src/armnnDeserializer/DeserializerSupport.md b/src/armnnDeserializer/DeserializerSupport.md
index 0f3d91d..3762134 100644
--- a/src/armnnDeserializer/DeserializerSupport.md
+++ b/src/armnnDeserializer/DeserializerSupport.md
@@ -21,6 +21,7 @@
 * Greater
 * Maximum
 * Mean
+* Merger
 * Minimum
 * Multiplication
 * Normalization
diff --git a/src/armnnSerializer/ArmnnSchema.fbs b/src/armnnSerializer/ArmnnSchema.fbs
index 01142ff..a5fb4b6 100644
--- a/src/armnnSerializer/ArmnnSchema.fbs
+++ b/src/armnnSerializer/ArmnnSchema.fbs
@@ -110,7 +110,8 @@
     Subtraction = 26,
     StridedSlice = 27,
     Gather = 28,
-    Mean = 29
+    Mean = 29,
+    Merger = 30
 }
 
 // Base layer table to be used as part of other layers
@@ -415,6 +416,22 @@
     dataLayout:DataLayout;
 }
 
+table MergerLayer {
+    base:LayerBase;
+    descriptor:OriginsDescriptor;
+}
+
+table UintVector {
+   data:[uint];
+}
+
+table OriginsDescriptor {
+   concatAxis:uint;
+   numViews:uint;
+   numDimensions:uint;
+   viewOrigins:[UintVector];
+}
+
 union Layer {
     ActivationLayer,
     AdditionLayer,
@@ -445,7 +462,8 @@
     SubtractionLayer,
     StridedSliceLayer,
     GatherLayer,
-    MeanLayer
+    MeanLayer,
+    MergerLayer
 }
 
 table AnyLayer {
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index 56c4281..3b71e5f 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -361,6 +361,39 @@
     CreateAnyLayer(fbMinimumLayer.o, serializer::Layer::Layer_MinimumLayer);
 }
 
+void SerializerVisitor::VisitMergerLayer(const armnn::IConnectableLayer* layer,
+                                         const armnn::OriginsDescriptor& mergerDescriptor,
+                                         const char* name)
+{
+    auto flatBufferMergerBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Merger);
+
+    std::vector<flatbuffers::Offset<UintVector>> views;
+    for (unsigned int v = 0; v < mergerDescriptor.GetNumViews(); ++v)
+    {
+        const uint32_t* origin = mergerDescriptor.GetViewOrigin(v);
+        std::vector<uint32_t> origins;
+        for (unsigned int d = 0; d < mergerDescriptor.GetNumDimensions(); ++d)
+        {
+            origins.push_back(origin[d]);
+        }
+        auto view = m_flatBufferBuilder.CreateVector(origins);
+        auto uintVector = CreateUintVector(m_flatBufferBuilder, view);
+        views.push_back(uintVector);
+    }
+
+    auto flatBufferMergerDescriptor = CreateOriginsDescriptor(m_flatBufferBuilder,
+                                                              mergerDescriptor.GetConcatAxis(),
+                                                              mergerDescriptor.GetNumViews(),
+                                                              mergerDescriptor.GetNumDimensions(),
+                                                              m_flatBufferBuilder.CreateVector(views));
+
+    auto flatBufferLayer = CreateMergerLayer(m_flatBufferBuilder,
+                                             flatBufferMergerBaseLayer,
+                                             flatBufferMergerDescriptor);
+
+    CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_MergerLayer);
+}
+
 void SerializerVisitor::VisitMultiplicationLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
     auto fbMultiplicationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Multiplication);
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index f928c37..e93e4ce 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -112,6 +112,10 @@
     void VisitMaximumLayer(const armnn::IConnectableLayer* layer,
                            const char* name = nullptr) override;
 
+    void VisitMergerLayer(const armnn::IConnectableLayer* layer,
+                          const armnn::OriginsDescriptor& mergerDescriptor,
+                          const char* name = nullptr) override;
+
     void VisitMultiplicationLayer(const armnn::IConnectableLayer* layer,
                                   const char* name = nullptr) override;
 
diff --git a/src/armnnSerializer/SerializerSupport.md b/src/armnnSerializer/SerializerSupport.md
index bb50242..ae8691e 100644
--- a/src/armnnSerializer/SerializerSupport.md
+++ b/src/armnnSerializer/SerializerSupport.md
@@ -21,6 +21,7 @@
 * Greater
 * Maximum
 * Mean
+* Merger
 * Minimum
 * Multiplication
 * Normalization
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index a18ae32..5a054c2 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -148,7 +148,7 @@
 
 BOOST_AUTO_TEST_SUITE(SerializerTests)
 
-BOOST_AUTO_TEST_CASE(SerializeAddition)
+BOOST_AUTO_TEST_CASE(SerializeDeserializeAddition)
 {
     class VerifyAdditionName : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
     {
@@ -188,6 +188,12 @@
 
     VerifyAdditionName nameChecker;
     deserializedNetwork->Accept(nameChecker);
+
+    CheckDeserializedNetworkAgainstOriginal<float>(*deserializedNetwork,
+                                                   *network,
+                                                   {info.GetShape(), info.GetShape()},
+                                                   {info.GetShape()},
+                                                   {0, 1});
 }
 
 BOOST_AUTO_TEST_CASE(SerializeConstant)
@@ -255,7 +261,7 @@
                                                    {commonTensorInfo.GetShape()});
 }
 
-BOOST_AUTO_TEST_CASE(SerializeFloor)
+BOOST_AUTO_TEST_CASE(SerializeDeserializeFloor)
 {
     class VerifyFloorName : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
     {
@@ -269,7 +275,7 @@
     const armnn::TensorInfo info({4,4}, armnn::DataType::Float32);
 
     armnn::INetworkPtr network = armnn::INetwork::Create();
-    armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(1);
+    armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
 
     const char* floorLayerName = "floor";
 
@@ -295,9 +301,14 @@
 
     VerifyFloorName nameChecker;
     deserializedNetwork->Accept(nameChecker);
+
+    CheckDeserializedNetworkAgainstOriginal<float>(*deserializedNetwork,
+                                                   *network,
+                                                   {info.GetShape()},
+                                                   {info.GetShape()});
 }
 
-BOOST_AUTO_TEST_CASE(SerializeMinimum)
+BOOST_AUTO_TEST_CASE(SerializeDeserializeMinimum)
 {
     class VerifyMinimumName : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
     {
@@ -346,9 +357,15 @@
 
     VerifyMinimumName nameChecker(minimumLayerName);
     deserializedNetwork->Accept(nameChecker);
+
+    CheckDeserializedNetworkAgainstOriginal<float>(*deserializedNetwork,
+                                                   *network,
+                                                   {info.GetShape(), info.GetShape()},
+                                                   {info.GetShape()},
+                                                   {0, 1});
 }
 
-BOOST_AUTO_TEST_CASE(SerializeMaximum)
+BOOST_AUTO_TEST_CASE(SerializeDeserializeMaximum)
 {
     class VerifyMaximumName : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
     {
@@ -397,9 +414,15 @@
 
     VerifyMaximumName nameChecker(maximumLayerName);
     deserializedNetwork->Accept(nameChecker);
+
+    CheckDeserializedNetworkAgainstOriginal<float>(*deserializedNetwork,
+                                                   *network,
+                                                   {info.GetShape(), info.GetShape()},
+                                                   {info.GetShape()},
+                                                   {0, 1});
 }
 
-BOOST_AUTO_TEST_CASE(SerializeMultiplication)
+BOOST_AUTO_TEST_CASE(SerializeDeserializeMultiplication)
 {
     class VerifyMultiplicationName : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
     {
@@ -442,6 +465,12 @@
 
     VerifyMultiplicationName nameChecker;
     deserializedNetwork->Accept(nameChecker);
+
+    CheckDeserializedNetworkAgainstOriginal<float>(*deserializedNetwork,
+                                                   *network,
+                                                   {info.GetShape(), info.GetShape()},
+                                                   {info.GetShape()},
+                                                   {0, 1});
 }
 
 BOOST_AUTO_TEST_CASE(SerializeDeserializeConvolution2d)
@@ -998,7 +1027,7 @@
                                                    {outputInfo.GetShape()});
 }
 
-BOOST_AUTO_TEST_CASE(SerializeDivision)
+BOOST_AUTO_TEST_CASE(SerializeDeserializeDivision)
 {
     class VerifyDivisionName : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
     {
@@ -1041,6 +1070,12 @@
 
     VerifyDivisionName nameChecker;
     deserializedNetwork->Accept(nameChecker);
+
+    CheckDeserializedNetworkAgainstOriginal<float>(*deserializedNetwork,
+                                                   *network,
+                                                   {info.GetShape(), info.GetShape()},
+                                                   {info.GetShape()},
+                                                   {0, 1});
 }
 
 BOOST_AUTO_TEST_CASE(SerializeDeserializeNormalization)
@@ -1173,7 +1208,7 @@
                                                    {outputTensorInfo.GetShape()});
 }
 
-BOOST_AUTO_TEST_CASE(SerializeRsqrt)
+BOOST_AUTO_TEST_CASE(SerializeDeserializeRsqrt)
 {
     class VerifyRsqrtName : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
     {
@@ -1374,4 +1409,58 @@
                                                    {outputTensorInfo.GetShape()});
 }
 
+BOOST_AUTO_TEST_CASE(SerializeDeserializeMerger)
+{
+    class VerifyMergerName : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
+    {
+    public:
+        void VisitMergerLayer(const armnn::IConnectableLayer* layer,
+                              const armnn::OriginsDescriptor& mergerDescriptor,
+                              const char* name = nullptr) override
+        {
+            BOOST_TEST(name == "MergerLayer");
+        }
+    };
+
+    unsigned int inputShapeOne[] = {2, 3, 2, 2};
+    unsigned int inputShapeTwo[] = {2, 3, 2, 2};
+    unsigned int outputShape[] = {4, 3, 2, 2};
+
+    const armnn::TensorInfo inputOneTensorInfo = armnn::TensorInfo(4, inputShapeOne, armnn::DataType::Float32);
+    const armnn::TensorInfo inputTwoTensorInfo = armnn::TensorInfo(4, inputShapeTwo, armnn::DataType::Float32);
+    const armnn::TensorInfo outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
+
+    std::vector<armnn::TensorShape> shapes;
+    shapes.push_back(inputOneTensorInfo.GetShape());
+    shapes.push_back(inputTwoTensorInfo.GetShape());
+
+    armnn::MergerDescriptor descriptor =
+        armnn::CreateMergerDescriptorForConcatenation(shapes.begin(), shapes.end(), 0);
+
+    armnn::INetworkPtr network = armnn::INetwork::Create();
+    armnn::IConnectableLayer* const inputLayerOne = network->AddInputLayer(0);
+    inputLayerOne->GetOutputSlot(0).SetTensorInfo(inputOneTensorInfo);
+    armnn::IConnectableLayer* const inputLayerTwo = network->AddInputLayer(1);
+    inputLayerTwo->GetOutputSlot(0).SetTensorInfo(inputTwoTensorInfo);
+    armnn::IConnectableLayer* const mergerLayer = network->AddMergerLayer(descriptor, "MergerLayer");
+    mergerLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
+
+    inputLayerOne->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(0));
+    inputLayerTwo->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(1));
+    mergerLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+    BOOST_CHECK(deserializedNetwork);
+
+    VerifyMergerName nameChecker;
+    deserializedNetwork->Accept(nameChecker);
+
+    CheckDeserializedNetworkAgainstOriginal<float>(*deserializedNetwork,
+                                                   *network,
+                                                   {inputOneTensorInfo.GetShape(), inputTwoTensorInfo.GetShape()},
+                                                   {outputTensorInfo.GetShape()},
+                                                   {0, 1});
+}
+
 BOOST_AUTO_TEST_SUITE_END()