IVGCVSW-2645 Add Serializer & Deserializer for Pooling2d

Change-Id: Iba41da3cccd539a0175f2ed0ff9a8b6a23c5fb6f
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Signed-off-by: Saoirse Stewart <saoirse.stewart@arm.com>
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 6503e6c..4295623 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -163,6 +163,8 @@
         src/armnnSerializer/Schema_generated.h
         src/armnnSerializer/Serializer.hpp
         src/armnnSerializer/Serializer.cpp
+        src/armnnSerializer/SerializerUtils.hpp
+        src/armnnSerializer/SerializerUtils.cpp
         src/armnnDeserializeParser/DeserializeParser.hpp
         src/armnnDeserializeParser/DeserializeParser.cpp
         )
@@ -554,6 +556,7 @@
                 src/armnnSerializer/test/SerializerTests.cpp
                 src/armnnDeserializeParser/test/DeserializeAdd.cpp
                 src/armnnDeserializeParser/test/DeserializeMultiplication.cpp
+                src/armnnDeserializeParser/test/DeserializePooling2d.cpp
                 src/armnnDeserializeParser/test/ParserFlatbuffersSerializeFixture.hpp
                 src/armnnDeserializeParser/test/SchemaSerialize.s
                 )
diff --git a/src/armnnDeserializeParser/DeserializeParser.cpp b/src/armnnDeserializeParser/DeserializeParser.cpp
index eb7bcca..f47c23f 100644
--- a/src/armnnDeserializeParser/DeserializeParser.cpp
+++ b/src/armnnDeserializeParser/DeserializeParser.cpp
@@ -136,6 +136,7 @@
     // register supported layers
     m_ParserFunctions[Layer_AdditionLayer]       = &DeserializeParser::ParseAdd;
     m_ParserFunctions[Layer_MultiplicationLayer] = &DeserializeParser::ParseMultiplication;
+    m_ParserFunctions[Layer_Pooling2dLayer]      = &DeserializeParser::ParsePooling2d;
     m_ParserFunctions[Layer_SoftmaxLayer]        = &DeserializeParser::ParseSoftmax;
 }
 
@@ -153,6 +154,8 @@
             return graphPtr->layers()->Get(layerIndex)->layer_as_MultiplicationLayer()->base();
         case Layer::Layer_OutputLayer:
             return graphPtr->layers()->Get(layerIndex)->layer_as_OutputLayer()->base()->base();
+        case Layer::Layer_Pooling2dLayer:
+            return graphPtr->layers()->Get(layerIndex)->layer_as_Pooling2dLayer()->base();
         case Layer::Layer_SoftmaxLayer:
             return graphPtr->layers()->Get(layerIndex)->layer_as_SoftmaxLayer()->base();
         case Layer::Layer_NONE:
@@ -356,7 +359,6 @@
     }
     std::ifstream file(fileName, std::ios::binary);
     fileContent = std::string((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
-
     return LoadGraphFromBinary(reinterpret_cast<const uint8_t*>(fileContent.c_str()), fileContent.size());
 }
 
@@ -581,8 +583,8 @@
     auto outputs = GetOutputs(m_Graph, layerIndex);
     CHECK_VALID_SIZE(outputs.size(), 1);
 
-    auto layerName = boost::str(boost::format("Addition:%1%") % layerIndex);
-    IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());
+    m_layerName = boost::str(boost::format("Addition:%1%") % layerIndex);
+    IConnectableLayer* layer = m_Network->AddAdditionLayer(m_layerName.c_str());
 
     armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -601,8 +603,8 @@
     auto outputs = GetOutputs(m_Graph, layerIndex);
     CHECK_VALID_SIZE(outputs.size(), 1);
 
-    auto layerName = boost::str(boost::format("Multiplication:%1%") % layerIndex);
-    IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
+    m_layerName = boost::str(boost::format("Multiplication:%1%") % layerIndex);
+    IConnectableLayer* layer = m_Network->AddMultiplicationLayer(m_layerName.c_str());
 
     armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -611,6 +613,119 @@
     RegisterOutputSlots(layerIndex, layer);
 }
 
+armnn::Pooling2dDescriptor DeserializeParser::GetPoolingDescriptor(DeserializeParser::PoolingDescriptor pooling2dDesc,
+                                                                   unsigned int layerIndex)
+{
+    armnn::Pooling2dDescriptor desc;
+
+    switch (pooling2dDesc->poolType())
+    {
+        case PoolingAlgorithm_Average:
+        {
+            desc.m_PoolType = armnn::PoolingAlgorithm::Average;
+            m_layerName     = boost::str(boost::format("AveragePool2D:%1%") % layerIndex);
+            break;
+        }
+        case PoolingAlgorithm_Max:
+        {
+            desc.m_PoolType = armnn::PoolingAlgorithm::Max;
+            m_layerName     = boost::str(boost::format("MaxPool2D:%1%") % layerIndex);
+            break;
+        }
+        default:
+        {
+            BOOST_ASSERT_MSG(false, "Unsupported pooling algorithm");
+        }
+    }
+
+    switch (pooling2dDesc->outputShapeRounding())
+    {
+        case OutputShapeRounding_Floor:
+        {
+            desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
+            break;
+        }
+        case OutputShapeRounding_Ceiling:
+        {
+            desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Ceiling;
+            break;
+        }
+        default:
+        {
+            BOOST_ASSERT_MSG(false, "Unsupported output shape rounding");
+        }
+    }
+
+    switch (pooling2dDesc->paddingMethod())
+    {
+        case PaddingMethod_Exclude:
+        {
+            desc.m_PaddingMethod = armnn::PaddingMethod::Exclude;
+            break;
+        }
+        case PaddingMethod_IgnoreValue:
+        {
+            desc.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
+            break;
+        }
+        default:
+        {
+            BOOST_ASSERT_MSG(false, "Unsupported padding method");
+        }
+    }
+
+    switch (pooling2dDesc->dataLayout())
+    {
+        case DataLayout_NCHW:
+        {
+            desc.m_DataLayout = armnn::DataLayout::NCHW;
+            break;
+        }
+        case DataLayout_NHWC:
+        {
+            desc.m_DataLayout = armnn::DataLayout::NHWC;
+            break;
+        }
+        default:
+        {
+            BOOST_ASSERT_MSG(false, "Unsupported data layout");
+        }
+    }
+
+    desc.m_PadRight   = pooling2dDesc->padRight();
+    desc.m_PadLeft    = pooling2dDesc->padLeft();
+    desc.m_PadBottom  = pooling2dDesc->padBottom();
+    desc.m_PadTop     = pooling2dDesc->padTop();
+    desc.m_StrideX    = pooling2dDesc->strideX();
+    desc.m_StrideY    = pooling2dDesc->strideY();
+    desc.m_PoolWidth  = pooling2dDesc->poolWidth();
+    desc.m_PoolHeight = pooling2dDesc->poolHeight();
+
+    return desc;
+}
+
+void DeserializeParser::ParsePooling2d(unsigned int layerIndex)
+{
+    CHECK_LAYERS(m_Graph, 0, layerIndex);
+
+    auto pooling2dDes = m_Graph->layers()->Get(layerIndex)->layer_as_Pooling2dLayer()->descriptor();
+
+    auto inputs = GetInputs(m_Graph, layerIndex);
+    CHECK_VALID_SIZE(inputs.size(), 1);
+
+    auto outputs = GetOutputs(m_Graph, layerIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+    auto outputInfo = ToTensorInfo(outputs[0]);
+
+    auto pooling2dDescriptor = GetPoolingDescriptor(pooling2dDes, layerIndex);
+
+    IConnectableLayer* layer = m_Network->AddPooling2dLayer(pooling2dDescriptor, m_layerName.c_str());
+    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+    RegisterInputSlots(layerIndex, layer);
+    RegisterOutputSlots(layerIndex, layer);
+}
+
 void DeserializeParser::ParseSoftmax(unsigned int layerIndex)
 {
     CHECK_LAYERS(m_Graph, 0, layerIndex);
diff --git a/src/armnnDeserializeParser/DeserializeParser.hpp b/src/armnnDeserializeParser/DeserializeParser.hpp
index ddd02ab..1edb5a9 100644
--- a/src/armnnDeserializeParser/DeserializeParser.hpp
+++ b/src/armnnDeserializeParser/DeserializeParser.hpp
@@ -17,6 +17,7 @@
     // Shorthands for deserializer types
     using GraphPtr = const armnn::armnnSerializer::SerializedGraph *;
     using TensorRawPtr = const armnn::armnnSerializer::TensorInfo *;
+    using PoolingDescriptor = const armnn::armnnSerializer::Pooling2dDescriptor *;
     using TensorRawPtrVector = std::vector<TensorRawPtr>;
     using LayerRawPtr = const armnn::armnnSerializer::LayerBase *;
     using LayerBaseRawPtr = const armnn::armnnSerializer::LayerBase *;
@@ -50,6 +51,8 @@
     static LayerBaseRawPtrVector GetGraphOutputs(const GraphPtr& graphPtr);
     static LayerBaseRawPtr GetBaseLayer(const GraphPtr& graphPtr, unsigned int layerIndex);
     static int32_t GetBindingLayerInfo(const GraphPtr& graphPtr, unsigned int layerIndex);
+    armnn::Pooling2dDescriptor GetPoolingDescriptor(PoolingDescriptor pooling2dDescriptor,
+                                                    unsigned int layerIndex);
 
 private:
     // No copying allowed until it is wanted and properly implemented
@@ -65,6 +68,7 @@
     void ParseUnsupportedLayer(unsigned int layerIndex);
     void ParseAdd(unsigned int layerIndex);
     void ParseMultiplication(unsigned int layerIndex);
+    void ParsePooling2d(unsigned int layerIndex);
     void ParseSoftmax(unsigned int layerIndex);
 
     void RegisterOutputSlotOfConnection(uint32_t connectionIndex, armnn::IOutputSlot* slot);
@@ -82,6 +86,7 @@
     armnn::INetworkPtr                    m_Network;
     GraphPtr                              m_Graph;
     std::vector<LayerParsingFunction>     m_ParserFunctions;
+    std::string                           m_layerName;
 
     /// This holds the data of the file that was read in from CreateNetworkFromBinaryFile
     /// Needed for m_Graph to point to
diff --git a/src/armnnDeserializeParser/test/DeserializePooling2d.cpp b/src/armnnDeserializeParser/test/DeserializePooling2d.cpp
new file mode 100644
index 0000000..70b96ba
--- /dev/null
+++ b/src/armnnDeserializeParser/test/DeserializePooling2d.cpp
@@ -0,0 +1,162 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersSerializeFixture.hpp"
+#include "../DeserializeParser.hpp"
+
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(DeserializeParser)
+
+struct Pooling2dFixture : public ParserFlatbuffersSerializeFixture
+{
+    explicit Pooling2dFixture(const std::string &inputShape,
+                              const std::string &outputShape,
+                              const std::string &dataType,
+                              const std::string &dataLayout,
+                              const std::string &poolingAlgorithm)
+    {
+        m_JsonString = R"(
+    {
+            inputIds: [0],
+            outputIds: [2],
+            layers: [
+            {
+                layer_type: "InputLayer",
+                layer: {
+                      base: {
+                            layerBindingId: 0,
+                            base: {
+                                index: 0,
+                                layerName: "InputLayer",
+                                layerType: "Input",
+                                inputSlots: [{
+                                    index: 0,
+                                    connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+                                }],
+                                outputSlots: [ {
+                                    index: 0,
+                                    tensorInfo: {
+                                        dimensions: )" + inputShape + R"(,
+                                        dataType: )" + dataType + R"(
+                                        }}]
+                                }
+                }}},
+                {
+                layer_type: "Pooling2dLayer",
+                layer: {
+                      base: {
+                           index: 1,
+                           layerName: "Pooling2dLayer",
+                           layerType: "Pooling2d",
+                           inputSlots: [{
+                                  index: 0,
+                                  connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+                           }],
+                           outputSlots: [ {
+                                  index: 0,
+                                  tensorInfo: {
+                                       dimensions: )" + outputShape + R"(,
+                                       dataType: )" + dataType + R"(
+
+                           }}]},
+                      descriptor: {
+                           poolType: )" + poolingAlgorithm + R"(,
+                           outputShapeRounding: "Floor",
+                           paddingMethod: Exclude,
+                           dataLayout: )" + dataLayout + R"(,
+                           padLeft: 0,
+                           padRight: 0,
+                           padTop: 0,
+                           padBottom: 0,
+                           poolWidth: 2,
+                           poolHeight: 2,
+                           strideX: 2,
+                           strideY: 2
+                           }
+                }},
+                {
+                layer_type: "OutputLayer",
+                layer: {
+                    base:{
+                          layerBindingId: 0,
+                          base: {
+                                index: 2,
+                                layerName: "OutputLayer",
+                                layerType: "Output",
+                                inputSlots: [{
+                                    index: 0,
+                                    connection: {sourceLayerIndex:1, outputSlotIndex:0 },
+                                }],
+                                outputSlots: [ {
+                                    index: 0,
+                                    tensorInfo: {
+                                        dimensions: )" + outputShape + R"(,
+                                        dataType: )" + dataType + R"(
+                                    },
+                            }],
+                        }}},
+            }]
+     }
+ )";
+        SetupSingleInputSingleOutput("InputLayer", "OutputLayer");
+    }
+};
+
+struct SimpleAvgPoolingFixture : Pooling2dFixture
+{
+    SimpleAvgPoolingFixture() : Pooling2dFixture("[ 1, 2, 2, 1 ]", "[ 1, 1, 1, 1 ]",
+                                              "Float32", "NHWC", "Average") {}
+};
+
+struct SimpleAvgPoolingFixture2 : Pooling2dFixture
+{
+    SimpleAvgPoolingFixture2() : Pooling2dFixture("[ 1, 2, 2, 1 ]",
+                                               "[ 1, 1, 1, 1 ]",
+                                                "QuantisedAsymm8", "NHWC", "Average") {}
+};
+
+struct SimpleMaxPoolingFixture : Pooling2dFixture
+{
+    SimpleMaxPoolingFixture() : Pooling2dFixture("[ 1, 1, 2, 2 ]",
+                                                 "[ 1, 1, 1, 1 ]",
+                                                 "Float32", "NCHW", "Max") {}
+};
+
+struct SimpleMaxPoolingFixture2 : Pooling2dFixture
+{
+    SimpleMaxPoolingFixture2() : Pooling2dFixture("[ 1, 1, 2, 2 ]",
+                                                  "[ 1, 1, 1, 1 ]",
+                                                  "QuantisedAsymm8", "NCHW", "Max") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(PoolingFloat32Avg, SimpleAvgPoolingFixture)
+{
+    RunTest<4, armnn::DataType::Float32>(0, { 2, 3, 5, 2 }, { 3 });
+}
+
+BOOST_FIXTURE_TEST_CASE(PoolingQuantisedAsymm8Avg, SimpleAvgPoolingFixture2)
+{
+    RunTest<4, armnn::DataType::QuantisedAsymm8>(0,
+                                                { 20, 40, 60, 80 },
+                                                { 50 });
+}
+
+BOOST_FIXTURE_TEST_CASE(PoolingFloat32Max, SimpleMaxPoolingFixture)
+{
+    RunTest<4, armnn::DataType::Float32>(0, { 2, 5, 5, 2 }, { 5 });
+}
+
+BOOST_FIXTURE_TEST_CASE(PoolingQuantisedAsymm8Max, SimpleMaxPoolingFixture2)
+{
+    RunTest<4, armnn::DataType::QuantisedAsymm8>(0,
+                                                { 20, 40, 60, 80 },
+                                                { 80 });
+}
+
+BOOST_AUTO_TEST_SUITE_END()
+
diff --git a/src/armnnSerializer/Schema.fbs b/src/armnnSerializer/Schema.fbs
index 411b89a..048181a 100644
--- a/src/armnnSerializer/Schema.fbs
+++ b/src/armnnSerializer/Schema.fbs
@@ -17,6 +17,11 @@
     Boolean = 4
 }
 
+enum DataLayout : byte {
+    NHWC = 0,
+    NCHW = 1
+}
+
 table TensorInfo {
     dimensions:[uint];
     dataType:DataType;
@@ -67,7 +72,8 @@
     Input = 1,
     Multiplication = 2,
     Output = 3,
-    Softmax = 4
+    Pooling2d = 4,
+    Softmax = 5
 }
 
 // Base layer table to be used as part of other layers
@@ -97,6 +103,42 @@
     base:LayerBase;
 }
 
+table Pooling2dLayer {
+    base:LayerBase;
+    descriptor:Pooling2dDescriptor;
+}
+
+enum PoolingAlgorithm : byte {
+    Max = 0,
+    Average = 1,
+    L2 = 2
+}
+
+enum OutputShapeRounding : byte {
+    Floor = 0,
+    Ceiling = 1
+}
+
+enum PaddingMethod : byte {
+    IgnoreValue = 0,
+    Exclude = 1
+}
+
+table Pooling2dDescriptor {
+    poolType:PoolingAlgorithm;
+    padLeft:uint;
+    padRight:uint;
+    padTop:uint;
+    padBottom:uint;
+    poolWidth:uint;
+    poolHeight:uint;
+    strideX:uint;
+    strideY:uint;
+    outputShapeRounding:OutputShapeRounding;
+    paddingMethod:PaddingMethod;
+    dataLayout:DataLayout;
+}
+
 table SoftmaxLayer {
     base:LayerBase;
     descriptor:SoftmaxDescriptor;
@@ -115,6 +157,7 @@
     InputLayer,
     MultiplicationLayer,
     OutputLayer,
+    Pooling2dLayer,
     SoftmaxLayer
 }
 
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index ba4b369..57228c4 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -4,9 +4,15 @@
 //
 
 #include "Serializer.hpp"
+
+#include "SerializerUtils.hpp"
+
 #include <armnn/ArmNN.hpp>
+
 #include <iostream>
+
 #include <Schema_generated.h>
+
 #include <flatbuffers/util.h>
 
 using namespace armnn;
@@ -16,25 +22,6 @@
 namespace armnnSerializer
 {
 
-serializer::DataType GetFlatBufferDataType(DataType dataType)
-{
-    switch (dataType)
-    {
-        case DataType::Float32:
-            return serializer::DataType::DataType_Float32;
-        case DataType::Float16:
-            return serializer::DataType::DataType_Float16;
-        case DataType::Signed32:
-            return serializer::DataType::DataType_Signed32;
-        case DataType::QuantisedAsymm8:
-            return serializer::DataType::DataType_QuantisedAsymm8;
-        case DataType::Boolean:
-            return serializer::DataType::DataType_Boolean;
-        default:
-            return serializer::DataType::DataType_Float16;
-    }
-}
-
 uint32_t SerializerVisitor::GetSerializedId(unsigned int guid)
 {
     std::pair<unsigned int, uint32_t> guidPair(guid, m_layerId);
@@ -140,6 +127,33 @@
     CreateAnyLayer(flatBufferSoftmaxLayer.o, serializer::Layer::Layer_SoftmaxLayer);
 }
 
+void SerializerVisitor::VisitPooling2dLayer(const IConnectableLayer* layer,
+                                            const Pooling2dDescriptor& pooling2dDescriptor,
+                                            const char* name)
+{
+    auto fbPooling2dBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_Pooling2d);
+    auto fbPooling2dDescriptor = serializer::CreatePooling2dDescriptor(
+        m_flatBufferBuilder,
+        GetFlatBufferPoolingAlgorithm(pooling2dDescriptor.m_PoolType),
+        pooling2dDescriptor.m_PadLeft,
+        pooling2dDescriptor.m_PadRight,
+        pooling2dDescriptor.m_PadTop,
+        pooling2dDescriptor.m_PadBottom,
+        pooling2dDescriptor.m_PoolWidth,
+        pooling2dDescriptor.m_PoolHeight,
+        pooling2dDescriptor.m_StrideX,
+        pooling2dDescriptor.m_StrideY,
+        GetFlatBufferOutputShapeRounding(pooling2dDescriptor.m_OutputShapeRounding),
+        GetFlatBufferPaddingMethod(pooling2dDescriptor.m_PaddingMethod),
+        GetFlatBufferDataLayout(pooling2dDescriptor.m_DataLayout));
+
+    auto fbPooling2dLayer = serializer::CreatePooling2dLayer(m_flatBufferBuilder,
+                                                             fbPooling2dBaseLayer,
+                                                             fbPooling2dDescriptor);
+
+    CreateAnyLayer(fbPooling2dLayer.o, serializer::Layer::Layer_Pooling2dLayer);
+}
+
 fb::Offset<serializer::LayerBase> SerializerVisitor::CreateLayerBase(const IConnectableLayer* layer,
                                                                      const serializer::LayerType layerType)
 {
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index ec26dc1..169ed09 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -60,6 +60,10 @@
                            const armnn::SoftmaxDescriptor& softmaxDescriptor,
                            const char* name = nullptr) override;
 
+    void VisitPooling2dLayer(const armnn::IConnectableLayer* layer,
+                             const armnn::Pooling2dDescriptor& pooling2dDescriptor,
+                             const char* name = nullptr) override;
+
 private:
 
     /// Creates the Input Slots and Output Slots and LayerBase for the layer.
diff --git a/src/armnnSerializer/SerializerSupport.md b/src/armnnSerializer/SerializerSupport.md
index 617eafb..a94e0ad 100644
--- a/src/armnnSerializer/SerializerSupport.md
+++ b/src/armnnSerializer/SerializerSupport.md
@@ -9,5 +9,6 @@
 * Addition
 * Multiplication
+* Pooling2d
 * Softmax
 
 More machine learning layers will be supported in future releases.
\ No newline at end of file
diff --git a/src/armnnSerializer/SerializerUtils.cpp b/src/armnnSerializer/SerializerUtils.cpp
new file mode 100644
index 0000000..5772eab
--- /dev/null
+++ b/src/armnnSerializer/SerializerUtils.cpp
@@ -0,0 +1,83 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "SerializerUtils.hpp"
+
+namespace armnnSerializer
+{
+
+using namespace armnn;
+namespace serializer = armnn::armnnSerializer;
+
+serializer::DataType GetFlatBufferDataType(DataType dataType)
+{
+    switch (dataType)
+    {
+        case DataType::Float32:
+            return serializer::DataType::DataType_Float32;
+        case DataType::Float16:
+            return serializer::DataType::DataType_Float16;
+        case DataType::Signed32:
+            return serializer::DataType::DataType_Signed32;
+        case DataType::QuantisedAsymm8:
+            return serializer::DataType::DataType_QuantisedAsymm8;
+        case DataType::Boolean:
+            return serializer::DataType::DataType_Boolean;
+        default:
+            return serializer::DataType::DataType_Float16;
+    }
+}
+
+serializer::DataLayout GetFlatBufferDataLayout(DataLayout dataLayout)
+{
+    switch (dataLayout)
+    {
+        case DataLayout::NHWC:
+            return serializer::DataLayout::DataLayout_NHWC;
+        case DataLayout::NCHW:
+        default:
+            return serializer::DataLayout::DataLayout_NCHW;
+    }
+}
+
+serializer::PoolingAlgorithm GetFlatBufferPoolingAlgorithm(PoolingAlgorithm poolingAlgorithm)
+{
+    switch (poolingAlgorithm)
+    {
+        case PoolingAlgorithm::Average:
+            return serializer::PoolingAlgorithm::PoolingAlgorithm_Average;
+        case PoolingAlgorithm::L2:
+            return serializer::PoolingAlgorithm::PoolingAlgorithm_L2;
+        case PoolingAlgorithm::Max:
+        default:
+            return serializer::PoolingAlgorithm::PoolingAlgorithm_Max;
+    }
+}
+
+serializer::OutputShapeRounding GetFlatBufferOutputShapeRounding(OutputShapeRounding outputShapeRounding)
+{
+    switch (outputShapeRounding)
+    {
+        case OutputShapeRounding::Ceiling:
+            return serializer::OutputShapeRounding::OutputShapeRounding_Ceiling;
+        case OutputShapeRounding::Floor:
+        default:
+            return serializer::OutputShapeRounding::OutputShapeRounding_Floor;
+    }
+}
+
+serializer::PaddingMethod GetFlatBufferPaddingMethod(PaddingMethod paddingMethod)
+{
+    switch (paddingMethod)
+    {
+        case PaddingMethod::IgnoreValue:
+            return serializer::PaddingMethod::PaddingMethod_IgnoreValue;
+        case PaddingMethod::Exclude:
+        default:
+            return serializer::PaddingMethod::PaddingMethod_Exclude;
+    }
+}
+
+} // namespace armnnSerializer
\ No newline at end of file
diff --git a/src/armnnSerializer/SerializerUtils.hpp b/src/armnnSerializer/SerializerUtils.hpp
new file mode 100644
index 0000000..72a8806
--- /dev/null
+++ b/src/armnnSerializer/SerializerUtils.hpp
@@ -0,0 +1,25 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/ArmNN.hpp>
+
+#include <Schema_generated.h>
+
+namespace armnnSerializer
+{
+
+armnn::armnnSerializer::DataType GetFlatBufferDataType(armnn::DataType dataType);
+
+armnn::armnnSerializer::DataLayout GetFlatBufferDataLayout(armnn::DataLayout dataLayout);
+
+armnn::armnnSerializer::PoolingAlgorithm GetFlatBufferPoolingAlgorithm(armnn::PoolingAlgorithm poolingAlgorithm);
+
+armnn::armnnSerializer::OutputShapeRounding GetFlatBufferOutputShapeRounding(
+    armnn::OutputShapeRounding outputShapeRounding);
+
+armnn::armnnSerializer::PaddingMethod GetFlatBufferPaddingMethod(armnn::PaddingMethod paddingMethod);
+
+} // namespace armnnSerializer
\ No newline at end of file
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index 5b55682..4b6bf1e 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -15,12 +15,34 @@
 #include <vector>
 
 #include <boost/test/unit_test.hpp>
-
 #include <flatbuffers/idl.h>
 
-BOOST_AUTO_TEST_SUITE(SerializerTests)
+using armnnDeserializeParser::IDeserializeParser;
 
-armnnDeserializeParser::IDeserializeParserPtr g_Parser = armnnDeserializeParser::IDeserializeParser::Create();
+namespace
+{
+
+armnn::INetworkPtr DeserializeNetwork(const std::string& serializerString)
+{
+    std::vector<std::uint8_t> const serializerVector{serializerString.begin(), serializerString.end()};
+    return armnnDeserializeParser::IDeserializeParser::Create()->CreateNetworkFromBinary(serializerVector);
+}
+
+std::string SerializeNetwork(const armnn::INetwork& network)
+{
+    armnnSerializer::Serializer serializer;
+    serializer.Serialize(network);
+
+    std::stringstream stream;
+    serializer.SaveSerializedToStream(stream);
+
+    std::string serializerString{stream.str()};
+    return serializerString;
+}
+
+} // anonymous namespace
+
+BOOST_AUTO_TEST_SUITE(SerializerTests)
 
 BOOST_AUTO_TEST_CASE(SimpleNetworkSerialization)
 {
@@ -78,55 +100,47 @@
 
     // Create test network
     armnn::INetworkPtr network = armnn::INetwork::Create();
-    armnn::IConnectableLayer *const inputLayer   = network->AddInputLayer(0);
-    armnn::IConnectableLayer *const softmaxLayer = network->AddSoftmaxLayer(descriptor, "softmax");
-    armnn::IConnectableLayer *const outputLayer  = network->AddOutputLayer(0);
+    armnn::IConnectableLayer* const inputLayer   = network->AddInputLayer(0);
+    armnn::IConnectableLayer* const softmaxLayer = network->AddSoftmaxLayer(descriptor, "softmax");
+    armnn::IConnectableLayer* const outputLayer  = network->AddOutputLayer(0);
 
     inputLayer->GetOutputSlot(0).Connect(softmaxLayer->GetInputSlot(0));
     inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
     softmaxLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
     softmaxLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
 
-    // Serialize
-    armnnSerializer::Serializer serializer;
-    serializer.Serialize(*network);
-    std::stringstream stream;
-    serializer.SaveSerializedToStream(stream);
-    const std::string serializerString{stream.str()};
-
-    // Deserialize
-    armnn::INetworkPtr deserializedNetwork =
-        g_Parser->CreateNetworkFromBinary({serializerString.begin(), serializerString.end()});
+    // Serialize & deserialize network
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
     armnn::IRuntime::CreationOptions options;
-    armnn::IRuntimePtr run = armnn::IRuntime::Create(options);
+    armnn::IRuntimePtr runtime = armnn::IRuntime::Create(options);
 
     armnn::IOptimizedNetworkPtr optimizedNetwork =
-        armnn::Optimize(*network, {armnn::Compute::CpuRef}, run->GetDeviceSpec());
+        armnn::Optimize(*network, {armnn::Compute::CpuRef}, runtime->GetDeviceSpec());
     BOOST_CHECK(optimizedNetwork);
 
     armnn::IOptimizedNetworkPtr deserializedOptimizedNetwork =
-        armnn::Optimize(*deserializedNetwork, {armnn::Compute::CpuRef}, run->GetDeviceSpec());
+        armnn::Optimize(*deserializedNetwork, {armnn::Compute::CpuRef}, runtime->GetDeviceSpec());
     BOOST_CHECK(deserializedOptimizedNetwork);
 
     armnn::NetworkId networkId1;
     armnn::NetworkId networkId2;
 
-    run->LoadNetwork(networkId1, std::move(optimizedNetwork));
-    run->LoadNetwork(networkId2, std::move(deserializedOptimizedNetwork));
+    runtime->LoadNetwork(networkId1, std::move(optimizedNetwork));
+    runtime->LoadNetwork(networkId2, std::move(deserializedOptimizedNetwork));
 
     std::vector<float> inputData(tensorInfo.GetNumElements());
     std::iota(inputData.begin(), inputData.end(), 0);
 
     armnn::InputTensors inputTensors1
     {
-         {0, armnn::ConstTensor(run->GetInputTensorInfo(networkId1, 0), inputData.data())}
+         {0, armnn::ConstTensor(runtime->GetInputTensorInfo(networkId1, 0), inputData.data())}
     };
 
     armnn::InputTensors inputTensors2
     {
-         {0, armnn::ConstTensor(run->GetInputTensorInfo(networkId2, 0), inputData.data())}
+         {0, armnn::ConstTensor(runtime->GetInputTensorInfo(networkId2, 0), inputData.data())}
     };
 
     std::vector<float> outputData1(inputData.size());
@@ -134,19 +148,83 @@
 
     armnn::OutputTensors outputTensors1
     {
-         {0, armnn::Tensor(run->GetOutputTensorInfo(networkId1, 0), outputData1.data())}
+         {0, armnn::Tensor(runtime->GetOutputTensorInfo(networkId1, 0), outputData1.data())}
     };
 
     armnn::OutputTensors outputTensors2
     {
-         {0, armnn::Tensor(run->GetOutputTensorInfo(networkId2, 0), outputData2.data())}
+         {0, armnn::Tensor(runtime->GetOutputTensorInfo(networkId2, 0), outputData2.data())}
     };
 
-    run->EnqueueWorkload(networkId1, inputTensors1, outputTensors1);
-    run->EnqueueWorkload(networkId2, inputTensors2, outputTensors2);
+    runtime->EnqueueWorkload(networkId1, inputTensors1, outputTensors1);
+    runtime->EnqueueWorkload(networkId2, inputTensors2, outputTensors2);
 
     BOOST_CHECK_EQUAL_COLLECTIONS(outputData1.begin(), outputData1.end(),
                                   outputData2.begin(), outputData2.end());
 }
 
+BOOST_AUTO_TEST_CASE(SimplePooling2dIntegration)
+{
+    armnn::NetworkId networkIdentifier;
+    armnn::IRuntime::CreationOptions options; // default options
+    armnn::IRuntimePtr runtime = armnn::IRuntime::Create(options);
+
+    unsigned int inputShape[]  = {1, 2, 2, 1};
+    unsigned int outputShape[] = {1, 1, 1, 1};
+
+    auto inputTensorInfo  = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
+    auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
+
+    armnn::Pooling2dDescriptor desc;
+    desc.m_DataLayout          = armnn::DataLayout::NHWC;
+    desc.m_PadTop              = 0;
+    desc.m_PadBottom           = 0;
+    desc.m_PadLeft             = 0;
+    desc.m_PadRight            = 0;
+    desc.m_PoolType            = armnn::PoolingAlgorithm::Average;
+    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
+    desc.m_PaddingMethod       = armnn::PaddingMethod::Exclude;
+    desc.m_PoolHeight          = 2;
+    desc.m_PoolWidth           = 2;
+    desc.m_StrideX             = 2;
+    desc.m_StrideY             = 2;
+
+    armnn::INetworkPtr network = armnn::INetwork::Create();
+    armnn::IConnectableLayer *const inputLayer = network->AddInputLayer(0);
+    armnn::IConnectableLayer *const pooling2dLayer = network->AddPooling2dLayer(desc, "Pooling2dLayer");
+    armnn::IConnectableLayer *const outputLayer = network->AddOutputLayer(0);
+
+    inputLayer->GetOutputSlot(0).Connect(pooling2dLayer->GetInputSlot(0));
+    inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+    pooling2dLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+    pooling2dLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    auto deserializeNetwork = DeserializeNetwork(SerializeNetwork(*network));
+
+    //Optimize the deserialized network
+    auto deserializedOptimized = Optimize(*deserializeNetwork, {armnn::Compute::CpuRef},
+                                          runtime->GetDeviceSpec());
+
+    // Load graph into runtime
+    runtime->LoadNetwork(networkIdentifier, std::move(deserializedOptimized));
+
+    std::vector<float> input1Data(inputTensorInfo.GetNumElements());
+    std::iota(input1Data.begin(), input1Data.end(), 4);
+
+    armnn::InputTensors inputTensors
+    {
+          {0, armnn::ConstTensor(runtime->GetInputTensorInfo(networkIdentifier, 0), input1Data.data())}
+    };
+
+    std::vector<float> outputData(input1Data.size());
+    armnn::OutputTensors outputTensors
+    {
+           {0, armnn::Tensor(runtime->GetOutputTensorInfo(networkIdentifier, 0), outputData.data())}
+    };
+
+    runtime->EnqueueWorkload(networkIdentifier, inputTensors, outputTensors);
+
+    BOOST_CHECK_EQUAL(outputData[0], 5.5);
+}
+
 BOOST_AUTO_TEST_SUITE_END()