Move inlined NetworkUtils utility functions into NetworkUtils.cpp

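The FP16/FP32 conversion helpers InsertConvertFp16ToFp32LayersBefore()
and InsertConvertFp32ToFp16LayersAfter() were defined inline in
NetworkUtils.hpp. Move their definitions into a new translation unit,
src/armnn/NetworkUtils.cpp, keep only the declarations in the header,
and add the new source file to the Android.mk and CMakeLists.txt build
lists.

Illustrative usage (a sketch only; the calling optimization pass and
the names graph and fp32OnlyLayer are assumed, not part of this
change):

    // Sketch: run an FP32-only layer inside an FP16 network by
    // converting its inputs to FP32 on the way in and its outputs
    // back to FP16 on the way out.
    std::vector<ConvertFp16ToFp32Layer*> convertersIn =
        InsertConvertFp16ToFp32LayersBefore(graph, fp32OnlyLayer);
    std::vector<ConvertFp32ToFp16Layer*> convertersOut =
        InsertConvertFp32ToFp16LayersAfter(graph, fp32OnlyLayer);
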
Change-Id: I26fb13717f64c942b2648702c52be18be856042d
diff --git a/Android.mk b/Android.mk
index 988d6f0..3eeddcb 100644
--- a/Android.mk
+++ b/Android.mk
@@ -122,6 +122,7 @@
         src/armnn/Layer.cpp \
         src/armnn/LoadedNetwork.cpp \
         src/armnn/Network.cpp \
+        src/armnn/NetworkUtils.cpp \
         src/armnn/WallClockTimer.cpp \
         src/armnn/ProfilingEvent.cpp \
         src/armnn/Profiling.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index c6af71b..ce04bba 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -256,6 +256,7 @@
     src/armnn/LoadedNetwork.hpp
     src/armnn/Network.cpp
     src/armnn/Network.hpp
+    src/armnn/NetworkUtils.cpp
     src/armnn/NetworkUtils.hpp
     src/armnn/Observable.cpp
     src/armnn/Observable.hpp
diff --git a/src/armnn/NetworkUtils.cpp b/src/armnn/NetworkUtils.cpp
new file mode 100644
index 0000000..1e3add6
--- /dev/null
+++ b/src/armnn/NetworkUtils.cpp
@@ -0,0 +1,84 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NetworkUtils.hpp"
+
+namespace armnn
+{
+
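+// Inserts an FP16->FP32 converter in front of every input slot of the given
+// layer and retypes the layer's own outputs to FP32.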
+std::vector<ConvertFp16ToFp32Layer*> InsertConvertFp16ToFp32LayersBefore(Graph& graph, Layer& layer)
+{
+    std::vector<ConvertFp16ToFp32Layer*> convertLayers;
+    convertLayers.reserve(layer.GetNumInputSlots());
+
+    for (auto&& inputSlot = layer.BeginInputSlots(); inputSlot != layer.EndInputSlots(); ++inputSlot)
+    {
+        // Insert FP16 to FP32 converter layer before the layer
+        const std::string name =
+            std::string("convert_fp16_to_fp32-" + std::to_string(inputSlot->GetSlotIndex()) + "-") + layer.GetName();
+        ConvertFp16ToFp32Layer* convertLayer =
+            graph.InsertNewLayer<ConvertFp16ToFp32Layer>(*inputSlot, name.c_str());
+
+        // Sets output tensor info for the convert layer
+        TensorInfo convertInfo = convertLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
+        convertInfo.SetDataType(DataType::Float32);
+
+        convertLayer->GetOutputSlot().SetTensorInfo(convertInfo);
+
+        convertLayers.emplace_back(convertLayer);
+    }
+
+    // Sets the output tensor info for the unsupported layer
+    auto UpdateTensorInfo = [](auto& outputSlot)
+    {
+        // Copy original tensor info and change data type to FP32
+        TensorInfo newTensorInfo = outputSlot.GetTensorInfo();
+        newTensorInfo.SetDataType(DataType::Float32);
+
+        outputSlot.SetTensorInfo(newTensorInfo);
+    };
+
+    std::for_each(layer.BeginOutputSlots(), layer.EndOutputSlots(), UpdateTensorInfo);
+
+    return convertLayers;
+}
+
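+// Appends an FP32->FP16 converter to every output slot of the given layer;
+// the layer's outputs are expected to already be FP32.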
+std::vector<ConvertFp32ToFp16Layer*> InsertConvertFp32ToFp16LayersAfter(Graph& graph, Layer& layer)
+{
+    std::vector<ConvertFp32ToFp16Layer*> convertLayers;
+    convertLayers.reserve(layer.GetNumOutputSlots());
+
+    int index = 0;
+    // Change outputs to DataType::Float16
+    for (auto&& outputSlot = layer.BeginOutputSlots(); outputSlot != layer.EndOutputSlots(); ++outputSlot)
+    {
+        BOOST_ASSERT(outputSlot->GetTensorInfo().GetDataType() == DataType::Float32);
+
+        // Insert FP32 to FP16 converter layer after the layer
+        const std::string name =
+            std::string("convert_fp32_to_fp16-" + std::to_string(index++) + "-") + layer.GetName();
+        ConvertFp32ToFp16Layer* convertLayer =
+            graph.InsertNewLayer<ConvertFp32ToFp16Layer>(*outputSlot, name.c_str());
+
+        // Sets output tensor info for the convert layer.
+        TensorInfo convertInfo = convertLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
+        convertInfo.SetDataType(DataType::Float16);
+
+        convertLayer->GetOutputSlot().SetTensorInfo(convertInfo);
+
+        convertLayers.emplace_back(convertLayer);
+    }
+
+    return convertLayers;
+}
+
+} // namespace armnn
diff --git a/src/armnn/NetworkUtils.hpp b/src/armnn/NetworkUtils.hpp
index 60cbf27..dbb8538 100644
--- a/src/armnn/NetworkUtils.hpp
+++ b/src/armnn/NetworkUtils.hpp
@@ -10,70 +10,8 @@
 namespace armnn
 {
 
-inline std::vector<ConvertFp16ToFp32Layer*> InsertConvertFp16ToFp32LayersBefore(Graph& graph, Layer& layer)
-{
-    std::vector<ConvertFp16ToFp32Layer*> convertLayers;
-    convertLayers.reserve(layer.GetNumInputSlots());
+std::vector<ConvertFp16ToFp32Layer*> InsertConvertFp16ToFp32LayersBefore(Graph& graph, Layer& layer);
 
-    for (auto&& inputSlot = layer.BeginInputSlots(); inputSlot != layer.EndInputSlots(); ++inputSlot)
-    {
-        // Insert FP16 to FP32 converter layer before the layer
-        const std::string name =
-            std::string("convert_fp16_to_fp32-" + std::to_string(inputSlot->GetSlotIndex()) + "-") + layer.GetName();
-        ConvertFp16ToFp32Layer* convertLayer =
-            graph.InsertNewLayer<ConvertFp16ToFp32Layer>(*inputSlot, name.c_str());
+std::vector<ConvertFp32ToFp16Layer*> InsertConvertFp32ToFp16LayersAfter(Graph& graph, Layer& layer);
 
-        // Sets output tensor info for the convert layer
-        TensorInfo convertInfo = convertLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
-        convertInfo.SetDataType(DataType::Float32);
-
-        convertLayer->GetOutputSlot().SetTensorInfo(convertInfo);
-
-        convertLayers.emplace_back(convertLayer);
-    }
-
-    // Sets the output tensor info for the unsupported layer
-    auto UpdateTensorInfo = [](auto& outputSlot)
-    {
-        // Copy original tensor info and change data type to FP32
-        TensorInfo newTensorInfo = outputSlot.GetTensorInfo();
-        newTensorInfo.SetDataType(DataType::Float32);
-
-        outputSlot.SetTensorInfo(newTensorInfo);
-    };
-
-    std::for_each(layer.BeginOutputSlots(), layer.EndOutputSlots(), UpdateTensorInfo);
-
-    return convertLayers;
-}
-
-inline std::vector<ConvertFp32ToFp16Layer*> InsertConvertFp32ToFp16LayersAfter(Graph& graph, Layer& layer)
-{
-    std::vector<ConvertFp32ToFp16Layer*> convertLayers;
-    convertLayers.reserve(layer.GetNumOutputSlots());
-
-    int index = 0;
-    // Change outputs to DataType::Float16
-    for (auto&& outputSlot = layer.BeginOutputSlots(); outputSlot != layer.EndOutputSlots(); ++outputSlot)
-    {
-        BOOST_ASSERT(outputSlot->GetTensorInfo().GetDataType() == DataType::Float32);
-
-        // Insert FP32 to FP16 converter layer after the layer
-        const std::string name =
-            std::string("convert_fp32_to_fp16-" + std::to_string(index++) + "-") + layer.GetName();
-        ConvertFp32ToFp16Layer* convertLayer =
-            graph.InsertNewLayer<ConvertFp32ToFp16Layer>(*outputSlot, name.c_str());
-
-        // Sets output tensor info for the convert layer.
-        TensorInfo convertInfo = convertLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
-        convertInfo.SetDataType(DataType::Float16);
-
-        convertLayer->GetOutputSlot().SetTensorInfo(convertInfo);
-
-        convertLayers.emplace_back(convertLayer);
-    }
-
-    return convertLayers;
-}
-
-} //namespace armnn
\ No newline at end of file
+} // namespace armnn