IVGCVSW-4307 Onnx Segfault Bug

 * Fixed ParseConstant
 * Fixed Exception
 * Fixed 0 dimension tensor
 * Added a unit test checking addition of a tensor and a scalar
 * Changed C++-style comments to Doxygen style

Signed-off-by: Ryan OShea <Ryan.OShea2@arm.com>
Change-Id: Ied7cbe2e11e713c23d87106d2db39e0eb446c920
diff --git a/src/armnnOnnxParser/OnnxParser.cpp b/src/armnnOnnxParser/OnnxParser.cpp
index 89be428..0c1af03 100644
--- a/src/armnnOnnxParser/OnnxParser.cpp
+++ b/src/armnnOnnxParser/OnnxParser.cpp
@@ -234,17 +234,28 @@
       shapeDims.push_back(CHECKED_NON_NEGATIVE(CHECKED_INT32(onnxShape.dim(i).dim_value())));
   }
 
+  if (shapeDims.empty())
+  {
+      shapeDims.push_back(1);
+  }
+
   return ToTensorInfo(info.name(), shapeDims, info.type().tensor_type().elem_type());
 }
 
 armnn::TensorInfo ToTensorInfo(const onnx::TensorProto& tensor)
 {
   std::vector<unsigned int> shapeDims;
+
   for (auto dim: tensor.dims())
   {
       shapeDims.push_back(CHECKED_NON_NEGATIVE(CHECKED_INT32(dim)));
   }
 
+  if (shapeDims.empty())
+  {
+      shapeDims.push_back(1);
+  }
+
   return ToTensorInfo(tensor.name(), shapeDims, tensor.data_type());
 }
 
@@ -624,7 +635,7 @@
         const std::string& operation = node.op_type();
 
         // check which layers we handled already (add and matmul fused as FC)
-        if(operation == "MatMul" )
+        if (operation == "MatMul" )
         {
             if(m_OutputsFusedAndUsed[nodeIndex].inputForNodes != m_OutputsFusedAndUsed[nodeIndex].fusedWithNodes.size())
             {
@@ -881,7 +892,6 @@
 void OnnxParser::ParseConstant(const onnx::NodeProto& node)
 {
     CHECK_VALID_SIZE(static_cast<size_t>(node.attribute_size()), 1);
-
     if (!node.attribute(0).has_t())
     {
         throw ParseException(boost::str(
@@ -897,9 +907,10 @@
 
     //Register this as a m_ConstParam so we know we can use it as a constant param in future layers.
     m_TensorsInfo[node.output(0)].m_tensor = std::make_unique<const onnx::TensorProto>(onnxTensor);
+    m_TensorsInfo[node.output(0)].m_info = std::make_unique<TensorInfo>(ToTensorInfo(onnxTensor));
+    m_TensorsInfo[node.output(0)].m_dtype = static_cast<onnx::TensorProto::DataType>(onnxTensor.data_type());
 
     CreateConstantLayer(node.output(0), node.name());
-
 }
 
 void OnnxParser::ParseMaxPool(const onnx::NodeProto& node)
@@ -1505,11 +1516,9 @@
 
      // register the input connection -> for constant inputs, we need to make a newDim constant layer
      if(m_TensorsInfo[inputs.first].isConstant()) {
-
          CreateConstantLayer(inputs.first, boost::str(boost::format("Add:constant_of_%1%") % node.input(0)));
      }
      if(m_TensorsInfo[inputs.second].isConstant()) {
-
          CreateConstantLayer(inputs.second, boost::str(boost::format("Add:constant_of_%1%") % node.input(1)));
      }
      RegisterInputSlots(layer, {inputs.first, inputs.second});
@@ -1653,16 +1662,16 @@
             m_TensorConnections[tensorId] = TensorSlots();
         }
 
-        TensorSlots & tensorSlots = m_TensorConnections[tensorId];
+        TensorSlots& tensorSlots = m_TensorConnections[tensorId];
 
         // assuming there is only one producer for that tensor
         if (tensorSlots.outputSlot != nullptr)
         {
             throw ParseException(boost::str(
                     boost::format("Another layer has already registered itself as the producer of "
-                                  "tensor:%2% %3%") %
-                                   tensorId %
-                                   CHECK_LOCATION().AsString()));
+                                  "tensor:%1% %2%")
+                                  % tensorId
+                                  % CHECK_LOCATION().AsString()));
         }
         tensorSlots.outputSlot = slot;
     }
diff --git a/src/armnnOnnxParser/OnnxParser.hpp b/src/armnnOnnxParser/OnnxParser.hpp
index a467180..f9fa6d9 100644
--- a/src/armnnOnnxParser/OnnxParser.hpp
+++ b/src/armnnOnnxParser/OnnxParser.hpp
@@ -54,10 +54,10 @@
     static ModelPtr LoadModelFromTextFile(const char * fileName);
     static ModelPtr LoadModelFromString(const std::string& inputString);
 
-    ///Retrieve inputs names
+    /// Retrieve inputs names
     static std::vector<std::string> GetInputs(ModelPtr& model);
 
-    ///Retrieve outputs names
+    /// Retrieve outputs names
     static std::vector<std::string> GetOutputs(ModelPtr& model);
 
 private:
@@ -65,7 +65,7 @@
     /// Parses a ModelProto loaded into memory from one of the other CreateNetwork*
     armnn::INetworkPtr CreateNetworkFromModel(onnx::ModelProto& model);
 
-    ///Parse every node and make the connection between the resulting tensors
+    /// Parse every node and make the connection between the resulting tensors
     void LoadGraph();
 
     void SetupInfo(const google::protobuf::RepeatedPtrField<onnx::ValueInfoProto >* list);
@@ -136,10 +136,10 @@
     /// The network we're building. Gets cleared after it is passed to the user
     armnn::INetworkPtr m_Network;
 
-    ///Ptr to the graph we're building the network from
+    /// Ptr to the graph we're building the network from
     GraphPtr m_Graph;
 
-    ///Map of the information for every tensor
+    /// Map of the information for every tensor
     struct OnnxTensor
     {
         std::unique_ptr<armnn::TensorInfo>          m_info;
@@ -148,7 +148,6 @@
 
         OnnxTensor() : m_info(nullptr), m_tensor(nullptr), m_dtype(onnx::TensorProto::FLOAT) { }
         bool isConstant() { return m_tensor != nullptr; }
-
     };
 
     std::unordered_map<std::string, OnnxTensor> m_TensorsInfo;
@@ -166,10 +165,10 @@
 
         TensorSlots() : outputSlot(nullptr) { }
     };
-    ///Map of the tensor names to their connections for the connections of the layers of the graph
+    /// Map of the tensor names to their connections for the connections of the layers of the graph
     std::unordered_map<std::string, TensorSlots> m_TensorConnections;
 
-    //Map of the tensor names to their node and index in graph.node()
+    /// Map of the tensor names to their node and index in graph.node()
     std::unordered_map<std::string, std::pair<const onnx::NodeProto*, int>> m_OutputsMap;
 
     /// Number of times a specific node (identified by his index number) was used as input
diff --git a/src/armnnOnnxParser/test/Addition.cpp b/src/armnnOnnxParser/test/Addition.cpp
index 993a620..6fc8eb1 100644
--- a/src/armnnOnnxParser/test/Addition.cpp
+++ b/src/armnnOnnxParser/test/Addition.cpp
@@ -286,6 +286,103 @@
     }
 };
 
+struct AddScalarFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
+{
+    AddScalarFixture(const std::string& dataType)
+    {
+        m_Prototext = R"(
+                   ir_version: 3
+                   producer_name:  "CNTK"
+                   producer_version:  "2.5.1"
+                   domain:  "ai.cntk"
+                   model_version: 1
+                   graph {
+                     name:  "CNTKGraph"
+                     input {
+                        name: "Input0"
+                        type {
+                          tensor_type {
+                            elem_type: )" + dataType + R"(
+                            shape {
+                              dim {
+                                dim_value: 1
+                              }
+                              dim {
+                                dim_value: 1
+                              }
+                              dim {
+                                dim_value: 2
+                              }
+                              dim {
+                                dim_value: 2
+                              }
+                            }
+                          }
+                        }
+                      }
+                      input {
+                         name: "Input1"
+                         type {
+                           tensor_type {
+                             elem_type: )" + dataType + R"(
+                             shape {
+                               dim {
+                                 dim_value: 1
+                               }
+                             }
+                           }
+                         }
+                       }
+                       node {
+                            input: "Input0"
+                            input: "Input1"
+                            output: "Output"
+                            name: "addition"
+                            op_type: "Add"
+                            doc_string: ""
+                            domain: ""
+                          }
+                          output {
+                              name: "Output"
+                              type {
+                                 tensor_type {
+                                   elem_type: 1
+                                   shape {
+                                       dim {
+                                           dim_value: 1
+                                       }
+                                       dim {
+                                           dim_value: 1
+                                       }
+                                       dim {
+                                           dim_value: 2
+                                       }
+                                       dim {
+                                           dim_value: 2
+                                       }
+                                   }
+                                }
+                            }
+                        }
+                    }
+                   opset_import {
+                      version: 7
+                    })";
+    }
+};
+
+struct AddValidScalarFixture : AddScalarFixture
+{
+    AddValidScalarFixture() : AddScalarFixture("1") {
+        Setup();
+    }
+};
+
+struct AddInvalidScalarFixture : AddScalarFixture
+{
+    AddInvalidScalarFixture() : AddScalarFixture("6") { }
+};
+
 BOOST_FIXTURE_TEST_CASE(ValidAddTest, AddValidFixture)
 {
     RunTest<4>({{"Input0", {1.0f, 2.0f, -3.0f, -4.0f}},
@@ -308,4 +405,15 @@
                 {"Input1", {1.0f, 2.0f, 3.0, 4.0f}}}, {{"Output", {2.0, 4.0, 0, 0.0}}});
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+BOOST_FIXTURE_TEST_CASE(ValidAddScalarTest, AddValidScalarFixture)
+{
+    RunTest<4>({{"Input0", {1.0f, 2.0f, -3.0f, -4.0f}},
+                {"Input1", {-8.0f}}}, {{"Output", {-7.0, -6.0, -11.0, -12.0}}});
+}
+
+BOOST_FIXTURE_TEST_CASE(IncorrectDataTypeAddScalar, AddInvalidScalarFixture)
+{
+    BOOST_CHECK_THROW(Setup(), armnn::ParseException);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file