IVGCVSW-4007 Add StandInLayer for unsupported operations in TfLiteParser

* Fixed a bug in custom operator support that caused all custom operators
  to be parsed as a DetectionPostProcessLayer
* Added support for handling unsupported operators (built-in or custom)
  by replacing them with a StandInLayer in the generated network
* Added an option to TfLiteParser to control whether unsupported
  operators should be replaced with StandInLayers in the generated
  network, or whether a ParseException should be thrown as before
  (see the example below)
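
For example, a client can opt in to StandInLayer substitution when
creating the parser (illustrative sketch based on the API added here):

    using namespace armnnTfLiteParser;

    // Request StandInLayer substitution for unsupported operators
    ITfLiteParser::TfLiteParserOptions options;
    options.m_StandInLayerForUnsupported = true; // defaults to false

    ITfLiteParserPtr parser = ITfLiteParser::Create(
        armnn::Optional<ITfLiteParser::TfLiteParserOptions>(options));

Each StandInLayer added this way is a non-executable placeholder named
"StandIn:<subgraphIndex>:<operatorIndex>:<opcode>".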

Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: I125a63016c7c510b1fdde6033842db4f276718c4
diff --git a/CMakeLists.txt b/CMakeLists.txt
index e3b1f8f..b451d9c 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -711,6 +711,7 @@
              src/armnnTfLiteParser/test/TransposeConv.cpp
              src/armnnTfLiteParser/test/Transpose.cpp
              src/armnnTfLiteParser/test/Unpack.cpp
+             src/armnnTfLiteParser/test/Unsupported.cpp
              src/armnnTfLiteParser/test/LoadModel.cpp
              src/armnnTfLiteParser/test/GetBuffer.cpp
              src/armnnTfLiteParser/test/OutputShapeOfSqueeze.cpp
diff --git a/include/armnnTfLiteParser/ITfLiteParser.hpp b/include/armnnTfLiteParser/ITfLiteParser.hpp
index 36b9246..de1eae7 100644
--- a/include/armnnTfLiteParser/ITfLiteParser.hpp
+++ b/include/armnnTfLiteParser/ITfLiteParser.hpp
@@ -8,6 +8,7 @@
 #include "armnn/NetworkFwd.hpp"
 #include "armnn/Tensor.hpp"
 #include "armnn/INetwork.hpp"
+#include "armnn/Optional.hpp"
 
 #include <memory>
 #include <map>
@@ -24,8 +25,16 @@
 class ITfLiteParser
 {
 public:
-    static ITfLiteParser* CreateRaw();
-    static ITfLiteParserPtr Create();
+    struct TfLiteParserOptions
+    {
+        TfLiteParserOptions()
+            : m_StandInLayerForUnsupported(false) {}
+
+        bool m_StandInLayerForUnsupported; ///< Substitute StandInLayers for unsupported operators (default: false)
+    };
+
+    static ITfLiteParser* CreateRaw(const armnn::Optional<TfLiteParserOptions>& options = armnn::EmptyOptional());
+    static ITfLiteParserPtr Create(const armnn::Optional<TfLiteParserOptions>& options = armnn::EmptyOptional());
     static void Destroy(ITfLiteParser* parser);
 
     /// Create the network from a flatbuffers binary file on disk
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index e04f9ad..937131c 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -427,42 +427,46 @@
 
 } // <anonymous>
 
-TfLiteParser::TfLiteParser()
-: m_Network(nullptr, nullptr)
+TfLiteParser::TfLiteParser(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
+: m_Options(options)
+, m_Network(nullptr, nullptr)
 , m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParser::ParseUnsupportedOperator)
 {
     // register supported operators
-    m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D]   =  &TfLiteParser::ParseAveragePool2D;
-    m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] =  &TfLiteParser::ParseBatchToSpaceND;
-    m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION]     =  &TfLiteParser::ParseConcatenation;
-    m_ParserFunctions[tflite::BuiltinOperator_CONV_2D]           =  &TfLiteParser::ParseConv2D;
-    m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] =  &TfLiteParser::ParseDepthwiseConv2D;
-    m_ParserFunctions[tflite::BuiltinOperator_CUSTOM]            =  &TfLiteParser::ParseDetectionPostProcess;
-    m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED]   =  &TfLiteParser::ParseFullyConnected;
-    m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC]          =  &TfLiteParser::ParseLogistic;
-    m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION]  =  &TfLiteParser::ParseL2Normalization;
-    m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D]       =  &TfLiteParser::ParseMaxPool2D;
-    m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM]           =  &TfLiteParser::ParseMaximum;
-    m_ParserFunctions[tflite::BuiltinOperator_MINIMUM]           =  &TfLiteParser::ParseMinimum;
-    m_ParserFunctions[tflite::BuiltinOperator_RELU]              =  &TfLiteParser::ParseRelu;
-    m_ParserFunctions[tflite::BuiltinOperator_RELU6]             =  &TfLiteParser::ParseRelu6;
-    m_ParserFunctions[tflite::BuiltinOperator_RESHAPE]           =  &TfLiteParser::ParseReshape;
-    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR]   =  &TfLiteParser::ParseResizeBilinear;
-    m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX]           =  &TfLiteParser::ParseSoftmax;
-    m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] =  &TfLiteParser::ParseSpaceToBatchND;
-    m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE]           =  &TfLiteParser::ParseSqueeze;
-    m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE]     =  &TfLiteParser::ParseStridedSlice;
-    m_ParserFunctions[tflite::BuiltinOperator_SUB]               =  &TfLiteParser::ParseSub;
-    m_ParserFunctions[tflite::BuiltinOperator_ADD]               =  &TfLiteParser::ParseAdd;
-    m_ParserFunctions[tflite::BuiltinOperator_MUL]               =  &TfLiteParser::ParseMul;
-    m_ParserFunctions[tflite::BuiltinOperator_MEAN]              =  &TfLiteParser::ParseMean;
-    m_ParserFunctions[tflite::BuiltinOperator_PACK]              =  &TfLiteParser::ParsePack;
-    m_ParserFunctions[tflite::BuiltinOperator_PAD]               =  &TfLiteParser::ParsePad;
-    m_ParserFunctions[tflite::BuiltinOperator_SPLIT]             =  &TfLiteParser::ParseSplit;
-    m_ParserFunctions[tflite::BuiltinOperator_TANH]              =  &TfLiteParser::ParseTanH;
-    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE]         =  &TfLiteParser::ParseTranspose;
-    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV]    =  &TfLiteParser::ParseTransposeConv;
-    m_ParserFunctions[tflite::BuiltinOperator_UNPACK]            =  &TfLiteParser::ParseUnpack;
+    m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D]   = &TfLiteParser::ParseAveragePool2D;
+    m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParser::ParseBatchToSpaceND;
+    m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION]     = &TfLiteParser::ParseConcatenation;
+    m_ParserFunctions[tflite::BuiltinOperator_CONV_2D]           = &TfLiteParser::ParseConv2D;
+    m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParser::ParseDepthwiseConv2D;
+    m_ParserFunctions[tflite::BuiltinOperator_CUSTOM]            = &TfLiteParser::ParseCustomOperator;
+    m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED]   = &TfLiteParser::ParseFullyConnected;
+    m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC]          = &TfLiteParser::ParseLogistic;
+    m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION]  = &TfLiteParser::ParseL2Normalization;
+    m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D]       = &TfLiteParser::ParseMaxPool2D;
+    m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM]           = &TfLiteParser::ParseMaximum;
+    m_ParserFunctions[tflite::BuiltinOperator_MINIMUM]           = &TfLiteParser::ParseMinimum;
+    m_ParserFunctions[tflite::BuiltinOperator_RELU]              = &TfLiteParser::ParseRelu;
+    m_ParserFunctions[tflite::BuiltinOperator_RELU6]             = &TfLiteParser::ParseRelu6;
+    m_ParserFunctions[tflite::BuiltinOperator_RESHAPE]           = &TfLiteParser::ParseReshape;
+    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR]   = &TfLiteParser::ParseResizeBilinear;
+    m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX]           = &TfLiteParser::ParseSoftmax;
+    m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParser::ParseSpaceToBatchND;
+    m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE]           = &TfLiteParser::ParseSqueeze;
+    m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE]     = &TfLiteParser::ParseStridedSlice;
+    m_ParserFunctions[tflite::BuiltinOperator_SUB]               = &TfLiteParser::ParseSub;
+    m_ParserFunctions[tflite::BuiltinOperator_ADD]               = &TfLiteParser::ParseAdd;
+    m_ParserFunctions[tflite::BuiltinOperator_MUL]               = &TfLiteParser::ParseMul;
+    m_ParserFunctions[tflite::BuiltinOperator_MEAN]              = &TfLiteParser::ParseMean;
+    m_ParserFunctions[tflite::BuiltinOperator_PACK]              = &TfLiteParser::ParsePack;
+    m_ParserFunctions[tflite::BuiltinOperator_PAD]               = &TfLiteParser::ParsePad;
+    m_ParserFunctions[tflite::BuiltinOperator_SPLIT]             = &TfLiteParser::ParseSplit;
+    m_ParserFunctions[tflite::BuiltinOperator_TANH]              = &TfLiteParser::ParseTanH;
+    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE]         = &TfLiteParser::ParseTranspose;
+    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV]    = &TfLiteParser::ParseTransposeConv;
+    m_ParserFunctions[tflite::BuiltinOperator_UNPACK]            = &TfLiteParser::ParseUnpack;
+
+    // register supported custom operators
+    m_CustomParserFunctions["TFLite_Detection_PostProcess"]      = &TfLiteParser::ParseDetectionPostProcess;
 }
 
 void TfLiteParser::ResetParser()
@@ -675,25 +679,74 @@
     tensorSlots.inputSlots.push_back(slot);
 }
 
+void TfLiteParser::ParseCustomOperator(size_t subgraphIndex, size_t operatorIndex)
+{
+    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+
+    // NOTE: By default we presume the custom operator is not supported
+    auto customParserFunction = &TfLiteParser::ParseUnsupportedOperator;
+
+    // Identify custom code defined for custom operator
+    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
+    const auto& customCode  = m_Model->operator_codes[operatorPtr->opcode_index]->custom_code;
+
+    // Find the parser function that corresponds to the custom code (if any)
+    auto iterator = m_CustomParserFunctions.find(customCode);
+    if (iterator != m_CustomParserFunctions.end())
+    {
+        customParserFunction = iterator->second;
+    }
+
+    // Run parser function
+    (this->*customParserFunction)(subgraphIndex, operatorIndex);
+}
+
 void TfLiteParser::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
-    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
-    //
-    auto opcodeIndex = operatorPtr->opcode_index;
-    auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;
 
-    throw ParseException(
-        boost::str(
-            boost::format("Operator not supported. "
-                          "subgraph:%1% operator:%2% "
-                          "opcode_index:%3% opcode:%4% / %5% %6%") %
-                          subgraphIndex %
-                          operatorIndex %
-                          opcodeIndex %
-                          opcode %
-                          tflite::EnumNameBuiltinOperator(opcode) %
-                          CHECK_LOCATION().AsString()));
+    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
+
+    auto opcodeIndex = operatorPtr->opcode_index;
+    auto opcode      = m_Model->operator_codes[opcodeIndex]->builtin_code;
+
+    if (!m_Options || !m_Options.value().m_StandInLayerForUnsupported)
+    {
+        // Do not add StandInLayer, throw ParseException instead
+        throw ParseException(
+            boost::str(
+                boost::format("Operator not supported. "
+                              "subgraph:%1% operator:%2% "
+                              "opcode_index:%3% opcode:%4% / %5% %6%") %
+                              subgraphIndex %
+                              operatorIndex %
+                              opcodeIndex %
+                              opcode %
+                              tflite::EnumNameBuiltinOperator(opcode) %
+                              CHECK_LOCATION().AsString()));
+    }
+
+    auto inputs  = GetInputs(m_Model, subgraphIndex, operatorIndex);
+    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+
+    const unsigned int numInputs  = boost::numeric_cast<unsigned int>(inputs.size());
+    const unsigned int numOutputs = boost::numeric_cast<unsigned int>(outputs.size());
+
+    StandInDescriptor descriptor(numInputs, numOutputs);
+    auto layerName = boost::str(boost::format("StandIn:%1%:%2%:%3%") % subgraphIndex % operatorIndex % opcode);
+
+    // Add a non-executable StandInLayer as a placeholder for any unsupported operator
+    IConnectableLayer* layer = m_Network->AddStandInLayer(descriptor, layerName.c_str());
+    for (unsigned int i = 0u; i < numOutputs; ++i)
+    {
+        layer->GetOutputSlot(i).SetTensorInfo(ToTensorInfo(outputs[i]));
+    }
+
+    auto inputTensorIds  = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    auto outputTensorIds = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
+
+    RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIds);
+    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIds);
 }
 
 void TfLiteParser::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
@@ -2761,14 +2814,14 @@
     return result;
 }
 
-ITfLiteParser* ITfLiteParser::CreateRaw()
+ITfLiteParser* ITfLiteParser::CreateRaw(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
 {
-    return new TfLiteParser();
+    return new TfLiteParser(options);
 }
 
-ITfLiteParserPtr ITfLiteParser::Create()
+ITfLiteParserPtr ITfLiteParser::Create(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
 {
-    return ITfLiteParserPtr(CreateRaw(), &ITfLiteParser::Destroy);
+    return ITfLiteParserPtr(CreateRaw(options), &ITfLiteParser::Destroy);
 }
 
 void ITfLiteParser::Destroy(ITfLiteParser* parser)
diff --git a/src/armnnTfLiteParser/TfLiteParser.hpp b/src/armnnTfLiteParser/TfLiteParser.hpp
index fac2599..fb01fe8 100644
--- a/src/armnnTfLiteParser/TfLiteParser.hpp
+++ b/src/armnnTfLiteParser/TfLiteParser.hpp
@@ -10,6 +10,7 @@
 
 #include <schema_generated.h>
 #include <functional>
+#include <unordered_map>
 #include <vector>
 
 namespace armnnTfLiteParser
@@ -58,7 +59,7 @@
     /// Return the output tensor names for a given subgraph
     virtual std::vector<std::string> GetSubgraphOutputTensorNames(size_t subgraphId) const override;
 
-    TfLiteParser();
+    TfLiteParser(const armnn::Optional<ITfLiteParser::TfLiteParserOptions>& options = armnn::EmptyOptional());
     virtual ~TfLiteParser() {}
 
 public:
@@ -89,7 +90,9 @@
     // signature for the parser functions
     using OperatorParsingFunction = void(TfLiteParser::*)(size_t subgraphIndex, size_t operatorIndex);
 
+    void ParseCustomOperator(size_t subgraphIndex, size_t operatorIndex);
     void ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex);
+
     void ParseActivation(size_t subgraphIndex, size_t operatorIndex, armnn::ActivationFunction activationType);
     void ParseAdd(size_t subgraphIndex, size_t operatorIndex);
     void ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex);
@@ -180,11 +183,16 @@
                       armnn::TensorInfo& tensorInfo,
                       armnn::Optional<armnn::PermutationVector&> permutationVector);
 
+    // Settings for configuring the TfLiteParser
+    armnn::Optional<ITfLiteParser::TfLiteParserOptions> m_Options;
+
     /// The network we're building. Gets cleared after it is passed to the user
     armnn::INetworkPtr                    m_Network;
-    std::vector<OperatorParsingFunction>  m_ParserFunctions;
     ModelPtr                              m_Model;
 
+    std::vector<OperatorParsingFunction>                     m_ParserFunctions;
+    std::unordered_map<std::string, OperatorParsingFunction> m_CustomParserFunctions;
+
     /// A mapping of an output slot to each of the input slots it should be connected to
     /// The outputSlot is from the layer that creates this tensor as one of its outputs
     /// The inputSlots are from the layers that use this tensor as one of their inputs
diff --git a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
index b20bea2..0c64280 100644
--- a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
+++ b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
@@ -29,22 +29,27 @@
 #include <iostream>
 
 using armnnTfLiteParser::ITfLiteParser;
-using TensorRawPtr = const tflite::TensorT *;
+using armnnTfLiteParser::ITfLiteParserPtr;
 
+using TensorRawPtr = const tflite::TensorT *;
 struct ParserFlatbuffersFixture
 {
     ParserFlatbuffersFixture() :
-        m_Parser(ITfLiteParser::Create()),
+        m_Parser(nullptr, &ITfLiteParser::Destroy),
         m_Runtime(armnn::IRuntime::Create(armnn::IRuntime::CreationOptions())),
         m_NetworkIdentifier(-1)
     {
+        ITfLiteParser::TfLiteParserOptions options;
+        options.m_StandInLayerForUnsupported = true;
+
+        m_Parser.reset(ITfLiteParser::CreateRaw(armnn::Optional<ITfLiteParser::TfLiteParserOptions>(options)));
     }
 
     std::vector<uint8_t> m_GraphBinary;
-    std::string m_JsonString;
-    std::unique_ptr<ITfLiteParser, void (*)(ITfLiteParser *parser)> m_Parser;
-    armnn::IRuntimePtr m_Runtime;
-    armnn::NetworkId m_NetworkIdentifier;
+    std::string          m_JsonString;
+    ITfLiteParserPtr     m_Parser;
+    armnn::IRuntimePtr   m_Runtime;
+    armnn::NetworkId     m_NetworkIdentifier;
 
     /// If the single-input-single-output overload of Setup() is called, these will store the input and output name
     /// so they don't need to be passed to the single-input-single-output overload of RunTest().
@@ -346,4 +351,4 @@
             }
         }
     }
-}
\ No newline at end of file
+}
diff --git a/src/armnnTfLiteParser/test/Unsupported.cpp b/src/armnnTfLiteParser/test/Unsupported.cpp
new file mode 100644
index 0000000..25abde8
--- /dev/null
+++ b/src/armnnTfLiteParser/test/Unsupported.cpp
@@ -0,0 +1,249 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+#include <armnn/LayerVisitorBase.hpp>
+
+#include <layers/StandInLayer.hpp>
+
+#include <boost/assert.hpp>
+#include <boost/polymorphic_cast.hpp>
+#include <boost/test/unit_test.hpp>
+
+#include <sstream>
+#include <string>
+#include <vector>
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+using namespace armnn;
+
+class StandInLayerVerifier : public LayerVisitorBase<VisitorThrowingPolicy>
+{
+public:
+    StandInLayerVerifier(const std::vector<TensorInfo>& inputInfos,
+                         const std::vector<TensorInfo>& outputInfos)
+        : LayerVisitorBase<VisitorThrowingPolicy>()
+        , m_InputInfos(inputInfos)
+        , m_OutputInfos(outputInfos) {}
+
+    void VisitInputLayer(const IConnectableLayer*, LayerBindingId, const char*) override {}
+
+    void VisitOutputLayer(const IConnectableLayer*, LayerBindingId, const char*) override {}
+
+    void VisitStandInLayer(const IConnectableLayer* layer,
+                           const StandInDescriptor& descriptor,
+                           const char*) override
+    {
+        unsigned int numInputs = boost::numeric_cast<unsigned int>(m_InputInfos.size());
+        BOOST_CHECK(descriptor.m_NumInputs    == numInputs);
+        BOOST_CHECK(layer->GetNumInputSlots() == numInputs);
+
+        unsigned int numOutputs = boost::numeric_cast<unsigned int>(m_OutputInfos.size());
+        BOOST_CHECK(descriptor.m_NumOutputs    == numOutputs);
+        BOOST_CHECK(layer->GetNumOutputSlots() == numOutputs);
+
+        const StandInLayer* standInLayer = boost::polymorphic_downcast<const StandInLayer*>(layer);
+        for (unsigned int i = 0u; i < numInputs; ++i)
+        {
+            const OutputSlot* connectedSlot = standInLayer->GetInputSlot(i).GetConnectedOutputSlot();
+            BOOST_CHECK(connectedSlot != nullptr);
+
+            const TensorInfo& inputInfo = connectedSlot->GetTensorInfo();
+            BOOST_CHECK(inputInfo == m_InputInfos[i]);
+        }
+
+        for (unsigned int i = 0u; i < numOutputs; ++i)
+        {
+            const TensorInfo& outputInfo = layer->GetOutputSlot(i).GetTensorInfo();
+            BOOST_CHECK(outputInfo == m_OutputInfos[i]);
+        }
+    }
+
+private:
+    std::vector<TensorInfo> m_InputInfos;
+    std::vector<TensorInfo> m_OutputInfos;
+};
+
+class DummyCustomFixture : public ParserFlatbuffersFixture
+{
+public:
+    explicit DummyCustomFixture(const std::vector<TensorInfo>& inputInfos,
+                                const std::vector<TensorInfo>& outputInfos)
+        : ParserFlatbuffersFixture()
+        , m_StandInLayerVerifier(inputInfos, outputInfos)
+    {
+        const unsigned int numInputs = boost::numeric_cast<unsigned int>(inputInfos.size());
+        BOOST_ASSERT(numInputs > 0);
+
+        const unsigned int numOutputs = boost::numeric_cast<unsigned int>(outputInfos.size());
+        BOOST_ASSERT(numOutputs > 0);
+
+        m_JsonString = R"(
+            {
+                "version": 3,
+                "operator_codes": [{
+                    "builtin_code": "CUSTOM",
+                    "custom_code": "DummyCustomOperator"
+                }],
+                "subgraphs": [ {
+                    "tensors": [)";
+
+        // Add input tensors
+        for (unsigned int i = 0u; i < numInputs; ++i)
+        {
+            const TensorInfo& inputInfo = inputInfos[i];
+            m_JsonString += R"(
+                    {
+                        "shape": )" + GetTensorShapeAsString(inputInfo.GetShape()) + R"(,
+                        "type": )" + GetDataTypeAsString(inputInfo.GetDataType()) + R"(,
+                        "buffer": 0,
+                        "name": "inputTensor)" + std::to_string(i) + R"(",
+                        "quantization": {
+                            "min": [ 0.0 ],
+                            "max": [ 255.0 ],
+                            "scale": [ )" + std::to_string(inputInfo.GetQuantizationScale()) + R"( ],
+                            "zero_point": [ )" + std::to_string(inputInfo.GetQuantizationOffset()) + R"( ],
+                        }
+                    },)";
+        }
+
+        // Add output tensors
+        for (unsigned int i = 0u; i < numOutputs; ++i)
+        {
+            const TensorInfo& outputInfo = outputInfos[i];
+            m_JsonString += R"(
+                    {
+                        "shape": )" + GetTensorShapeAsString(outputInfo.GetShape()) + R"(,
+                        "type": )" + GetDataTypeAsString(outputInfo.GetDataType()) + R"(,
+                        "buffer": 0,
+                        "name": "outputTensor)" + std::to_string(i) + R"(",
+                        "quantization": {
+                            "min": [ 0.0 ],
+                            "max": [ 255.0 ],
+                            "scale": [ )" + std::to_string(outputInfo.GetQuantizationScale()) + R"( ],
+                            "zero_point": [ )" + std::to_string(outputInfo.GetQuantizationOffset()) + R"( ],
+                        }
+                    })";
+
+            if (i + 1 < numOutputs)
+            {
+                m_JsonString += ",";
+            }
+        }
+
+        const std::string inputIndices  = GetIndicesAsString(0u, numInputs - 1u);
+        const std::string outputIndices = GetIndicesAsString(numInputs, numInputs + numOutputs - 1u);
+
+        // Add dummy custom operator
+        m_JsonString +=  R"(],
+                    "inputs": )" + inputIndices + R"(,
+                    "outputs": )" + outputIndices + R"(,
+                    "operators": [
+                        {
+                            "opcode_index": 0,
+                            "inputs": )" + inputIndices + R"(,
+                            "outputs": )" + outputIndices + R"(,
+                            "builtin_options_type": 0,
+                            "custom_options": [ ],
+                            "custom_options_format": "FLEXBUFFERS"
+                        }
+                    ],
+                } ],
+                "buffers" : [
+                    { },
+                    { }
+                ]
+            }
+        )";
+
+        ReadStringToBinary();
+    }
+
+    void RunTest()
+    {
+        INetworkPtr network = m_Parser->CreateNetworkFromBinary(m_GraphBinary);
+        network->Accept(m_StandInLayerVerifier);
+    }
+
+private:
+    static std::string GetTensorShapeAsString(const TensorShape& tensorShape)
+    {
+        std::stringstream stream;
+        stream << "[ ";
+        for (unsigned int i = 0u; i < tensorShape.GetNumDimensions(); ++i)
+        {
+            stream << tensorShape[i];
+            if (i + 1 < tensorShape.GetNumDimensions())
+            {
+                stream << ",";
+            }
+            stream << " ";
+        }
+        stream << "]";
+
+        return stream.str();
+    }
+
+    static std::string GetDataTypeAsString(DataType dataType)
+    {
+        switch (dataType)
+        {
+            case DataType::Float32:         return "FLOAT32";
+            case DataType::QuantisedAsymm8: return "UINT8";
+            default:                        return "UNKNOWN";
+        }
+    }
+
+    static std::string GetIndicesAsString(unsigned int first, unsigned int last)
+    {
+        std::stringstream stream;
+        stream << "[ ";
+        for (unsigned int i = first; i <= last ; ++i)
+        {
+            stream << i;
+            if (i + 1 <= last)
+            {
+                stream << ",";
+            }
+            stream << " ";
+        }
+        stream << "]";
+
+        return stream.str();
+    }
+
+    StandInLayerVerifier m_StandInLayerVerifier;
+};
+
+class DummyCustom1Input1OutputFixture : public DummyCustomFixture
+{
+public:
+    DummyCustom1Input1OutputFixture()
+        : DummyCustomFixture({ TensorInfo({ 1, 1 }, DataType::Float32) },
+                             { TensorInfo({ 2, 2 }, DataType::Float32) }) {}
+};
+
+class DummyCustom2Inputs1OutputFixture : public DummyCustomFixture
+{
+public:
+    DummyCustom2Inputs1OutputFixture()
+        : DummyCustomFixture({ TensorInfo({ 1, 1 }, DataType::Float32), TensorInfo({ 2, 2 }, DataType::Float32) },
+                             { TensorInfo({ 3, 3 }, DataType::Float32) }) {}
+};
+
+BOOST_FIXTURE_TEST_CASE(UnsupportedCustomOperator1Input1Output, DummyCustom1Input1OutputFixture)
+{
+    RunTest();
+}
+
+BOOST_FIXTURE_TEST_CASE(UnsupportedCustomOperator2Inputs1Output, DummyCustom2Inputs1OutputFixture)
+{
+    RunTest();
+}
+
+BOOST_AUTO_TEST_SUITE_END()