Add new op BOX_WITH_NMS_LIMIT.
Test: NeuralNetworksTest_static
Change-Id: I084f1dfee90a9edc9892520ccc9f8a6a3cb84b11
diff --git a/common/OperationResolver.cpp b/common/OperationResolver.cpp
index 9388ec7..5f89eac 100644
--- a/common/OperationResolver.cpp
+++ b/common/OperationResolver.cpp
@@ -26,6 +26,7 @@
// TODO(b/119608412): Find a way to not reference every operation here.
const OperationRegistration* register_ABS();
const OperationRegistration* register_AXIS_ALIGNED_BBOX_TRANSFORM();
+const OperationRegistration* register_BOX_WITH_NMS_LIMIT();
const OperationRegistration* register_CHANNEL_SHUFFLE();
const OperationRegistration* register_EMBEDDING_LOOKUP_SPARSE();
const OperationRegistration* register_EQUAL();
@@ -62,6 +63,7 @@
OperationResolver::OperationResolver() {
registerOperation(register_ABS());
registerOperation(register_AXIS_ALIGNED_BBOX_TRANSFORM());
+ registerOperation(register_BOX_WITH_NMS_LIMIT());
registerOperation(register_CHANNEL_SHUFFLE());
registerOperation(register_EMBEDDING_LOOKUP_SPARSE());
registerOperation(register_EQUAL());
diff --git a/common/operations/GenerateProposals.cpp b/common/operations/GenerateProposals.cpp
index add0fa9..78f1e11 100644
--- a/common/operations/GenerateProposals.cpp
+++ b/common/operations/GenerateProposals.cpp
@@ -144,6 +144,24 @@
return true;
}
+// Taking two indices of bounding boxes, return the intersection-over-union.
+// Each box is stored as [x1, y1, x2, y2] (kRoiDim floats) at roiBase, with
+// x1 <= x2 and y1 <= y2 for a valid box.
+// NOTE(review): if both boxes have zero area, areaUnion is 0 and the division
+// yields NaN/Inf — presumably callers only pass valid boxes; confirm.
+float getIoUAxisAligned(const float* roiBase, uint32_t ind1, uint32_t ind2) {
+ const uint32_t kRoiDim = 4;
+ const float* roi1 = roiBase + ind1 * kRoiDim;
+ const float* roi2 = roiBase + ind2 * kRoiDim;
+ const float area1 = (roi1[2] - roi1[0]) * (roi1[3] - roi1[1]);
+ const float area2 = (roi2[2] - roi2[0]) * (roi2[3] - roi2[1]);
+ const float x1 = std::max(roi1[0], roi2[0]);
+ const float x2 = std::min(roi1[2], roi2[2]);
+ const float y1 = std::max(roi1[1], roi2[1]);
+ const float y2 = std::min(roi1[3], roi2[3]);
+ // Clamp at 0 so disjoint boxes produce an empty intersection.
+ const float w = std::max(x2 - x1, 0.0f);
+ const float h = std::max(y2 - y1, 0.0f);
+ const float areaIntersect = w * h;
+ const float areaUnion = area1 + area2 - areaIntersect;
+ return areaIntersect / areaUnion;
+}
+
} // namespace
namespace axis_aligned_bbox_transform {
@@ -261,6 +279,283 @@
}
} // namespace axis_aligned_bbox_transform
+
+namespace box_with_nms_limit {
+
+constexpr char kOperationName[] = "BOX_WITH_NMS_LIMIT";
+
+constexpr uint32_t kNumInputs = 6;
+constexpr uint32_t kScoreTensor = 0;
+constexpr uint32_t kRoiTensor = 1;
+constexpr uint32_t kBatchSplitTensor = 2;
+constexpr uint32_t kScoreThresholdScalar = 3;
+constexpr uint32_t kIoUThresholdScalar = 4;
+constexpr uint32_t kMaxNumDetectionScalar = 5;
+
+constexpr uint32_t kNumOutputs = 4;
+constexpr uint32_t kOutputScoreTensor = 0;
+constexpr uint32_t kOutputRoiTensor = 1;
+constexpr uint32_t kOutputClassTensor = 2;
+constexpr uint32_t kOutputBatchSplitTensor = 3;
+
+namespace {
+
+// In-place hard NMS within range [select, select + selectLength).
+// The values in select[] are indices into scoresData/roiData. Returns a
+// pointer one past the last surviving index: on return,
+// [select, returnValue) holds the kept indices in descending score order.
+uint32_t* hardNmsSingleClass(const float* scoresData, const float* roiData, float iouThreshold,
+ uint32_t* select, uint32_t selectLength) {
+ uint32_t *selectStart = select, *selectEnd = select + selectLength;
+ while (selectStart < selectEnd) {
+ // find max score and swap to the front
+ auto& maxScore = *std::max_element(selectStart, selectEnd,
+ [&scoresData](const uint32_t& lhs, const uint32_t& rhs) {
+ return scoresData[lhs] < scoresData[rhs];
+ });
+ std::swap(maxScore, *selectStart);
+
+ // Calculate IoU of the rest, swap to the end (discard) if needed.
+ for (uint32_t* i = selectStart + 1; i < selectEnd; i++) {
+ float iou = getIoUAxisAligned(roiData, *i, *selectStart);
+ if (iou >= iouThreshold) {
+ // Decrement i so the element swapped in from the tail,
+ // which has not been examined yet, is rechecked next pass.
+ std::swap(*i--, *(--selectEnd));
+ }
+ }
+ selectStart++;
+ }
+ return selectStart;
+}
+
+// Runs hard NMS independently per class (class 0 = background is skipped),
+// appending the surviving flat indices (roi * numClasses + class) to
+// |select|. If maxNumDetections >= 0, only the highest-scoring
+// maxNumDetections boxes overall are kept, and the result is re-sorted by
+// class, then by descending score within each class.
+void hardNmsMultiClass(const float* scoresData, const float* roiData, std::vector<uint32_t>& select,
+ uint32_t numClasses, uint32_t numRois, float scoreThreshold,
+ float iouThreshold, int32_t maxNumDetections) {
+ // Exclude class 0 (background)
+ for (uint32_t c = 1; c < numClasses; c++) {
+ uint32_t size = select.size();
+ // Filter out low-scoring candidates before running NMS.
+ for (uint32_t b = 0; b < numRois; b++) {
+ const uint32_t index = b * numClasses + c;
+ const float score = scoresData[index];
+ if (score > scoreThreshold) {
+ select.push_back(index);
+ }
+ }
+ // NMS in place over the candidates appended for this class only.
+ uint32_t* selectStart = select.data() + size;
+ uint32_t selectLength = select.size() - size;
+ uint32_t* selectEnd =
+ hardNmsSingleClass(scoresData, roiData, iouThreshold, selectStart, selectLength);
+ select.resize(selectEnd - select.data());
+ }
+
+ // Take top maxNumDetections; a negative value means unlimited.
+ // (The signed/unsigned compare below is safe: maxNumDetections >= 0 here.)
+ if (maxNumDetections < 0 || select.size() <= maxNumDetections) {
+ return;
+ }
+ std::sort(select.begin(), select.end(),
+ [&scoresData](const uint32_t& lhs, const uint32_t& rhs) {
+ return scoresData[lhs] > scoresData[rhs];
+ });
+ select.resize(maxNumDetections);
+
+ // Sort again by class.
+ std::sort(select.begin(), select.end(),
+ [&scoresData, numClasses](const uint32_t& lhs, const uint32_t& rhs) {
+ uint32_t lhsClass = lhs % numClasses, rhsClass = rhs % numClasses;
+ return lhsClass == rhsClass ? scoresData[lhs] > scoresData[rhs]
+ : lhsClass < rhsClass;
+ });
+}
+
+// Float32 reference implementation of BOX_WITH_NMS_LIMIT.
+// Inputs are grouped by batch: batchSplitData[b] gives the number of ROIs of
+// image b, and scores/rois for consecutive images are laid out back to back
+// ([numRois, numClasses] and [numRois, numClasses * kRoiDim] respectively).
+// Sets the dynamic output shapes via |context| based on how many boxes
+// survive NMS, then writes scores, boxes, class ids and per-batch counts.
+bool boxWithNmsLimitFloat32(const float* scoresData, const Shape& scoresShape, const float* roiData,
+ const Shape& roiShape, const int32_t* batchSplitData,
+ const Shape& batchSplitShape, float scoreThreshold, float iouThreshold,
+ int32_t maxNumDetections, float* scoresOutData, Shape scoresOutShape,
+ float* roiOutData, Shape roiOutShape, int32_t* classesOutData,
+ Shape classesOutShape, int32_t* batchSplitOutData,
+ const Shape& batchSplitOutShape, IOperationExecutionContext* context) {
+ NNTRACE_TRANS("boxWithNmsLimit");
+ const uint32_t kRoiDim = 4;
+ uint32_t numClasses = getSizeOfDimension(scoresShape, 1);
+ uint32_t numBatches = getSizeOfDimension(batchSplitShape, 0);
+
+ // First pass: run NMS per batch and record per-batch survivor counts.
+ const float* scoresBase = scoresData;
+ const float* roiBase = roiData;
+ int32_t* batchSplitOutPtr = batchSplitOutData;
+ std::vector<uint32_t> selected;
+ for (uint32_t b = 0; b < numBatches; b++) {
+ std::vector<uint32_t> result;
+ hardNmsMultiClass(scoresBase, roiBase, result, numClasses, batchSplitData[b],
+ scoreThreshold, iouThreshold, maxNumDetections);
+ selected.insert(selected.end(), result.begin(), result.end());
+ *batchSplitOutPtr++ = result.size();
+ // Advance to the next image's scores/rois.
+ scoresBase += batchSplitData[b] * numClasses;
+ roiBase += batchSplitData[b] * numClasses * kRoiDim;
+ }
+
+ // Set output dimensions.
+ uint32_t numOutRois = selected.size();
+ scoresOutShape.dimensions = {numOutRois};
+ NN_RET_CHECK(context->setOutputShape(kOutputScoreTensor, scoresOutShape));
+ roiOutShape.dimensions = {numOutRois, 4};
+ NN_RET_CHECK(context->setOutputShape(kOutputRoiTensor, roiOutShape));
+ classesOutShape.dimensions = {numOutRois};
+ NN_RET_CHECK(context->setOutputShape(kOutputClassTensor, classesOutShape));
+
+ // Write outputs. |selected| holds flat indices (roi * numClasses + class)
+ // relative to each batch's base pointers.
+ float* scoresOutPtr = scoresOutData;
+ float* roiOutPtr = roiOutData;
+ int32_t* classesOutPtr = classesOutData;
+ scoresBase = scoresData;
+ roiBase = roiData;
+ uint32_t i = 0;
+ for (uint32_t b = 0; b < numBatches; b++) {
+ for (uint32_t j = 0; j < batchSplitOutData[b]; j++) {
+ uint32_t index = selected[i++];
+ *scoresOutPtr++ = scoresBase[index];
+ memcpy(roiOutPtr, roiBase + index * kRoiDim, kRoiDim * sizeof(float));
+ roiOutPtr += kRoiDim;
+ // Recover the class id from the flat index.
+ *classesOutPtr++ = index % numClasses;
+ }
+ scoresBase += batchSplitData[b] * numClasses;
+ roiBase += batchSplitData[b] * numClasses * kRoiDim;
+ }
+ return true;
+}
+
+// Float16 variant: converts inputs to float32, delegates to
+// boxWithNmsLimitFloat32, then converts the score/roi outputs back.
+// NOTE(review): the temporary output vectors are sized from the
+// scoresOutShape/roiOutShape passed in; these must already describe enough
+// elements for every surviving box at execution time, otherwise the float32
+// helper writes past the end of the temporaries — confirm against the
+// dynamic-output-shape execution path.
+bool boxWithNmsLimitFloat16(const _Float16* scoresData, const Shape& scoresShape,
+ const _Float16* roiData, const Shape& roiShape,
+ const int32_t* batchSplitData, const Shape& batchSplitShape,
+ _Float16 scoreThreshold, _Float16 iouThreshold,
+ int32_t maxNumDetections, _Float16* scoresOutData,
+ const Shape& scoresOutShape, _Float16* roiOutData,
+ const Shape& roiOutShape, int32_t* classesOutData,
+ const Shape& classesOutShape, int32_t* batchSplitOutData,
+ const Shape& batchSplitOutShape, IOperationExecutionContext* context) {
+ std::vector<float> scores_float32(getNumberOfElements(scoresShape));
+ convertFloat16ToFloat32(scoresData, &scores_float32);
+ std::vector<float> roi_float32(getNumberOfElements(roiShape));
+ convertFloat16ToFloat32(roiData, &roi_float32);
+ std::vector<float> outputScores_float32(getNumberOfElements(scoresOutShape));
+ std::vector<float> outputRoi_float32(getNumberOfElements(roiOutShape));
+ NN_RET_CHECK(boxWithNmsLimitFloat32(
+ scores_float32.data(), scoresShape, roi_float32.data(), roiShape, batchSplitData,
+ batchSplitShape, scoreThreshold, iouThreshold, maxNumDetections,
+ outputScores_float32.data(), scoresOutShape, outputRoi_float32.data(), roiOutShape,
+ classesOutData, classesOutShape, batchSplitOutData, batchSplitOutShape, context));
+ // Classes and batch-split outputs are int32 and written directly above.
+ convertFloat32ToFloat16(outputScores_float32, scoresOutData);
+ convertFloat32ToFloat16(outputRoi_float32, roiOutData);
+ return true;
+}
+
+} // namespace
+
+// Validates operand counts and types for BOX_WITH_NMS_LIMIT.
+// The two threshold scalars must match the precision of the score/roi
+// tensors: FLOAT16 scalars for TENSOR_FLOAT16, FLOAT32 for TENSOR_FLOAT32.
+bool validate(const IOperationValidationContext* context) {
+ NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
+ NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs);
+ std::vector<OperandType> inExpectedTypes;
+ auto inputType = context->getInputType(kScoreTensor);
+ if (inputType == OperandType::TENSOR_FLOAT16) {
+ inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16,
+ OperandType::TENSOR_INT32, OperandType::FLOAT16,
+ OperandType::FLOAT16, OperandType::INT32};
+ } else if (inputType == OperandType::TENSOR_FLOAT32) {
+ inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
+ OperandType::TENSOR_INT32, OperandType::FLOAT32,
+ OperandType::FLOAT32, OperandType::INT32};
+ } else {
+ NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
+ }
+ NN_RET_CHECK(validateInputTypes(context, inExpectedTypes));
+ // Scores and rois keep the input precision; classes and batch split are int32.
+ NN_RET_CHECK(validateOutputTypes(
+ context, {inputType, inputType, OperandType::TENSOR_INT32, OperandType::TENSOR_INT32}));
+ return validateHalVersion(context, HalVersion::V1_2);
+}
+
+// Checks input ranks/dimensions and sets placeholder output shapes.
+// The first dimension of the score/roi/class outputs is left as 0 here; the
+// real size is only known after NMS runs, so execute() sets it via
+// setOutputShape once the survivor count is available.
+bool prepare(IOperationExecutionContext* context) {
+ Shape scoreShape = context->getInputShape(kScoreTensor);
+ Shape roiShape = context->getInputShape(kRoiTensor);
+ Shape batchSplitShape = context->getInputShape(kBatchSplitTensor);
+ Shape outputScoreShape = context->getOutputShape(kOutputScoreTensor);
+ Shape outputRoiShape = context->getOutputShape(kOutputRoiTensor);
+ Shape outputClassShape = context->getOutputShape(kOutputClassTensor);
+ Shape outputBatchSplitShape = context->getOutputShape(kOutputBatchSplitTensor);
+
+ // Use NN_RET_CHECK_EQ (as in validate()) so a failure logs both values.
+ NN_RET_CHECK_EQ(getNumberOfDimensions(scoreShape), 2);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(roiShape), 2);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(batchSplitShape), 1);
+
+ const uint32_t kRoiDim = 4;
+ uint32_t numRois = getSizeOfDimension(scoreShape, 0);
+ uint32_t numClasses = getSizeOfDimension(scoreShape, 1);
+ uint32_t numBatches = getSizeOfDimension(batchSplitShape, 0);
+ // Each roi row carries one [x1, y1, x2, y2] box per class.
+ NN_RET_CHECK_EQ(getSizeOfDimension(roiShape, 0), numRois);
+ NN_RET_CHECK_EQ(getSizeOfDimension(roiShape, 1), kRoiDim * numClasses);
+ // At least one non-background class (class 0 is background).
+ NN_RET_CHECK_GT(numClasses, 1);
+
+ outputScoreShape.type = scoreShape.type;
+ outputScoreShape.dimensions = {0};
+ NN_RET_CHECK(context->setOutputShape(kOutputScoreTensor, outputScoreShape));
+
+ outputRoiShape.type = roiShape.type;
+ outputRoiShape.dimensions = {0, 4};
+ NN_RET_CHECK(context->setOutputShape(kOutputRoiTensor, outputRoiShape));
+
+ outputClassShape.type = OperandType::TENSOR_INT32;
+ outputClassShape.dimensions = {0};
+ NN_RET_CHECK(context->setOutputShape(kOutputClassTensor, outputClassShape));
+
+ // The batch-split output size is fully known at prepare time.
+ outputBatchSplitShape.type = batchSplitShape.type;
+ outputBatchSplitShape.dimensions = {numBatches};
+ NN_RET_CHECK(context->setOutputShape(kOutputBatchSplitTensor, outputBatchSplitShape));
+ return true;
+}
+
+// Dispatches to the float16 or float32 implementation based on the score
+// tensor's operand type; validate() guarantees all other operands match.
+// NOTE(review): the trace tag here is "boxWithNMSLimit" while the float32
+// helper uses "boxWithNmsLimit" — presumably intentional to distinguish the
+// two scopes, but confirm the casing difference is deliberate.
+bool execute(IOperationExecutionContext* context) {
+ NNTRACE_TRANS("boxWithNMSLimit");
+ switch (context->getInputType(kScoreTensor)) {
+ case OperandType::TENSOR_FLOAT16: {
+ return boxWithNmsLimitFloat16(
+ context->getInputBuffer<_Float16>(kScoreTensor),
+ context->getInputShape(kScoreTensor),
+ context->getInputBuffer<_Float16>(kRoiTensor),
+ context->getInputShape(kRoiTensor),
+ context->getInputBuffer<int32_t>(kBatchSplitTensor),
+ context->getInputShape(kBatchSplitTensor),
+ context->getInputValue<_Float16>(kScoreThresholdScalar),
+ context->getInputValue<_Float16>(kIoUThresholdScalar),
+ context->getInputValue<int32_t>(kMaxNumDetectionScalar),
+ context->getOutputBuffer<_Float16>(kOutputScoreTensor),
+ context->getOutputShape(kOutputScoreTensor),
+ context->getOutputBuffer<_Float16>(kOutputRoiTensor),
+ context->getOutputShape(kOutputRoiTensor),
+ context->getOutputBuffer<int32_t>(kOutputClassTensor),
+ context->getOutputShape(kOutputClassTensor),
+ context->getOutputBuffer<int32_t>(kOutputBatchSplitTensor),
+ context->getOutputShape(kOutputBatchSplitTensor), context);
+ }
+ case OperandType::TENSOR_FLOAT32: {
+ return boxWithNmsLimitFloat32(
+ context->getInputBuffer<float>(kScoreTensor),
+ context->getInputShape(kScoreTensor),
+ context->getInputBuffer<float>(kRoiTensor), context->getInputShape(kRoiTensor),
+ context->getInputBuffer<int32_t>(kBatchSplitTensor),
+ context->getInputShape(kBatchSplitTensor),
+ context->getInputValue<float>(kScoreThresholdScalar),
+ context->getInputValue<float>(kIoUThresholdScalar),
+ context->getInputValue<int32_t>(kMaxNumDetectionScalar),
+ context->getOutputBuffer<float>(kOutputScoreTensor),
+ context->getOutputShape(kOutputScoreTensor),
+ context->getOutputBuffer<float>(kOutputRoiTensor),
+ context->getOutputShape(kOutputRoiTensor),
+ context->getOutputBuffer<int32_t>(kOutputClassTensor),
+ context->getOutputShape(kOutputClassTensor),
+ context->getOutputBuffer<int32_t>(kOutputBatchSplitTensor),
+ context->getOutputShape(kOutputBatchSplitTensor), context);
+ }
+ default:
+ NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
+ }
+}
+
+} // namespace box_with_nms_limit
+
} // namespace generate_proposals
NN_REGISTER_OPERATION(AXIS_ALIGNED_BBOX_TRANSFORM,
@@ -268,5 +563,10 @@
generate_proposals::axis_aligned_bbox_transform::validate,
generate_proposals::axis_aligned_bbox_transform::prepare,
generate_proposals::axis_aligned_bbox_transform::execute);
+
+NN_REGISTER_OPERATION(BOX_WITH_NMS_LIMIT, generate_proposals::box_with_nms_limit::kOperationName,
+ generate_proposals::box_with_nms_limit::validate,
+ generate_proposals::box_with_nms_limit::prepare,
+ generate_proposals::box_with_nms_limit::execute);
} // namespace nn
} // namespace android
diff --git a/runtime/include/NeuralNetworks.h b/runtime/include/NeuralNetworks.h
index 2614c76..4d5b726 100644
--- a/runtime/include/NeuralNetworks.h
+++ b/runtime/include/NeuralNetworks.h
@@ -2285,6 +2285,59 @@
ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM = 41,
ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM = 42,
ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_RNN = 43,
+
+ /**
+ * Greedily selects a subset of bounding boxes in descending order of score.
+ *
+ * This op applies hard NMS algorithm to each class. In each loop of
+ * execution, the box with maximum score gets selected, and any boxes with
+ * the intersection-over-union (IOU) greater than a threshold are removed
+ * from the pending set.
+ *
+ * Axis-aligned bounding boxes are represented by its upper-left corner
+ * coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid
+ * bounding box should satisfy x1 <= x2 and y1 <= y2.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ *
+ * Inputs:
+ * * 0: A 2-D Tensor of shape [num_rois, num_classes], specifying the score
+ * of each bounding box proposal. The boxes are grouped by batches in the
+ * first dimension.
+ * * 1: A 2-D Tensor specifying the bounding boxes of shape
+ * [num_rois, num_classes * 4], organized in the order [x1, y1, x2, y2].
+ * The boxes are grouped by batches in the first dimension. The sequential
+ * order of the boxes corresponds with input0.
+ * * 2: A 1-D Tensor of shape [batches], specifying the number of boxes
+ * for each image in the batch.
+ * * 3: An {@link ANEURALNETWORKS_FLOAT32} scalar, score_threshold. Boxes
+ *      with scores lower than the threshold are filtered before sending
+ *      to the NMS algorithm. For input0 of type
+ *      {@link ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be of
+ *      {@link ANEURALNETWORKS_FLOAT16}.
+ * * 4: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the IoU
+ *      threshold. For input0 of type
+ *      {@link ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be of
+ *      {@link ANEURALNETWORKS_FLOAT16}.
+ * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the maximum
+ *      number of selected bounding boxes for each image. Set to a negative
+ *      value for unlimited number of output bounding boxes.
+ *
+ * Outputs:
+ * * 0: A 1-D Tensor of the same {@link OperandCode} as input0, with shape
+ * [num_output_rois], specifying the score of each output box. The boxes
+ * are grouped by batches, but the sequential order in each batch is not
+ * guaranteed.
+ * * 1: A 2-D Tensor of the same {@link OperandCode} as input1, with shape
+ * [num_output_rois, 4], specifying the coordinates of each
+ * output bounding box with the same format as input1. The sequential
+ * order of the boxes corresponds with output0.
+ * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
+ * [num_output_rois], specifying the class of each output box. The
+ * sequential order of the boxes corresponds with output0.
+ * * 3: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
+ * [batches], specifying the number of output boxes for each image.
+ *
+ * Available since API level 29.
+ */
ANEURALNETWORKS_BOX_WITH_NMS_LIMIT = 44,
/**
diff --git a/runtime/test/TestValidateOperations.cpp b/runtime/test/TestValidateOperations.cpp
index e7d73fa..b6ad386 100644
--- a/runtime/test/TestValidateOperations.cpp
+++ b/runtime/test/TestValidateOperations.cpp
@@ -2569,4 +2569,33 @@
powTest(ANEURALNETWORKS_TENSOR_FLOAT32);
}
+// Shared driver for the BOX_WITH_NMS_LIMIT operand-validation tests: builds
+// the op with the given tensor/scalar operand codes and checks that mutating
+// any operand type or count is rejected.
+void boxWithNmsLimitOpTest(int32_t scoreOperandCode, int32_t roiOperandCode,
+ int32_t scalarOperandCode) {
+ // Shapes mirror the generated example: 19 rois, 3 classes, 2 batches.
+ uint32_t scoreDim[] = {19, 3}, roiDim[] = {19, 12}, splitDim[] = {2};
+ uint32_t outScoreDim[] = {12}, outRoiDim[] = {12, 4}, outClassDim[] = {12}, outSplitDim[] = {2};
+ OperationTestBase boxWithNmsLimitTest(
+ ANEURALNETWORKS_BOX_WITH_NMS_LIMIT,
+ {getOpType(scoreOperandCode, 2, scoreDim), getOpType(roiOperandCode, 2, roiDim),
+ getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, splitDim), getOpType(scalarOperandCode),
+ getOpType(scalarOperandCode), getOpType(ANEURALNETWORKS_INT32)},
+ {getOpType(scoreOperandCode, 1, outScoreDim), getOpType(roiOperandCode, 2, outRoiDim),
+ getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, outClassDim),
+ getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, outSplitDim)});
+
+ EXPECT_TRUE(boxWithNmsLimitTest.testMutatingInputOperandCode());
+ EXPECT_TRUE(boxWithNmsLimitTest.testMutatingInputOperandCounts());
+ EXPECT_TRUE(boxWithNmsLimitTest.testMutatingOutputOperandCode());
+ EXPECT_TRUE(boxWithNmsLimitTest.testMutatingOutputOperandCounts());
+}
+
+TEST(OperationValidationTest, BOX_WITH_NMS_LIMIT_float16) {
+ boxWithNmsLimitOpTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_TENSOR_FLOAT16,
+ ANEURALNETWORKS_FLOAT16);
+}
+
+TEST(OperationValidationTest, BOX_WITH_NMS_LIMIT_float32) {
+ boxWithNmsLimitOpTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_TENSOR_FLOAT32,
+ ANEURALNETWORKS_FLOAT32);
+}
+
} // end namespace
diff --git a/runtime/test/for-cts/TestGeneratedOneFile.cpp b/runtime/test/for-cts/TestGeneratedOneFile.cpp
index b94970e..ef74e7d 100644
--- a/runtime/test/for-cts/TestGeneratedOneFile.cpp
+++ b/runtime/test/for-cts/TestGeneratedOneFile.cpp
@@ -337,6 +337,7 @@
#include "../generated/tests/avg_pool_v1_2.mod.py.cpp"
#include "../generated/tests/axis_aligned_bbox_transform.mod.py.cpp"
#include "../generated/tests/batch_to_space_v1_2.mod.py.cpp"
+#include "../generated/tests/box_with_nms_limit.mod.py.cpp"
#include "../generated/tests/cast.mod.py.cpp"
#include "../generated/tests/channel_shuffle.mod.py.cpp"
#include "../generated/tests/concat_float16_1.mod.py.cpp"
diff --git a/runtime/test/generated/examples/box_with_nms_limit.example.cpp b/runtime/test/generated/examples/box_with_nms_limit.example.cpp
new file mode 100644
index 0000000..e5990e9
--- /dev/null
+++ b/runtime/test/generated/examples/box_with_nms_limit.example.cpp
@@ -0,0 +1,638 @@
+// clang-format off
+// Generated file (from: box_with_nms_limit.mod.py). Do not edit
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {19, 3}}, {1, {19, 12}}, {2, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {{0, {0.9f, 0.95f, 0.75f, 0.8f, 0.7f, 0.85f, 0.6f, 0.9f, 0.95f, 0.9f, 0.65f, 0.9f, 0.8f, 0.85f, 0.8f, 0.6f, 0.6f, 0.2f, 0.6f, 0.8f, 0.4f, 0.9f, 0.55f, 0.6f, 0.9f, 0.75f, 0.7f, 0.8f, 0.7f, 0.85f, 0.9f, 0.95f, 0.75f, 0.8f, 0.85f, 0.8f, 0.6f, 0.9f, 0.95f, 0.6f, 0.6f, 0.2f, 0.5f, 0.9f, 0.8f, 0.9f, 0.75f, 0.7f, 0.9f, 0.65f, 0.9f, 0.9f, 0.55f, 0.6f, 0.6f, 0.8f, 0.4f}}, {1, {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f, 2.0f, 2.0f, 11.0f, 11.0f, 1.0f, 1.0f, 11.0f, 11.0f, 1.0f, 1.0f, 11.0f, 11.0f, 3.0f, 3.0f, 12.0f, 12.0f, 2.0f, 2.0f, 12.0f, 12.0f, 2.0f, 2.0f, 12.0f, 12.0f, 4.0f, 4.0f, 13.0f, 13.0f, 3.0f, 3.0f, 13.0f, 13.0f, 3.0f, 3.0f, 13.0f, 13.0f, 5.0f, 5.0f, 14.0f, 14.0f, 4.0f, 4.0f, 14.0f, 14.0f, 4.0f, 4.0f, 14.0f, 14.0f, 6.0f, 6.0f, 15.0f, 15.0f, 5.0f, 5.0f, 15.0f, 15.0f, 5.0f, 5.0f, 15.0f, 15.0f, 7.0f, 7.0f, 16.0f, 16.0f, 6.0f, 6.0f, 16.0f, 16.0f, 6.0f, 6.0f, 16.0f, 16.0f, 8.0f, 8.0f, 17.0f, 17.0f, 7.0f, 7.0f, 17.0f, 17.0f, 7.0f, 7.0f, 17.0f, 17.0f, 9.0f, 9.0f, 18.0f, 18.0f, 8.0f, 8.0f, 18.0f, 18.0f, 8.0f, 8.0f, 18.0f, 18.0f, 2.0f, 2.0f, 11.0f, 11.0f, 2.0f, 2.0f, 12.0f, 12.0f, 2.0f, 2.0f, 12.0f, 12.0f, 1.0f, 1.0f, 10.0f, 10.0f, 1.0f, 1.0f, 11.0f, 11.0f, 1.0f, 1.0f, 11.0f, 11.0f, 5.0f, 5.0f, 14.0f, 14.0f, 5.0f, 5.0f, 15.0f, 15.0f, 5.0f, 5.0f, 15.0f, 15.0f, 3.0f, 3.0f, 12.0f, 12.0f, 3.0f, 3.0f, 13.0f, 13.0f, 3.0f, 3.0f, 13.0f, 13.0f, 6.0f, 6.0f, 15.0f, 15.0f, 6.0f, 6.0f, 16.0f, 16.0f, 6.0f, 6.0f, 16.0f, 16.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 2.0f, 2.0f, 0.0f, 0.0f, 2.0f, 2.0f, 9.0f, 9.0f, 18.0f, 18.0f, 9.0f, 9.0f, 19.0f, 19.0f, 9.0f, 9.0f, 19.0f, 19.0f, 4.0f, 4.0f, 13.0f, 13.0f, 4.0f, 4.0f, 14.0f, 14.0f, 4.0f, 4.0f, 14.0f, 14.0f, 8.0f, 8.0f, 17.0f, 17.0f, 8.0f, 8.0f, 18.0f, 18.0f, 8.0f, 8.0f, 18.0f, 18.0f, 7.0f, 7.0f, 16.0f, 16.0f, 7.0f, 7.0f, 17.0f, 17.0f, 7.0f, 7.0f, 17.0f, 17.0f}}},
+ // int -> INT32 map
+ .int32Operands = {{2, {9, 10}}},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {12}}, {1, {12, 4}}, {2, {12}}, {3, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {{0, {0.95f, 0.85f, 0.75f, 0.95f, 0.7f, 0.95f, 0.9f, 0.85f, 0.75f, 0.95f, 0.8f, 0.7f}}, {1, {0.0f, 0.0f, 10.0f, 10.0f, 4.0f, 4.0f, 14.0f, 14.0f, 8.0f, 8.0f, 18.0f, 18.0f, 2.0f, 2.0f, 12.0f, 12.0f, 8.0f, 8.0f, 18.0f, 18.0f, 1.0f, 1.0f, 11.0f, 11.0f, 0.0f, 0.0f, 2.0f, 2.0f, 5.0f, 5.0f, 15.0f, 15.0f, 9.0f, 9.0f, 19.0f, 19.0f, 3.0f, 3.0f, 13.0f, 13.0f, 0.0f, 0.0f, 2.0f, 2.0f, 9.0f, 9.0f, 19.0f, 19.0f}}},
+ // int -> INT32 map
+ .int32Operands = {{2, {1, 1, 1, 2, 2, 1, 1, 1, 1, 2, 2, 2}}, {3, {5, 7}}},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples;
+};
+
+std::vector<MixedTypedExample>& get_examples_relaxed() {
+static std::vector<MixedTypedExample> examples_relaxed = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {19, 3}}, {1, {19, 12}}, {2, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {{0, {0.9f, 0.95f, 0.75f, 0.8f, 0.7f, 0.85f, 0.6f, 0.9f, 0.95f, 0.9f, 0.65f, 0.9f, 0.8f, 0.85f, 0.8f, 0.6f, 0.6f, 0.2f, 0.6f, 0.8f, 0.4f, 0.9f, 0.55f, 0.6f, 0.9f, 0.75f, 0.7f, 0.8f, 0.7f, 0.85f, 0.9f, 0.95f, 0.75f, 0.8f, 0.85f, 0.8f, 0.6f, 0.9f, 0.95f, 0.6f, 0.6f, 0.2f, 0.5f, 0.9f, 0.8f, 0.9f, 0.75f, 0.7f, 0.9f, 0.65f, 0.9f, 0.9f, 0.55f, 0.6f, 0.6f, 0.8f, 0.4f}}, {1, {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f, 2.0f, 2.0f, 11.0f, 11.0f, 1.0f, 1.0f, 11.0f, 11.0f, 1.0f, 1.0f, 11.0f, 11.0f, 3.0f, 3.0f, 12.0f, 12.0f, 2.0f, 2.0f, 12.0f, 12.0f, 2.0f, 2.0f, 12.0f, 12.0f, 4.0f, 4.0f, 13.0f, 13.0f, 3.0f, 3.0f, 13.0f, 13.0f, 3.0f, 3.0f, 13.0f, 13.0f, 5.0f, 5.0f, 14.0f, 14.0f, 4.0f, 4.0f, 14.0f, 14.0f, 4.0f, 4.0f, 14.0f, 14.0f, 6.0f, 6.0f, 15.0f, 15.0f, 5.0f, 5.0f, 15.0f, 15.0f, 5.0f, 5.0f, 15.0f, 15.0f, 7.0f, 7.0f, 16.0f, 16.0f, 6.0f, 6.0f, 16.0f, 16.0f, 6.0f, 6.0f, 16.0f, 16.0f, 8.0f, 8.0f, 17.0f, 17.0f, 7.0f, 7.0f, 17.0f, 17.0f, 7.0f, 7.0f, 17.0f, 17.0f, 9.0f, 9.0f, 18.0f, 18.0f, 8.0f, 8.0f, 18.0f, 18.0f, 8.0f, 8.0f, 18.0f, 18.0f, 2.0f, 2.0f, 11.0f, 11.0f, 2.0f, 2.0f, 12.0f, 12.0f, 2.0f, 2.0f, 12.0f, 12.0f, 1.0f, 1.0f, 10.0f, 10.0f, 1.0f, 1.0f, 11.0f, 11.0f, 1.0f, 1.0f, 11.0f, 11.0f, 5.0f, 5.0f, 14.0f, 14.0f, 5.0f, 5.0f, 15.0f, 15.0f, 5.0f, 5.0f, 15.0f, 15.0f, 3.0f, 3.0f, 12.0f, 12.0f, 3.0f, 3.0f, 13.0f, 13.0f, 3.0f, 3.0f, 13.0f, 13.0f, 6.0f, 6.0f, 15.0f, 15.0f, 6.0f, 6.0f, 16.0f, 16.0f, 6.0f, 6.0f, 16.0f, 16.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 2.0f, 2.0f, 0.0f, 0.0f, 2.0f, 2.0f, 9.0f, 9.0f, 18.0f, 18.0f, 9.0f, 9.0f, 19.0f, 19.0f, 9.0f, 9.0f, 19.0f, 19.0f, 4.0f, 4.0f, 13.0f, 13.0f, 4.0f, 4.0f, 14.0f, 14.0f, 4.0f, 4.0f, 14.0f, 14.0f, 8.0f, 8.0f, 17.0f, 17.0f, 8.0f, 8.0f, 18.0f, 18.0f, 8.0f, 8.0f, 18.0f, 18.0f, 7.0f, 7.0f, 16.0f, 16.0f, 7.0f, 7.0f, 17.0f, 17.0f, 7.0f, 7.0f, 17.0f, 17.0f}}},
+ // int -> INT32 map
+ .int32Operands = {{2, {9, 10}}},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {12}}, {1, {12, 4}}, {2, {12}}, {3, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {{0, {0.95f, 0.85f, 0.75f, 0.95f, 0.7f, 0.95f, 0.9f, 0.85f, 0.75f, 0.95f, 0.8f, 0.7f}}, {1, {0.0f, 0.0f, 10.0f, 10.0f, 4.0f, 4.0f, 14.0f, 14.0f, 8.0f, 8.0f, 18.0f, 18.0f, 2.0f, 2.0f, 12.0f, 12.0f, 8.0f, 8.0f, 18.0f, 18.0f, 1.0f, 1.0f, 11.0f, 11.0f, 0.0f, 0.0f, 2.0f, 2.0f, 5.0f, 5.0f, 15.0f, 15.0f, 9.0f, 9.0f, 19.0f, 19.0f, 3.0f, 3.0f, 13.0f, 13.0f, 0.0f, 0.0f, 2.0f, 2.0f, 9.0f, 9.0f, 19.0f, 19.0f}}},
+ // int -> INT32 map
+ .int32Operands = {{2, {1, 1, 1, 2, 2, 1, 1, 1, 1, 2, 2, 2}}, {3, {5, 7}}},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_relaxed;
+};
+
+std::vector<MixedTypedExample>& get_examples_float16() {
+static std::vector<MixedTypedExample> examples_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {19, 3}}, {1, {19, 12}}, {2, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {{2, {9, 10}}},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {{0, {0.8999999761581421f, 0.949999988079071f, 0.75f, 0.800000011920929f, 0.699999988079071f, 0.8500000238418579f, 0.6000000238418579f, 0.8999999761581421f, 0.949999988079071f, 0.8999999761581421f, 0.6499999761581421f, 0.8999999761581421f, 0.800000011920929f, 0.8500000238418579f, 0.800000011920929f, 0.6000000238418579f, 0.6000000238418579f, 0.20000000298023224f, 0.6000000238418579f, 0.800000011920929f, 0.4000000059604645f, 0.8999999761581421f, 0.550000011920929f, 0.6000000238418579f, 0.8999999761581421f, 0.75f, 0.699999988079071f, 0.800000011920929f, 0.699999988079071f, 0.8500000238418579f, 0.8999999761581421f, 0.949999988079071f, 0.75f, 0.800000011920929f, 0.8500000238418579f, 0.800000011920929f, 0.6000000238418579f, 0.8999999761581421f, 0.949999988079071f, 0.6000000238418579f, 0.6000000238418579f, 0.20000000298023224f, 0.5f, 0.8999999761581421f, 0.800000011920929f, 0.8999999761581421f, 0.75f, 0.699999988079071f, 0.8999999761581421f, 0.6499999761581421f, 0.8999999761581421f, 0.8999999761581421f, 0.550000011920929f, 0.6000000238418579f, 0.6000000238418579f, 0.800000011920929f, 0.4000000059604645f}}, {1, {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f, 2.0f, 2.0f, 11.0f, 11.0f, 1.0f, 1.0f, 11.0f, 11.0f, 1.0f, 1.0f, 11.0f, 11.0f, 3.0f, 3.0f, 12.0f, 12.0f, 2.0f, 2.0f, 12.0f, 12.0f, 2.0f, 2.0f, 12.0f, 12.0f, 4.0f, 4.0f, 13.0f, 13.0f, 3.0f, 3.0f, 13.0f, 13.0f, 3.0f, 3.0f, 13.0f, 13.0f, 5.0f, 5.0f, 14.0f, 14.0f, 4.0f, 4.0f, 14.0f, 14.0f, 4.0f, 4.0f, 14.0f, 14.0f, 6.0f, 6.0f, 15.0f, 15.0f, 5.0f, 5.0f, 15.0f, 15.0f, 5.0f, 5.0f, 15.0f, 15.0f, 7.0f, 7.0f, 16.0f, 16.0f, 6.0f, 6.0f, 16.0f, 16.0f, 6.0f, 6.0f, 16.0f, 16.0f, 8.0f, 8.0f, 17.0f, 17.0f, 7.0f, 7.0f, 17.0f, 17.0f, 7.0f, 7.0f, 17.0f, 17.0f, 9.0f, 9.0f, 18.0f, 18.0f, 8.0f, 8.0f, 18.0f, 18.0f, 8.0f, 8.0f, 18.0f, 18.0f, 2.0f, 2.0f, 11.0f, 11.0f, 2.0f, 2.0f, 12.0f, 12.0f, 2.0f, 2.0f, 12.0f, 12.0f, 1.0f, 1.0f, 10.0f, 10.0f, 1.0f, 1.0f, 11.0f, 11.0f, 1.0f, 1.0f, 11.0f, 
11.0f, 5.0f, 5.0f, 14.0f, 14.0f, 5.0f, 5.0f, 15.0f, 15.0f, 5.0f, 5.0f, 15.0f, 15.0f, 3.0f, 3.0f, 12.0f, 12.0f, 3.0f, 3.0f, 13.0f, 13.0f, 3.0f, 3.0f, 13.0f, 13.0f, 6.0f, 6.0f, 15.0f, 15.0f, 6.0f, 6.0f, 16.0f, 16.0f, 6.0f, 6.0f, 16.0f, 16.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 2.0f, 2.0f, 0.0f, 0.0f, 2.0f, 2.0f, 9.0f, 9.0f, 18.0f, 18.0f, 9.0f, 9.0f, 19.0f, 19.0f, 9.0f, 9.0f, 19.0f, 19.0f, 4.0f, 4.0f, 13.0f, 13.0f, 4.0f, 4.0f, 14.0f, 14.0f, 4.0f, 4.0f, 14.0f, 14.0f, 8.0f, 8.0f, 17.0f, 17.0f, 8.0f, 8.0f, 18.0f, 18.0f, 8.0f, 8.0f, 18.0f, 18.0f, 7.0f, 7.0f, 16.0f, 16.0f, 7.0f, 7.0f, 17.0f, 17.0f, 7.0f, 7.0f, 17.0f, 17.0f}}},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {12}}, {1, {12, 4}}, {2, {12}}, {3, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {{2, {1, 1, 1, 2, 2, 1, 1, 1, 1, 2, 2, 2}}, {3, {5, 7}}},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {{0, {0.949999988079071f, 0.8500000238418579f, 0.75f, 0.949999988079071f, 0.699999988079071f, 0.949999988079071f, 0.8999999761581421f, 0.8500000238418579f, 0.75f, 0.949999988079071f, 0.800000011920929f, 0.699999988079071f}}, {1, {0.0f, 0.0f, 10.0f, 10.0f, 4.0f, 4.0f, 14.0f, 14.0f, 8.0f, 8.0f, 18.0f, 18.0f, 2.0f, 2.0f, 12.0f, 12.0f, 8.0f, 8.0f, 18.0f, 18.0f, 1.0f, 1.0f, 11.0f, 11.0f, 0.0f, 0.0f, 2.0f, 2.0f, 5.0f, 5.0f, 15.0f, 15.0f, 9.0f, 9.0f, 19.0f, 19.0f, 3.0f, 3.0f, 13.0f, 13.0f, 0.0f, 0.0f, 2.0f, 2.0f, 9.0f, 9.0f, 19.0f, 19.0f}}},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_float16;
+};
+
+std::vector<MixedTypedExample>& get_examples_dynamic_output_shape() {
+static std::vector<MixedTypedExample> examples_dynamic_output_shape = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {19, 3}}, {1, {19, 12}}, {2, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {{0, {0.9f, 0.95f, 0.75f, 0.8f, 0.7f, 0.85f, 0.6f, 0.9f, 0.95f, 0.9f, 0.65f, 0.9f, 0.8f, 0.85f, 0.8f, 0.6f, 0.6f, 0.2f, 0.6f, 0.8f, 0.4f, 0.9f, 0.55f, 0.6f, 0.9f, 0.75f, 0.7f, 0.8f, 0.7f, 0.85f, 0.9f, 0.95f, 0.75f, 0.8f, 0.85f, 0.8f, 0.6f, 0.9f, 0.95f, 0.6f, 0.6f, 0.2f, 0.5f, 0.9f, 0.8f, 0.9f, 0.75f, 0.7f, 0.9f, 0.65f, 0.9f, 0.9f, 0.55f, 0.6f, 0.6f, 0.8f, 0.4f}}, {1, {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f, 2.0f, 2.0f, 11.0f, 11.0f, 1.0f, 1.0f, 11.0f, 11.0f, 1.0f, 1.0f, 11.0f, 11.0f, 3.0f, 3.0f, 12.0f, 12.0f, 2.0f, 2.0f, 12.0f, 12.0f, 2.0f, 2.0f, 12.0f, 12.0f, 4.0f, 4.0f, 13.0f, 13.0f, 3.0f, 3.0f, 13.0f, 13.0f, 3.0f, 3.0f, 13.0f, 13.0f, 5.0f, 5.0f, 14.0f, 14.0f, 4.0f, 4.0f, 14.0f, 14.0f, 4.0f, 4.0f, 14.0f, 14.0f, 6.0f, 6.0f, 15.0f, 15.0f, 5.0f, 5.0f, 15.0f, 15.0f, 5.0f, 5.0f, 15.0f, 15.0f, 7.0f, 7.0f, 16.0f, 16.0f, 6.0f, 6.0f, 16.0f, 16.0f, 6.0f, 6.0f, 16.0f, 16.0f, 8.0f, 8.0f, 17.0f, 17.0f, 7.0f, 7.0f, 17.0f, 17.0f, 7.0f, 7.0f, 17.0f, 17.0f, 9.0f, 9.0f, 18.0f, 18.0f, 8.0f, 8.0f, 18.0f, 18.0f, 8.0f, 8.0f, 18.0f, 18.0f, 2.0f, 2.0f, 11.0f, 11.0f, 2.0f, 2.0f, 12.0f, 12.0f, 2.0f, 2.0f, 12.0f, 12.0f, 1.0f, 1.0f, 10.0f, 10.0f, 1.0f, 1.0f, 11.0f, 11.0f, 1.0f, 1.0f, 11.0f, 11.0f, 5.0f, 5.0f, 14.0f, 14.0f, 5.0f, 5.0f, 15.0f, 15.0f, 5.0f, 5.0f, 15.0f, 15.0f, 3.0f, 3.0f, 12.0f, 12.0f, 3.0f, 3.0f, 13.0f, 13.0f, 3.0f, 3.0f, 13.0f, 13.0f, 6.0f, 6.0f, 15.0f, 15.0f, 6.0f, 6.0f, 16.0f, 16.0f, 6.0f, 6.0f, 16.0f, 16.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 2.0f, 2.0f, 0.0f, 0.0f, 2.0f, 2.0f, 9.0f, 9.0f, 18.0f, 18.0f, 9.0f, 9.0f, 19.0f, 19.0f, 9.0f, 9.0f, 19.0f, 19.0f, 4.0f, 4.0f, 13.0f, 13.0f, 4.0f, 4.0f, 14.0f, 14.0f, 4.0f, 4.0f, 14.0f, 14.0f, 8.0f, 8.0f, 17.0f, 17.0f, 8.0f, 8.0f, 18.0f, 18.0f, 8.0f, 8.0f, 18.0f, 18.0f, 7.0f, 7.0f, 16.0f, 16.0f, 7.0f, 7.0f, 17.0f, 17.0f, 7.0f, 7.0f, 17.0f, 17.0f}}},
+ // int -> INT32 map
+ .int32Operands = {{2, {9, 10}}},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {12}}, {1, {12, 4}}, {2, {12}}, {3, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {{0, {0.95f, 0.85f, 0.75f, 0.95f, 0.7f, 0.95f, 0.9f, 0.85f, 0.75f, 0.95f, 0.8f, 0.7f}}, {1, {0.0f, 0.0f, 10.0f, 10.0f, 4.0f, 4.0f, 14.0f, 14.0f, 8.0f, 8.0f, 18.0f, 18.0f, 2.0f, 2.0f, 12.0f, 12.0f, 8.0f, 8.0f, 18.0f, 18.0f, 1.0f, 1.0f, 11.0f, 11.0f, 0.0f, 0.0f, 2.0f, 2.0f, 5.0f, 5.0f, 15.0f, 15.0f, 9.0f, 9.0f, 19.0f, 19.0f, 3.0f, 3.0f, 13.0f, 13.0f, 0.0f, 0.0f, 2.0f, 2.0f, 9.0f, 9.0f, 19.0f, 19.0f}}},
+ // int -> INT32 map
+ .int32Operands = {{2, {1, 1, 1, 2, 2, 1, 1, 1, 1, 2, 2, 2}}, {3, {5, 7}}},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_dynamic_output_shape;
+};
+
+std::vector<MixedTypedExample>& get_examples_dynamic_output_shape_relaxed() {
+static std::vector<MixedTypedExample> examples_dynamic_output_shape_relaxed = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {19, 3}}, {1, {19, 12}}, {2, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {{0, {0.9f, 0.95f, 0.75f, 0.8f, 0.7f, 0.85f, 0.6f, 0.9f, 0.95f, 0.9f, 0.65f, 0.9f, 0.8f, 0.85f, 0.8f, 0.6f, 0.6f, 0.2f, 0.6f, 0.8f, 0.4f, 0.9f, 0.55f, 0.6f, 0.9f, 0.75f, 0.7f, 0.8f, 0.7f, 0.85f, 0.9f, 0.95f, 0.75f, 0.8f, 0.85f, 0.8f, 0.6f, 0.9f, 0.95f, 0.6f, 0.6f, 0.2f, 0.5f, 0.9f, 0.8f, 0.9f, 0.75f, 0.7f, 0.9f, 0.65f, 0.9f, 0.9f, 0.55f, 0.6f, 0.6f, 0.8f, 0.4f}}, {1, {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f, 2.0f, 2.0f, 11.0f, 11.0f, 1.0f, 1.0f, 11.0f, 11.0f, 1.0f, 1.0f, 11.0f, 11.0f, 3.0f, 3.0f, 12.0f, 12.0f, 2.0f, 2.0f, 12.0f, 12.0f, 2.0f, 2.0f, 12.0f, 12.0f, 4.0f, 4.0f, 13.0f, 13.0f, 3.0f, 3.0f, 13.0f, 13.0f, 3.0f, 3.0f, 13.0f, 13.0f, 5.0f, 5.0f, 14.0f, 14.0f, 4.0f, 4.0f, 14.0f, 14.0f, 4.0f, 4.0f, 14.0f, 14.0f, 6.0f, 6.0f, 15.0f, 15.0f, 5.0f, 5.0f, 15.0f, 15.0f, 5.0f, 5.0f, 15.0f, 15.0f, 7.0f, 7.0f, 16.0f, 16.0f, 6.0f, 6.0f, 16.0f, 16.0f, 6.0f, 6.0f, 16.0f, 16.0f, 8.0f, 8.0f, 17.0f, 17.0f, 7.0f, 7.0f, 17.0f, 17.0f, 7.0f, 7.0f, 17.0f, 17.0f, 9.0f, 9.0f, 18.0f, 18.0f, 8.0f, 8.0f, 18.0f, 18.0f, 8.0f, 8.0f, 18.0f, 18.0f, 2.0f, 2.0f, 11.0f, 11.0f, 2.0f, 2.0f, 12.0f, 12.0f, 2.0f, 2.0f, 12.0f, 12.0f, 1.0f, 1.0f, 10.0f, 10.0f, 1.0f, 1.0f, 11.0f, 11.0f, 1.0f, 1.0f, 11.0f, 11.0f, 5.0f, 5.0f, 14.0f, 14.0f, 5.0f, 5.0f, 15.0f, 15.0f, 5.0f, 5.0f, 15.0f, 15.0f, 3.0f, 3.0f, 12.0f, 12.0f, 3.0f, 3.0f, 13.0f, 13.0f, 3.0f, 3.0f, 13.0f, 13.0f, 6.0f, 6.0f, 15.0f, 15.0f, 6.0f, 6.0f, 16.0f, 16.0f, 6.0f, 6.0f, 16.0f, 16.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 2.0f, 2.0f, 0.0f, 0.0f, 2.0f, 2.0f, 9.0f, 9.0f, 18.0f, 18.0f, 9.0f, 9.0f, 19.0f, 19.0f, 9.0f, 9.0f, 19.0f, 19.0f, 4.0f, 4.0f, 13.0f, 13.0f, 4.0f, 4.0f, 14.0f, 14.0f, 4.0f, 4.0f, 14.0f, 14.0f, 8.0f, 8.0f, 17.0f, 17.0f, 8.0f, 8.0f, 18.0f, 18.0f, 8.0f, 8.0f, 18.0f, 18.0f, 7.0f, 7.0f, 16.0f, 16.0f, 7.0f, 7.0f, 17.0f, 17.0f, 7.0f, 7.0f, 17.0f, 17.0f}}},
+ // int -> INT32 map
+ .int32Operands = {{2, {9, 10}}},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {12}}, {1, {12, 4}}, {2, {12}}, {3, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {{0, {0.95f, 0.85f, 0.75f, 0.95f, 0.7f, 0.95f, 0.9f, 0.85f, 0.75f, 0.95f, 0.8f, 0.7f}}, {1, {0.0f, 0.0f, 10.0f, 10.0f, 4.0f, 4.0f, 14.0f, 14.0f, 8.0f, 8.0f, 18.0f, 18.0f, 2.0f, 2.0f, 12.0f, 12.0f, 8.0f, 8.0f, 18.0f, 18.0f, 1.0f, 1.0f, 11.0f, 11.0f, 0.0f, 0.0f, 2.0f, 2.0f, 5.0f, 5.0f, 15.0f, 15.0f, 9.0f, 9.0f, 19.0f, 19.0f, 3.0f, 3.0f, 13.0f, 13.0f, 0.0f, 0.0f, 2.0f, 2.0f, 9.0f, 9.0f, 19.0f, 19.0f}}},
+ // int -> INT32 map
+ .int32Operands = {{2, {1, 1, 1, 2, 2, 1, 1, 1, 1, 2, 2, 2}}, {3, {5, 7}}},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_dynamic_output_shape_relaxed;
+};
+
+std::vector<MixedTypedExample>& get_examples_dynamic_output_shape_float16() {
+static std::vector<MixedTypedExample> examples_dynamic_output_shape_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {19, 3}}, {1, {19, 12}}, {2, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {{2, {9, 10}}},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {{0, {0.8999999761581421f, 0.949999988079071f, 0.75f, 0.800000011920929f, 0.699999988079071f, 0.8500000238418579f, 0.6000000238418579f, 0.8999999761581421f, 0.949999988079071f, 0.8999999761581421f, 0.6499999761581421f, 0.8999999761581421f, 0.800000011920929f, 0.8500000238418579f, 0.800000011920929f, 0.6000000238418579f, 0.6000000238418579f, 0.20000000298023224f, 0.6000000238418579f, 0.800000011920929f, 0.4000000059604645f, 0.8999999761581421f, 0.550000011920929f, 0.6000000238418579f, 0.8999999761581421f, 0.75f, 0.699999988079071f, 0.800000011920929f, 0.699999988079071f, 0.8500000238418579f, 0.8999999761581421f, 0.949999988079071f, 0.75f, 0.800000011920929f, 0.8500000238418579f, 0.800000011920929f, 0.6000000238418579f, 0.8999999761581421f, 0.949999988079071f, 0.6000000238418579f, 0.6000000238418579f, 0.20000000298023224f, 0.5f, 0.8999999761581421f, 0.800000011920929f, 0.8999999761581421f, 0.75f, 0.699999988079071f, 0.8999999761581421f, 0.6499999761581421f, 0.8999999761581421f, 0.8999999761581421f, 0.550000011920929f, 0.6000000238418579f, 0.6000000238418579f, 0.800000011920929f, 0.4000000059604645f}}, {1, {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f, 2.0f, 2.0f, 11.0f, 11.0f, 1.0f, 1.0f, 11.0f, 11.0f, 1.0f, 1.0f, 11.0f, 11.0f, 3.0f, 3.0f, 12.0f, 12.0f, 2.0f, 2.0f, 12.0f, 12.0f, 2.0f, 2.0f, 12.0f, 12.0f, 4.0f, 4.0f, 13.0f, 13.0f, 3.0f, 3.0f, 13.0f, 13.0f, 3.0f, 3.0f, 13.0f, 13.0f, 5.0f, 5.0f, 14.0f, 14.0f, 4.0f, 4.0f, 14.0f, 14.0f, 4.0f, 4.0f, 14.0f, 14.0f, 6.0f, 6.0f, 15.0f, 15.0f, 5.0f, 5.0f, 15.0f, 15.0f, 5.0f, 5.0f, 15.0f, 15.0f, 7.0f, 7.0f, 16.0f, 16.0f, 6.0f, 6.0f, 16.0f, 16.0f, 6.0f, 6.0f, 16.0f, 16.0f, 8.0f, 8.0f, 17.0f, 17.0f, 7.0f, 7.0f, 17.0f, 17.0f, 7.0f, 7.0f, 17.0f, 17.0f, 9.0f, 9.0f, 18.0f, 18.0f, 8.0f, 8.0f, 18.0f, 18.0f, 8.0f, 8.0f, 18.0f, 18.0f, 2.0f, 2.0f, 11.0f, 11.0f, 2.0f, 2.0f, 12.0f, 12.0f, 2.0f, 2.0f, 12.0f, 12.0f, 1.0f, 1.0f, 10.0f, 10.0f, 1.0f, 1.0f, 11.0f, 11.0f, 1.0f, 1.0f, 11.0f, 
11.0f, 5.0f, 5.0f, 14.0f, 14.0f, 5.0f, 5.0f, 15.0f, 15.0f, 5.0f, 5.0f, 15.0f, 15.0f, 3.0f, 3.0f, 12.0f, 12.0f, 3.0f, 3.0f, 13.0f, 13.0f, 3.0f, 3.0f, 13.0f, 13.0f, 6.0f, 6.0f, 15.0f, 15.0f, 6.0f, 6.0f, 16.0f, 16.0f, 6.0f, 6.0f, 16.0f, 16.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 2.0f, 2.0f, 0.0f, 0.0f, 2.0f, 2.0f, 9.0f, 9.0f, 18.0f, 18.0f, 9.0f, 9.0f, 19.0f, 19.0f, 9.0f, 9.0f, 19.0f, 19.0f, 4.0f, 4.0f, 13.0f, 13.0f, 4.0f, 4.0f, 14.0f, 14.0f, 4.0f, 4.0f, 14.0f, 14.0f, 8.0f, 8.0f, 17.0f, 17.0f, 8.0f, 8.0f, 18.0f, 18.0f, 8.0f, 8.0f, 18.0f, 18.0f, 7.0f, 7.0f, 16.0f, 16.0f, 7.0f, 7.0f, 17.0f, 17.0f, 7.0f, 7.0f, 17.0f, 17.0f}}},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {12}}, {1, {12, 4}}, {2, {12}}, {3, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {{2, {1, 1, 1, 2, 2, 1, 1, 1, 1, 2, 2, 2}}, {3, {5, 7}}},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {{0, {0.949999988079071f, 0.8500000238418579f, 0.75f, 0.949999988079071f, 0.699999988079071f, 0.949999988079071f, 0.8999999761581421f, 0.8500000238418579f, 0.75f, 0.949999988079071f, 0.800000011920929f, 0.699999988079071f}}, {1, {0.0f, 0.0f, 10.0f, 10.0f, 4.0f, 4.0f, 14.0f, 14.0f, 8.0f, 8.0f, 18.0f, 18.0f, 2.0f, 2.0f, 12.0f, 12.0f, 8.0f, 8.0f, 18.0f, 18.0f, 1.0f, 1.0f, 11.0f, 11.0f, 0.0f, 0.0f, 2.0f, 2.0f, 5.0f, 5.0f, 15.0f, 15.0f, 9.0f, 9.0f, 19.0f, 19.0f, 3.0f, 3.0f, 13.0f, 13.0f, 0.0f, 0.0f, 2.0f, 2.0f, 9.0f, 9.0f, 19.0f, 19.0f}}},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_dynamic_output_shape_float16;
+};
+
+std::vector<MixedTypedExample>& get_examples_2() {
+static std::vector<MixedTypedExample> examples_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {19, 3}}, {1, {19, 12}}, {2, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {{0, {0.9f, 0.95f, 0.75f, 0.8f, 0.7f, 0.85f, 0.6f, 0.9f, 0.95f, 0.9f, 0.65f, 0.9f, 0.8f, 0.85f, 0.8f, 0.6f, 0.6f, 0.2f, 0.6f, 0.8f, 0.4f, 0.9f, 0.55f, 0.6f, 0.9f, 0.75f, 0.7f, 0.8f, 0.7f, 0.85f, 0.9f, 0.95f, 0.75f, 0.8f, 0.85f, 0.8f, 0.6f, 0.9f, 0.95f, 0.6f, 0.6f, 0.2f, 0.5f, 0.9f, 0.8f, 0.9f, 0.75f, 0.7f, 0.9f, 0.65f, 0.9f, 0.9f, 0.55f, 0.6f, 0.6f, 0.8f, 0.4f}}, {1, {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f, 2.0f, 2.0f, 11.0f, 11.0f, 1.0f, 1.0f, 11.0f, 11.0f, 1.0f, 1.0f, 11.0f, 11.0f, 3.0f, 3.0f, 12.0f, 12.0f, 2.0f, 2.0f, 12.0f, 12.0f, 2.0f, 2.0f, 12.0f, 12.0f, 4.0f, 4.0f, 13.0f, 13.0f, 3.0f, 3.0f, 13.0f, 13.0f, 3.0f, 3.0f, 13.0f, 13.0f, 5.0f, 5.0f, 14.0f, 14.0f, 4.0f, 4.0f, 14.0f, 14.0f, 4.0f, 4.0f, 14.0f, 14.0f, 6.0f, 6.0f, 15.0f, 15.0f, 5.0f, 5.0f, 15.0f, 15.0f, 5.0f, 5.0f, 15.0f, 15.0f, 7.0f, 7.0f, 16.0f, 16.0f, 6.0f, 6.0f, 16.0f, 16.0f, 6.0f, 6.0f, 16.0f, 16.0f, 8.0f, 8.0f, 17.0f, 17.0f, 7.0f, 7.0f, 17.0f, 17.0f, 7.0f, 7.0f, 17.0f, 17.0f, 9.0f, 9.0f, 18.0f, 18.0f, 8.0f, 8.0f, 18.0f, 18.0f, 8.0f, 8.0f, 18.0f, 18.0f, 2.0f, 2.0f, 11.0f, 11.0f, 2.0f, 2.0f, 12.0f, 12.0f, 2.0f, 2.0f, 12.0f, 12.0f, 1.0f, 1.0f, 10.0f, 10.0f, 1.0f, 1.0f, 11.0f, 11.0f, 1.0f, 1.0f, 11.0f, 11.0f, 5.0f, 5.0f, 14.0f, 14.0f, 5.0f, 5.0f, 15.0f, 15.0f, 5.0f, 5.0f, 15.0f, 15.0f, 3.0f, 3.0f, 12.0f, 12.0f, 3.0f, 3.0f, 13.0f, 13.0f, 3.0f, 3.0f, 13.0f, 13.0f, 6.0f, 6.0f, 15.0f, 15.0f, 6.0f, 6.0f, 16.0f, 16.0f, 6.0f, 6.0f, 16.0f, 16.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 2.0f, 2.0f, 0.0f, 0.0f, 2.0f, 2.0f, 9.0f, 9.0f, 18.0f, 18.0f, 9.0f, 9.0f, 19.0f, 19.0f, 9.0f, 9.0f, 19.0f, 19.0f, 4.0f, 4.0f, 13.0f, 13.0f, 4.0f, 4.0f, 14.0f, 14.0f, 4.0f, 4.0f, 14.0f, 14.0f, 8.0f, 8.0f, 17.0f, 17.0f, 8.0f, 8.0f, 18.0f, 18.0f, 8.0f, 8.0f, 18.0f, 18.0f, 7.0f, 7.0f, 16.0f, 16.0f, 7.0f, 7.0f, 17.0f, 17.0f, 7.0f, 7.0f, 17.0f, 17.0f}}},
+ // int -> INT32 map
+ .int32Operands = {{2, {9, 10}}},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {10}}, {1, {10, 4}}, {2, {10}}, {3, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {{0, {0.95f, 0.85f, 0.75f, 0.95f, 0.7f, 0.95f, 0.9f, 0.85f, 0.95f, 0.8f}}, {1, {0.0f, 0.0f, 10.0f, 10.0f, 4.0f, 4.0f, 14.0f, 14.0f, 8.0f, 8.0f, 18.0f, 18.0f, 2.0f, 2.0f, 12.0f, 12.0f, 8.0f, 8.0f, 18.0f, 18.0f, 1.0f, 1.0f, 11.0f, 11.0f, 0.0f, 0.0f, 2.0f, 2.0f, 5.0f, 5.0f, 15.0f, 15.0f, 3.0f, 3.0f, 13.0f, 13.0f, 0.0f, 0.0f, 2.0f, 2.0f}}},
+ // int -> INT32 map
+ .int32Operands = {{2, {1, 1, 1, 2, 2, 1, 1, 1, 2, 2}}, {3, {5, 5}}},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_relaxed_2() {
+static std::vector<MixedTypedExample> examples_relaxed_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {19, 3}}, {1, {19, 12}}, {2, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {{0, {0.9f, 0.95f, 0.75f, 0.8f, 0.7f, 0.85f, 0.6f, 0.9f, 0.95f, 0.9f, 0.65f, 0.9f, 0.8f, 0.85f, 0.8f, 0.6f, 0.6f, 0.2f, 0.6f, 0.8f, 0.4f, 0.9f, 0.55f, 0.6f, 0.9f, 0.75f, 0.7f, 0.8f, 0.7f, 0.85f, 0.9f, 0.95f, 0.75f, 0.8f, 0.85f, 0.8f, 0.6f, 0.9f, 0.95f, 0.6f, 0.6f, 0.2f, 0.5f, 0.9f, 0.8f, 0.9f, 0.75f, 0.7f, 0.9f, 0.65f, 0.9f, 0.9f, 0.55f, 0.6f, 0.6f, 0.8f, 0.4f}}, {1, {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f, 2.0f, 2.0f, 11.0f, 11.0f, 1.0f, 1.0f, 11.0f, 11.0f, 1.0f, 1.0f, 11.0f, 11.0f, 3.0f, 3.0f, 12.0f, 12.0f, 2.0f, 2.0f, 12.0f, 12.0f, 2.0f, 2.0f, 12.0f, 12.0f, 4.0f, 4.0f, 13.0f, 13.0f, 3.0f, 3.0f, 13.0f, 13.0f, 3.0f, 3.0f, 13.0f, 13.0f, 5.0f, 5.0f, 14.0f, 14.0f, 4.0f, 4.0f, 14.0f, 14.0f, 4.0f, 4.0f, 14.0f, 14.0f, 6.0f, 6.0f, 15.0f, 15.0f, 5.0f, 5.0f, 15.0f, 15.0f, 5.0f, 5.0f, 15.0f, 15.0f, 7.0f, 7.0f, 16.0f, 16.0f, 6.0f, 6.0f, 16.0f, 16.0f, 6.0f, 6.0f, 16.0f, 16.0f, 8.0f, 8.0f, 17.0f, 17.0f, 7.0f, 7.0f, 17.0f, 17.0f, 7.0f, 7.0f, 17.0f, 17.0f, 9.0f, 9.0f, 18.0f, 18.0f, 8.0f, 8.0f, 18.0f, 18.0f, 8.0f, 8.0f, 18.0f, 18.0f, 2.0f, 2.0f, 11.0f, 11.0f, 2.0f, 2.0f, 12.0f, 12.0f, 2.0f, 2.0f, 12.0f, 12.0f, 1.0f, 1.0f, 10.0f, 10.0f, 1.0f, 1.0f, 11.0f, 11.0f, 1.0f, 1.0f, 11.0f, 11.0f, 5.0f, 5.0f, 14.0f, 14.0f, 5.0f, 5.0f, 15.0f, 15.0f, 5.0f, 5.0f, 15.0f, 15.0f, 3.0f, 3.0f, 12.0f, 12.0f, 3.0f, 3.0f, 13.0f, 13.0f, 3.0f, 3.0f, 13.0f, 13.0f, 6.0f, 6.0f, 15.0f, 15.0f, 6.0f, 6.0f, 16.0f, 16.0f, 6.0f, 6.0f, 16.0f, 16.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 2.0f, 2.0f, 0.0f, 0.0f, 2.0f, 2.0f, 9.0f, 9.0f, 18.0f, 18.0f, 9.0f, 9.0f, 19.0f, 19.0f, 9.0f, 9.0f, 19.0f, 19.0f, 4.0f, 4.0f, 13.0f, 13.0f, 4.0f, 4.0f, 14.0f, 14.0f, 4.0f, 4.0f, 14.0f, 14.0f, 8.0f, 8.0f, 17.0f, 17.0f, 8.0f, 8.0f, 18.0f, 18.0f, 8.0f, 8.0f, 18.0f, 18.0f, 7.0f, 7.0f, 16.0f, 16.0f, 7.0f, 7.0f, 17.0f, 17.0f, 7.0f, 7.0f, 17.0f, 17.0f}}},
+ // int -> INT32 map
+ .int32Operands = {{2, {9, 10}}},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {10}}, {1, {10, 4}}, {2, {10}}, {3, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {{0, {0.95f, 0.85f, 0.75f, 0.95f, 0.7f, 0.95f, 0.9f, 0.85f, 0.95f, 0.8f}}, {1, {0.0f, 0.0f, 10.0f, 10.0f, 4.0f, 4.0f, 14.0f, 14.0f, 8.0f, 8.0f, 18.0f, 18.0f, 2.0f, 2.0f, 12.0f, 12.0f, 8.0f, 8.0f, 18.0f, 18.0f, 1.0f, 1.0f, 11.0f, 11.0f, 0.0f, 0.0f, 2.0f, 2.0f, 5.0f, 5.0f, 15.0f, 15.0f, 3.0f, 3.0f, 13.0f, 13.0f, 0.0f, 0.0f, 2.0f, 2.0f}}},
+ // int -> INT32 map
+ .int32Operands = {{2, {1, 1, 1, 2, 2, 1, 1, 1, 2, 2}}, {3, {5, 5}}},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_relaxed_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_float16_2() {
+static std::vector<MixedTypedExample> examples_float16_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {19, 3}}, {1, {19, 12}}, {2, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {{2, {9, 10}}},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {{0, {0.8999999761581421f, 0.949999988079071f, 0.75f, 0.800000011920929f, 0.699999988079071f, 0.8500000238418579f, 0.6000000238418579f, 0.8999999761581421f, 0.949999988079071f, 0.8999999761581421f, 0.6499999761581421f, 0.8999999761581421f, 0.800000011920929f, 0.8500000238418579f, 0.800000011920929f, 0.6000000238418579f, 0.6000000238418579f, 0.20000000298023224f, 0.6000000238418579f, 0.800000011920929f, 0.4000000059604645f, 0.8999999761581421f, 0.550000011920929f, 0.6000000238418579f, 0.8999999761581421f, 0.75f, 0.699999988079071f, 0.800000011920929f, 0.699999988079071f, 0.8500000238418579f, 0.8999999761581421f, 0.949999988079071f, 0.75f, 0.800000011920929f, 0.8500000238418579f, 0.800000011920929f, 0.6000000238418579f, 0.8999999761581421f, 0.949999988079071f, 0.6000000238418579f, 0.6000000238418579f, 0.20000000298023224f, 0.5f, 0.8999999761581421f, 0.800000011920929f, 0.8999999761581421f, 0.75f, 0.699999988079071f, 0.8999999761581421f, 0.6499999761581421f, 0.8999999761581421f, 0.8999999761581421f, 0.550000011920929f, 0.6000000238418579f, 0.6000000238418579f, 0.800000011920929f, 0.4000000059604645f}}, {1, {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f, 2.0f, 2.0f, 11.0f, 11.0f, 1.0f, 1.0f, 11.0f, 11.0f, 1.0f, 1.0f, 11.0f, 11.0f, 3.0f, 3.0f, 12.0f, 12.0f, 2.0f, 2.0f, 12.0f, 12.0f, 2.0f, 2.0f, 12.0f, 12.0f, 4.0f, 4.0f, 13.0f, 13.0f, 3.0f, 3.0f, 13.0f, 13.0f, 3.0f, 3.0f, 13.0f, 13.0f, 5.0f, 5.0f, 14.0f, 14.0f, 4.0f, 4.0f, 14.0f, 14.0f, 4.0f, 4.0f, 14.0f, 14.0f, 6.0f, 6.0f, 15.0f, 15.0f, 5.0f, 5.0f, 15.0f, 15.0f, 5.0f, 5.0f, 15.0f, 15.0f, 7.0f, 7.0f, 16.0f, 16.0f, 6.0f, 6.0f, 16.0f, 16.0f, 6.0f, 6.0f, 16.0f, 16.0f, 8.0f, 8.0f, 17.0f, 17.0f, 7.0f, 7.0f, 17.0f, 17.0f, 7.0f, 7.0f, 17.0f, 17.0f, 9.0f, 9.0f, 18.0f, 18.0f, 8.0f, 8.0f, 18.0f, 18.0f, 8.0f, 8.0f, 18.0f, 18.0f, 2.0f, 2.0f, 11.0f, 11.0f, 2.0f, 2.0f, 12.0f, 12.0f, 2.0f, 2.0f, 12.0f, 12.0f, 1.0f, 1.0f, 10.0f, 10.0f, 1.0f, 1.0f, 11.0f, 11.0f, 1.0f, 1.0f, 11.0f, 
11.0f, 5.0f, 5.0f, 14.0f, 14.0f, 5.0f, 5.0f, 15.0f, 15.0f, 5.0f, 5.0f, 15.0f, 15.0f, 3.0f, 3.0f, 12.0f, 12.0f, 3.0f, 3.0f, 13.0f, 13.0f, 3.0f, 3.0f, 13.0f, 13.0f, 6.0f, 6.0f, 15.0f, 15.0f, 6.0f, 6.0f, 16.0f, 16.0f, 6.0f, 6.0f, 16.0f, 16.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 2.0f, 2.0f, 0.0f, 0.0f, 2.0f, 2.0f, 9.0f, 9.0f, 18.0f, 18.0f, 9.0f, 9.0f, 19.0f, 19.0f, 9.0f, 9.0f, 19.0f, 19.0f, 4.0f, 4.0f, 13.0f, 13.0f, 4.0f, 4.0f, 14.0f, 14.0f, 4.0f, 4.0f, 14.0f, 14.0f, 8.0f, 8.0f, 17.0f, 17.0f, 8.0f, 8.0f, 18.0f, 18.0f, 8.0f, 8.0f, 18.0f, 18.0f, 7.0f, 7.0f, 16.0f, 16.0f, 7.0f, 7.0f, 17.0f, 17.0f, 7.0f, 7.0f, 17.0f, 17.0f}}},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {10}}, {1, {10, 4}}, {2, {10}}, {3, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {{2, {1, 1, 1, 2, 2, 1, 1, 1, 2, 2}}, {3, {5, 5}}},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {{0, {0.949999988079071f, 0.8500000238418579f, 0.75f, 0.949999988079071f, 0.699999988079071f, 0.949999988079071f, 0.8999999761581421f, 0.8500000238418579f, 0.949999988079071f, 0.800000011920929f}}, {1, {0.0f, 0.0f, 10.0f, 10.0f, 4.0f, 4.0f, 14.0f, 14.0f, 8.0f, 8.0f, 18.0f, 18.0f, 2.0f, 2.0f, 12.0f, 12.0f, 8.0f, 8.0f, 18.0f, 18.0f, 1.0f, 1.0f, 11.0f, 11.0f, 0.0f, 0.0f, 2.0f, 2.0f, 5.0f, 5.0f, 15.0f, 15.0f, 3.0f, 3.0f, 13.0f, 13.0f, 0.0f, 0.0f, 2.0f, 2.0f}}},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_float16_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_dynamic_output_shape_2() {
+static std::vector<MixedTypedExample> examples_dynamic_output_shape_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {19, 3}}, {1, {19, 12}}, {2, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {{0, {0.9f, 0.95f, 0.75f, 0.8f, 0.7f, 0.85f, 0.6f, 0.9f, 0.95f, 0.9f, 0.65f, 0.9f, 0.8f, 0.85f, 0.8f, 0.6f, 0.6f, 0.2f, 0.6f, 0.8f, 0.4f, 0.9f, 0.55f, 0.6f, 0.9f, 0.75f, 0.7f, 0.8f, 0.7f, 0.85f, 0.9f, 0.95f, 0.75f, 0.8f, 0.85f, 0.8f, 0.6f, 0.9f, 0.95f, 0.6f, 0.6f, 0.2f, 0.5f, 0.9f, 0.8f, 0.9f, 0.75f, 0.7f, 0.9f, 0.65f, 0.9f, 0.9f, 0.55f, 0.6f, 0.6f, 0.8f, 0.4f}}, {1, {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f, 2.0f, 2.0f, 11.0f, 11.0f, 1.0f, 1.0f, 11.0f, 11.0f, 1.0f, 1.0f, 11.0f, 11.0f, 3.0f, 3.0f, 12.0f, 12.0f, 2.0f, 2.0f, 12.0f, 12.0f, 2.0f, 2.0f, 12.0f, 12.0f, 4.0f, 4.0f, 13.0f, 13.0f, 3.0f, 3.0f, 13.0f, 13.0f, 3.0f, 3.0f, 13.0f, 13.0f, 5.0f, 5.0f, 14.0f, 14.0f, 4.0f, 4.0f, 14.0f, 14.0f, 4.0f, 4.0f, 14.0f, 14.0f, 6.0f, 6.0f, 15.0f, 15.0f, 5.0f, 5.0f, 15.0f, 15.0f, 5.0f, 5.0f, 15.0f, 15.0f, 7.0f, 7.0f, 16.0f, 16.0f, 6.0f, 6.0f, 16.0f, 16.0f, 6.0f, 6.0f, 16.0f, 16.0f, 8.0f, 8.0f, 17.0f, 17.0f, 7.0f, 7.0f, 17.0f, 17.0f, 7.0f, 7.0f, 17.0f, 17.0f, 9.0f, 9.0f, 18.0f, 18.0f, 8.0f, 8.0f, 18.0f, 18.0f, 8.0f, 8.0f, 18.0f, 18.0f, 2.0f, 2.0f, 11.0f, 11.0f, 2.0f, 2.0f, 12.0f, 12.0f, 2.0f, 2.0f, 12.0f, 12.0f, 1.0f, 1.0f, 10.0f, 10.0f, 1.0f, 1.0f, 11.0f, 11.0f, 1.0f, 1.0f, 11.0f, 11.0f, 5.0f, 5.0f, 14.0f, 14.0f, 5.0f, 5.0f, 15.0f, 15.0f, 5.0f, 5.0f, 15.0f, 15.0f, 3.0f, 3.0f, 12.0f, 12.0f, 3.0f, 3.0f, 13.0f, 13.0f, 3.0f, 3.0f, 13.0f, 13.0f, 6.0f, 6.0f, 15.0f, 15.0f, 6.0f, 6.0f, 16.0f, 16.0f, 6.0f, 6.0f, 16.0f, 16.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 2.0f, 2.0f, 0.0f, 0.0f, 2.0f, 2.0f, 9.0f, 9.0f, 18.0f, 18.0f, 9.0f, 9.0f, 19.0f, 19.0f, 9.0f, 9.0f, 19.0f, 19.0f, 4.0f, 4.0f, 13.0f, 13.0f, 4.0f, 4.0f, 14.0f, 14.0f, 4.0f, 4.0f, 14.0f, 14.0f, 8.0f, 8.0f, 17.0f, 17.0f, 8.0f, 8.0f, 18.0f, 18.0f, 8.0f, 8.0f, 18.0f, 18.0f, 7.0f, 7.0f, 16.0f, 16.0f, 7.0f, 7.0f, 17.0f, 17.0f, 7.0f, 7.0f, 17.0f, 17.0f}}},
+ // int -> INT32 map
+ .int32Operands = {{2, {9, 10}}},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {10}}, {1, {10, 4}}, {2, {10}}, {3, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {{0, {0.95f, 0.85f, 0.75f, 0.95f, 0.7f, 0.95f, 0.9f, 0.85f, 0.95f, 0.8f}}, {1, {0.0f, 0.0f, 10.0f, 10.0f, 4.0f, 4.0f, 14.0f, 14.0f, 8.0f, 8.0f, 18.0f, 18.0f, 2.0f, 2.0f, 12.0f, 12.0f, 8.0f, 8.0f, 18.0f, 18.0f, 1.0f, 1.0f, 11.0f, 11.0f, 0.0f, 0.0f, 2.0f, 2.0f, 5.0f, 5.0f, 15.0f, 15.0f, 3.0f, 3.0f, 13.0f, 13.0f, 0.0f, 0.0f, 2.0f, 2.0f}}},
+ // int -> INT32 map
+ .int32Operands = {{2, {1, 1, 1, 2, 2, 1, 1, 1, 2, 2}}, {3, {5, 5}}},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_dynamic_output_shape_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_dynamic_output_shape_relaxed_2() {
+static std::vector<MixedTypedExample> examples_dynamic_output_shape_relaxed_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {19, 3}}, {1, {19, 12}}, {2, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {{0, {0.9f, 0.95f, 0.75f, 0.8f, 0.7f, 0.85f, 0.6f, 0.9f, 0.95f, 0.9f, 0.65f, 0.9f, 0.8f, 0.85f, 0.8f, 0.6f, 0.6f, 0.2f, 0.6f, 0.8f, 0.4f, 0.9f, 0.55f, 0.6f, 0.9f, 0.75f, 0.7f, 0.8f, 0.7f, 0.85f, 0.9f, 0.95f, 0.75f, 0.8f, 0.85f, 0.8f, 0.6f, 0.9f, 0.95f, 0.6f, 0.6f, 0.2f, 0.5f, 0.9f, 0.8f, 0.9f, 0.75f, 0.7f, 0.9f, 0.65f, 0.9f, 0.9f, 0.55f, 0.6f, 0.6f, 0.8f, 0.4f}}, {1, {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f, 2.0f, 2.0f, 11.0f, 11.0f, 1.0f, 1.0f, 11.0f, 11.0f, 1.0f, 1.0f, 11.0f, 11.0f, 3.0f, 3.0f, 12.0f, 12.0f, 2.0f, 2.0f, 12.0f, 12.0f, 2.0f, 2.0f, 12.0f, 12.0f, 4.0f, 4.0f, 13.0f, 13.0f, 3.0f, 3.0f, 13.0f, 13.0f, 3.0f, 3.0f, 13.0f, 13.0f, 5.0f, 5.0f, 14.0f, 14.0f, 4.0f, 4.0f, 14.0f, 14.0f, 4.0f, 4.0f, 14.0f, 14.0f, 6.0f, 6.0f, 15.0f, 15.0f, 5.0f, 5.0f, 15.0f, 15.0f, 5.0f, 5.0f, 15.0f, 15.0f, 7.0f, 7.0f, 16.0f, 16.0f, 6.0f, 6.0f, 16.0f, 16.0f, 6.0f, 6.0f, 16.0f, 16.0f, 8.0f, 8.0f, 17.0f, 17.0f, 7.0f, 7.0f, 17.0f, 17.0f, 7.0f, 7.0f, 17.0f, 17.0f, 9.0f, 9.0f, 18.0f, 18.0f, 8.0f, 8.0f, 18.0f, 18.0f, 8.0f, 8.0f, 18.0f, 18.0f, 2.0f, 2.0f, 11.0f, 11.0f, 2.0f, 2.0f, 12.0f, 12.0f, 2.0f, 2.0f, 12.0f, 12.0f, 1.0f, 1.0f, 10.0f, 10.0f, 1.0f, 1.0f, 11.0f, 11.0f, 1.0f, 1.0f, 11.0f, 11.0f, 5.0f, 5.0f, 14.0f, 14.0f, 5.0f, 5.0f, 15.0f, 15.0f, 5.0f, 5.0f, 15.0f, 15.0f, 3.0f, 3.0f, 12.0f, 12.0f, 3.0f, 3.0f, 13.0f, 13.0f, 3.0f, 3.0f, 13.0f, 13.0f, 6.0f, 6.0f, 15.0f, 15.0f, 6.0f, 6.0f, 16.0f, 16.0f, 6.0f, 6.0f, 16.0f, 16.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 2.0f, 2.0f, 0.0f, 0.0f, 2.0f, 2.0f, 9.0f, 9.0f, 18.0f, 18.0f, 9.0f, 9.0f, 19.0f, 19.0f, 9.0f, 9.0f, 19.0f, 19.0f, 4.0f, 4.0f, 13.0f, 13.0f, 4.0f, 4.0f, 14.0f, 14.0f, 4.0f, 4.0f, 14.0f, 14.0f, 8.0f, 8.0f, 17.0f, 17.0f, 8.0f, 8.0f, 18.0f, 18.0f, 8.0f, 8.0f, 18.0f, 18.0f, 7.0f, 7.0f, 16.0f, 16.0f, 7.0f, 7.0f, 17.0f, 17.0f, 7.0f, 7.0f, 17.0f, 17.0f}}},
+ // int -> INT32 map
+ .int32Operands = {{2, {9, 10}}},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {10}}, {1, {10, 4}}, {2, {10}}, {3, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {{0, {0.95f, 0.85f, 0.75f, 0.95f, 0.7f, 0.95f, 0.9f, 0.85f, 0.95f, 0.8f}}, {1, {0.0f, 0.0f, 10.0f, 10.0f, 4.0f, 4.0f, 14.0f, 14.0f, 8.0f, 8.0f, 18.0f, 18.0f, 2.0f, 2.0f, 12.0f, 12.0f, 8.0f, 8.0f, 18.0f, 18.0f, 1.0f, 1.0f, 11.0f, 11.0f, 0.0f, 0.0f, 2.0f, 2.0f, 5.0f, 5.0f, 15.0f, 15.0f, 3.0f, 3.0f, 13.0f, 13.0f, 0.0f, 0.0f, 2.0f, 2.0f}}},
+ // int -> INT32 map
+ .int32Operands = {{2, {1, 1, 1, 2, 2, 1, 1, 1, 2, 2}}, {3, {5, 5}}},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_dynamic_output_shape_relaxed_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_dynamic_output_shape_float16_2() {
+static std::vector<MixedTypedExample> examples_dynamic_output_shape_float16_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {19, 3}}, {1, {19, 12}}, {2, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {{2, {9, 10}}},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+  .float16Operands = {{0, {0.8999999761581421f, 0.949999988079071f, 0.75f, 0.800000011920929f, 0.699999988079071f, 0.8500000238418579f, 0.6000000238418579f, 0.8999999761581421f, 0.949999988079071f, 0.8999999761581421f, 0.6499999761581421f, 0.8999999761581421f, 0.800000011920929f, 0.8500000238418579f, 0.800000011920929f, 0.6000000238418579f, 0.6000000238418579f, 0.20000000298023224f, 0.6000000238418579f, 0.800000011920929f, 0.4000000059604645f, 0.8999999761581421f, 0.550000011920929f, 0.6000000238418579f, 0.8999999761581421f, 0.75f, 0.699999988079071f, 0.800000011920929f, 0.699999988079071f, 0.8500000238418579f, 0.8999999761581421f, 0.949999988079071f, 0.75f, 0.800000011920929f, 0.8500000238418579f, 0.800000011920929f, 0.6000000238418579f, 0.8999999761581421f, 0.949999988079071f, 0.6000000238418579f, 0.6000000238418579f, 0.20000000298023224f, 0.5f, 0.8999999761581421f, 0.800000011920929f, 0.8999999761581421f, 0.75f, 0.699999988079071f, 0.8999999761581421f, 0.6499999761581421f, 0.8999999761581421f, 0.8999999761581421f, 0.550000011920929f, 0.6000000238418579f, 0.6000000238418579f, 0.800000011920929f, 0.4000000059604645f}}, {1, {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f, 2.0f, 2.0f, 11.0f, 11.0f, 1.0f, 1.0f, 11.0f, 11.0f, 1.0f, 1.0f, 11.0f, 11.0f, 3.0f, 3.0f, 12.0f, 12.0f, 2.0f, 2.0f, 12.0f, 12.0f, 2.0f, 2.0f, 12.0f, 12.0f, 4.0f, 4.0f, 13.0f, 13.0f, 3.0f, 3.0f, 13.0f, 13.0f, 3.0f, 3.0f, 13.0f, 13.0f, 5.0f, 5.0f, 14.0f, 14.0f, 4.0f, 4.0f, 14.0f, 14.0f, 4.0f, 4.0f, 14.0f, 14.0f, 6.0f, 6.0f, 15.0f, 15.0f, 5.0f, 5.0f, 15.0f, 15.0f, 5.0f, 5.0f, 15.0f, 15.0f, 7.0f, 7.0f, 16.0f, 16.0f, 6.0f, 6.0f, 16.0f, 16.0f, 6.0f, 6.0f, 16.0f, 16.0f, 8.0f, 8.0f, 17.0f, 17.0f, 7.0f, 7.0f, 17.0f, 17.0f, 7.0f, 7.0f, 17.0f, 17.0f, 9.0f, 9.0f, 18.0f, 18.0f, 8.0f, 8.0f, 18.0f, 18.0f, 8.0f, 8.0f, 18.0f, 18.0f, 2.0f, 2.0f, 11.0f, 11.0f, 2.0f, 2.0f, 12.0f, 12.0f, 2.0f, 2.0f, 12.0f, 12.0f, 1.0f, 1.0f, 10.0f, 10.0f, 1.0f, 1.0f, 11.0f, 11.0f, 1.0f, 1.0f, 11.0f, 11.0f, 5.0f, 5.0f, 14.0f, 14.0f, 5.0f, 5.0f, 15.0f, 15.0f, 5.0f, 5.0f, 15.0f, 15.0f, 3.0f, 3.0f, 12.0f, 12.0f, 3.0f, 3.0f, 13.0f, 13.0f, 3.0f, 3.0f, 13.0f, 13.0f, 6.0f, 6.0f, 15.0f, 15.0f, 6.0f, 6.0f, 16.0f, 16.0f, 6.0f, 6.0f, 16.0f, 16.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 2.0f, 2.0f, 0.0f, 0.0f, 2.0f, 2.0f, 9.0f, 9.0f, 18.0f, 18.0f, 9.0f, 9.0f, 19.0f, 19.0f, 9.0f, 9.0f, 19.0f, 19.0f, 4.0f, 4.0f, 13.0f, 13.0f, 4.0f, 4.0f, 14.0f, 14.0f, 4.0f, 4.0f, 14.0f, 14.0f, 8.0f, 8.0f, 17.0f, 17.0f, 8.0f, 8.0f, 18.0f, 18.0f, 8.0f, 8.0f, 18.0f, 18.0f, 7.0f, 7.0f, 16.0f, 16.0f, 7.0f, 7.0f, 17.0f, 17.0f, 7.0f, 7.0f, 17.0f, 17.0f}}},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {10}}, {1, {10, 4}}, {2, {10}}, {3, {2}}},
+ // int -> FLOAT32 map
+ .float32Operands = {},
+ // int -> INT32 map
+ .int32Operands = {{2, {1, 1, 1, 2, 2, 1, 1, 1, 2, 2}}, {3, {5, 5}}},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {{0, {0.949999988079071f, 0.8500000238418579f, 0.75f, 0.949999988079071f, 0.699999988079071f, 0.949999988079071f, 0.8999999761581421f, 0.8500000238418579f, 0.949999988079071f, 0.800000011920929f}}, {1, {0.0f, 0.0f, 10.0f, 10.0f, 4.0f, 4.0f, 14.0f, 14.0f, 8.0f, 8.0f, 18.0f, 18.0f, 2.0f, 2.0f, 12.0f, 12.0f, 8.0f, 8.0f, 18.0f, 18.0f, 1.0f, 1.0f, 11.0f, 11.0f, 0.0f, 0.0f, 2.0f, 2.0f, 5.0f, 5.0f, 15.0f, 15.0f, 3.0f, 3.0f, 13.0f, 13.0f, 0.0f, 0.0f, 2.0f, 2.0f}}},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_dynamic_output_shape_float16_2;
+};
+
diff --git a/runtime/test/generated/models/box_with_nms_limit.model.cpp b/runtime/test/generated/models/box_with_nms_limit.model.cpp
new file mode 100644
index 0000000..39e9059
--- /dev/null
+++ b/runtime/test/generated/models/box_with_nms_limit.model.cpp
@@ -0,0 +1,490 @@
+// clang-format off
+// Generated file (from: box_with_nms_limit.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type0(Type::TENSOR_FLOAT32, {19, 3});
+ OperandType type1(Type::TENSOR_FLOAT32, {19, 12});
+ OperandType type2(Type::TENSOR_INT32, {2});
+ OperandType type3(Type::TENSOR_FLOAT32, {12});
+ OperandType type4(Type::TENSOR_FLOAT32, {12, 4});
+ OperandType type5(Type::TENSOR_INT32, {12});
+ OperandType type6(Type::FLOAT32, {});
+ OperandType type7(Type::INT32, {});
+ // Phase 1, operands
+ auto scores = model->addOperand(&type0);
+ auto roi = model->addOperand(&type1);
+ auto batchSplit = model->addOperand(&type2);
+ auto param = model->addOperand(&type6);
+ auto param1 = model->addOperand(&type6);
+ auto param2 = model->addOperand(&type7);
+ auto scoresOut = model->addOperand(&type3);
+ auto roiOut = model->addOperand(&type4);
+ auto classesOut = model->addOperand(&type5);
+ auto batchSplitOut = model->addOperand(&type2);
+ // Phase 2, operations
+ static float param_init[] = {0.3f};
+ model->setOperandValue(param, param_init, sizeof(float) * 1);
+ static float param1_init[] = {0.4f};
+ model->setOperandValue(param1, param1_init, sizeof(float) * 1);
+ static int32_t param2_init[] = {-1};
+ model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, batchSplit, param, param1, param2}, {scoresOut, roiOut, classesOut, batchSplitOut});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {scores, roi, batchSplit},
+ {scoresOut, roiOut, classesOut, batchSplitOut});
+ assert(model->isValid());
+}
+
+inline bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed(Model *model) {
+ OperandType type0(Type::TENSOR_FLOAT32, {19, 3});
+ OperandType type1(Type::TENSOR_FLOAT32, {19, 12});
+ OperandType type2(Type::TENSOR_INT32, {2});
+ OperandType type3(Type::TENSOR_FLOAT32, {12});
+ OperandType type4(Type::TENSOR_FLOAT32, {12, 4});
+ OperandType type5(Type::TENSOR_INT32, {12});
+ OperandType type6(Type::FLOAT32, {});
+ OperandType type7(Type::INT32, {});
+ // Phase 1, operands
+ auto scores = model->addOperand(&type0);
+ auto roi = model->addOperand(&type1);
+ auto batchSplit = model->addOperand(&type2);
+ auto param = model->addOperand(&type6);
+ auto param1 = model->addOperand(&type6);
+ auto param2 = model->addOperand(&type7);
+ auto scoresOut = model->addOperand(&type3);
+ auto roiOut = model->addOperand(&type4);
+ auto classesOut = model->addOperand(&type5);
+ auto batchSplitOut = model->addOperand(&type2);
+ // Phase 2, operations
+ static float param_init[] = {0.3f};
+ model->setOperandValue(param, param_init, sizeof(float) * 1);
+ static float param1_init[] = {0.4f};
+ model->setOperandValue(param1, param1_init, sizeof(float) * 1);
+ static int32_t param2_init[] = {-1};
+ model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, batchSplit, param, param1, param2}, {scoresOut, roiOut, classesOut, batchSplitOut});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {scores, roi, batchSplit},
+ {scoresOut, roiOut, classesOut, batchSplitOut});
+ // Phase 4: set relaxed execution
+ model->relaxComputationFloat32toFloat16(true);
+ assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16(Model *model) {
+ OperandType type11(Type::FLOAT16, {});
+ OperandType type12(Type::TENSOR_FLOAT16, {19, 12});
+ OperandType type13(Type::TENSOR_FLOAT16, {12, 4});
+ OperandType type14(Type::TENSOR_FLOAT16, {19, 3});
+ OperandType type15(Type::TENSOR_FLOAT16, {12});
+ OperandType type2(Type::TENSOR_INT32, {2});
+ OperandType type5(Type::TENSOR_INT32, {12});
+ OperandType type7(Type::INT32, {});
+ // Phase 1, operands
+ auto scores = model->addOperand(&type14);
+ auto roi = model->addOperand(&type12);
+ auto batchSplit = model->addOperand(&type2);
+ auto param = model->addOperand(&type11);
+ auto param1 = model->addOperand(&type11);
+ auto param2 = model->addOperand(&type7);
+ auto scoresOut = model->addOperand(&type15);
+ auto roiOut = model->addOperand(&type13);
+ auto classesOut = model->addOperand(&type5);
+ auto batchSplitOut = model->addOperand(&type2);
+ // Phase 2, operations
+ static _Float16 param_init[] = {0.30000001192092896f};
+ model->setOperandValue(param, param_init, sizeof(_Float16) * 1);
+ static _Float16 param1_init[] = {0.4000000059604645f};
+ model->setOperandValue(param1, param1_init, sizeof(_Float16) * 1);
+ static int32_t param2_init[] = {-1};
+ model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, batchSplit, param, param1, param2}, {scoresOut, roiOut, classesOut, batchSplitOut});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {scores, roi, batchSplit},
+ {scoresOut, roiOut, classesOut, batchSplitOut});
+ assert(model->isValid());
+}
+
+inline bool is_ignored_float16(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_dynamic_output_shape(Model *model) {
+ OperandType type0(Type::TENSOR_FLOAT32, {19, 3});
+ OperandType type1(Type::TENSOR_FLOAT32, {19, 12});
+ OperandType type16(Type::TENSOR_FLOAT32, {0});
+ OperandType type17(Type::TENSOR_FLOAT32, {0, 0});
+ OperandType type18(Type::TENSOR_INT32, {0});
+ OperandType type2(Type::TENSOR_INT32, {2});
+ OperandType type6(Type::FLOAT32, {});
+ OperandType type7(Type::INT32, {});
+ // Phase 1, operands
+ auto scores = model->addOperand(&type0);
+ auto roi = model->addOperand(&type1);
+ auto batchSplit = model->addOperand(&type2);
+ auto param = model->addOperand(&type6);
+ auto param1 = model->addOperand(&type6);
+ auto param2 = model->addOperand(&type7);
+ auto scoresOut = model->addOperand(&type16);
+ auto roiOut = model->addOperand(&type17);
+ auto classesOut = model->addOperand(&type18);
+ auto batchSplitOut = model->addOperand(&type18);
+ // Phase 2, operations
+ static float param_init[] = {0.3f};
+ model->setOperandValue(param, param_init, sizeof(float) * 1);
+ static float param1_init[] = {0.4f};
+ model->setOperandValue(param1, param1_init, sizeof(float) * 1);
+ static int32_t param2_init[] = {-1};
+ model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, batchSplit, param, param1, param2}, {scoresOut, roiOut, classesOut, batchSplitOut});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {scores, roi, batchSplit},
+ {scoresOut, roiOut, classesOut, batchSplitOut});
+ assert(model->isValid());
+}
+
+inline bool is_ignored_dynamic_output_shape(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_dynamic_output_shape_relaxed(Model *model) {
+ OperandType type0(Type::TENSOR_FLOAT32, {19, 3});
+ OperandType type1(Type::TENSOR_FLOAT32, {19, 12});
+ OperandType type16(Type::TENSOR_FLOAT32, {0});
+ OperandType type17(Type::TENSOR_FLOAT32, {0, 0});
+ OperandType type18(Type::TENSOR_INT32, {0});
+ OperandType type2(Type::TENSOR_INT32, {2});
+ OperandType type6(Type::FLOAT32, {});
+ OperandType type7(Type::INT32, {});
+ // Phase 1, operands
+ auto scores = model->addOperand(&type0);
+ auto roi = model->addOperand(&type1);
+ auto batchSplit = model->addOperand(&type2);
+ auto param = model->addOperand(&type6);
+ auto param1 = model->addOperand(&type6);
+ auto param2 = model->addOperand(&type7);
+ auto scoresOut = model->addOperand(&type16);
+ auto roiOut = model->addOperand(&type17);
+ auto classesOut = model->addOperand(&type18);
+ auto batchSplitOut = model->addOperand(&type18);
+ // Phase 2, operations
+ static float param_init[] = {0.3f};
+ model->setOperandValue(param, param_init, sizeof(float) * 1);
+ static float param1_init[] = {0.4f};
+ model->setOperandValue(param1, param1_init, sizeof(float) * 1);
+ static int32_t param2_init[] = {-1};
+ model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, batchSplit, param, param1, param2}, {scoresOut, roiOut, classesOut, batchSplitOut});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {scores, roi, batchSplit},
+ {scoresOut, roiOut, classesOut, batchSplitOut});
+ // Phase 4: set relaxed execution
+ model->relaxComputationFloat32toFloat16(true);
+ assert(model->isValid());
+}
+
+inline bool is_ignored_dynamic_output_shape_relaxed(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_dynamic_output_shape_float16(Model *model) {
+ OperandType type11(Type::FLOAT16, {});
+ OperandType type12(Type::TENSOR_FLOAT16, {19, 12});
+ OperandType type14(Type::TENSOR_FLOAT16, {19, 3});
+ OperandType type18(Type::TENSOR_INT32, {0});
+ OperandType type19(Type::TENSOR_FLOAT16, {0});
+ OperandType type2(Type::TENSOR_INT32, {2});
+ OperandType type20(Type::TENSOR_FLOAT16, {0, 0});
+ OperandType type7(Type::INT32, {});
+ // Phase 1, operands
+ auto scores = model->addOperand(&type14);
+ auto roi = model->addOperand(&type12);
+ auto batchSplit = model->addOperand(&type2);
+ auto param = model->addOperand(&type11);
+ auto param1 = model->addOperand(&type11);
+ auto param2 = model->addOperand(&type7);
+ auto scoresOut = model->addOperand(&type19);
+ auto roiOut = model->addOperand(&type20);
+ auto classesOut = model->addOperand(&type18);
+ auto batchSplitOut = model->addOperand(&type18);
+ // Phase 2, operations
+ static _Float16 param_init[] = {0.30000001192092896f};
+ model->setOperandValue(param, param_init, sizeof(_Float16) * 1);
+ static _Float16 param1_init[] = {0.4000000059604645f};
+ model->setOperandValue(param1, param1_init, sizeof(_Float16) * 1);
+ static int32_t param2_init[] = {-1};
+ model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, batchSplit, param, param1, param2}, {scoresOut, roiOut, classesOut, batchSplitOut});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {scores, roi, batchSplit},
+ {scoresOut, roiOut, classesOut, batchSplitOut});
+ assert(model->isValid());
+}
+
+inline bool is_ignored_dynamic_output_shape_float16(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_2(Model *model) {
+ OperandType type0(Type::TENSOR_FLOAT32, {19, 3});
+ OperandType type1(Type::TENSOR_FLOAT32, {19, 12});
+ OperandType type10(Type::TENSOR_INT32, {10});
+ OperandType type2(Type::TENSOR_INT32, {2});
+ OperandType type6(Type::FLOAT32, {});
+ OperandType type7(Type::INT32, {});
+ OperandType type8(Type::TENSOR_FLOAT32, {10});
+ OperandType type9(Type::TENSOR_FLOAT32, {10, 4});
+ // Phase 1, operands
+ auto scores1 = model->addOperand(&type0);
+ auto roi1 = model->addOperand(&type1);
+ auto batchSplit1 = model->addOperand(&type2);
+ auto param3 = model->addOperand(&type6);
+ auto param4 = model->addOperand(&type6);
+ auto param5 = model->addOperand(&type7);
+ auto scoresOut1 = model->addOperand(&type8);
+ auto roiOut1 = model->addOperand(&type9);
+ auto classesOut1 = model->addOperand(&type10);
+ auto batchSplitOut1 = model->addOperand(&type2);
+ // Phase 2, operations
+ static float param3_init[] = {0.3f};
+ model->setOperandValue(param3, param3_init, sizeof(float) * 1);
+ static float param4_init[] = {0.4f};
+ model->setOperandValue(param4, param4_init, sizeof(float) * 1);
+ static int32_t param5_init[] = {5};
+ model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores1, roi1, batchSplit1, param3, param4, param5}, {scoresOut1, roiOut1, classesOut1, batchSplitOut1});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {scores1, roi1, batchSplit1},
+ {scoresOut1, roiOut1, classesOut1, batchSplitOut1});
+ assert(model->isValid());
+}
+
+inline bool is_ignored_2(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed_2(Model *model) {
+ OperandType type0(Type::TENSOR_FLOAT32, {19, 3});
+ OperandType type1(Type::TENSOR_FLOAT32, {19, 12});
+ OperandType type10(Type::TENSOR_INT32, {10});
+ OperandType type2(Type::TENSOR_INT32, {2});
+ OperandType type6(Type::FLOAT32, {});
+ OperandType type7(Type::INT32, {});
+ OperandType type8(Type::TENSOR_FLOAT32, {10});
+ OperandType type9(Type::TENSOR_FLOAT32, {10, 4});
+ // Phase 1, operands
+ auto scores1 = model->addOperand(&type0);
+ auto roi1 = model->addOperand(&type1);
+ auto batchSplit1 = model->addOperand(&type2);
+ auto param3 = model->addOperand(&type6);
+ auto param4 = model->addOperand(&type6);
+ auto param5 = model->addOperand(&type7);
+ auto scoresOut1 = model->addOperand(&type8);
+ auto roiOut1 = model->addOperand(&type9);
+ auto classesOut1 = model->addOperand(&type10);
+ auto batchSplitOut1 = model->addOperand(&type2);
+ // Phase 2, operations
+ static float param3_init[] = {0.3f};
+ model->setOperandValue(param3, param3_init, sizeof(float) * 1);
+ static float param4_init[] = {0.4f};
+ model->setOperandValue(param4, param4_init, sizeof(float) * 1);
+ static int32_t param5_init[] = {5};
+ model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores1, roi1, batchSplit1, param3, param4, param5}, {scoresOut1, roiOut1, classesOut1, batchSplitOut1});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {scores1, roi1, batchSplit1},
+ {scoresOut1, roiOut1, classesOut1, batchSplitOut1});
+ // Phase 4: set relaxed execution
+ model->relaxComputationFloat32toFloat16(true);
+ assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed_2(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16_2(Model *model) {
+ OperandType type10(Type::TENSOR_INT32, {10});
+ OperandType type11(Type::FLOAT16, {});
+ OperandType type12(Type::TENSOR_FLOAT16, {19, 12});
+ OperandType type14(Type::TENSOR_FLOAT16, {19, 3});
+ OperandType type2(Type::TENSOR_INT32, {2});
+ OperandType type21(Type::TENSOR_FLOAT16, {10, 4});
+ OperandType type22(Type::TENSOR_FLOAT16, {10});
+ OperandType type7(Type::INT32, {});
+ // Phase 1, operands
+ auto scores1 = model->addOperand(&type14);
+ auto roi1 = model->addOperand(&type12);
+ auto batchSplit1 = model->addOperand(&type2);
+ auto param3 = model->addOperand(&type11);
+ auto param4 = model->addOperand(&type11);
+ auto param5 = model->addOperand(&type7);
+ auto scoresOut1 = model->addOperand(&type22);
+ auto roiOut1 = model->addOperand(&type21);
+ auto classesOut1 = model->addOperand(&type10);
+ auto batchSplitOut1 = model->addOperand(&type2);
+ // Phase 2, operations
+ static _Float16 param3_init[] = {0.30000001192092896f};
+ model->setOperandValue(param3, param3_init, sizeof(_Float16) * 1);
+ static _Float16 param4_init[] = {0.4000000059604645f};
+ model->setOperandValue(param4, param4_init, sizeof(_Float16) * 1);
+ static int32_t param5_init[] = {5};
+ model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores1, roi1, batchSplit1, param3, param4, param5}, {scoresOut1, roiOut1, classesOut1, batchSplitOut1});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {scores1, roi1, batchSplit1},
+ {scoresOut1, roiOut1, classesOut1, batchSplitOut1});
+ assert(model->isValid());
+}
+
+inline bool is_ignored_float16_2(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_dynamic_output_shape_2(Model *model) {
+ OperandType type0(Type::TENSOR_FLOAT32, {19, 3});
+ OperandType type1(Type::TENSOR_FLOAT32, {19, 12});
+ OperandType type16(Type::TENSOR_FLOAT32, {0});
+ OperandType type17(Type::TENSOR_FLOAT32, {0, 0});
+ OperandType type18(Type::TENSOR_INT32, {0});
+ OperandType type2(Type::TENSOR_INT32, {2});
+ OperandType type6(Type::FLOAT32, {});
+ OperandType type7(Type::INT32, {});
+ // Phase 1, operands
+ auto scores1 = model->addOperand(&type0);
+ auto roi1 = model->addOperand(&type1);
+ auto batchSplit1 = model->addOperand(&type2);
+ auto param3 = model->addOperand(&type6);
+ auto param4 = model->addOperand(&type6);
+ auto param5 = model->addOperand(&type7);
+ auto scoresOut1 = model->addOperand(&type16);
+ auto roiOut1 = model->addOperand(&type17);
+ auto classesOut1 = model->addOperand(&type18);
+ auto batchSplitOut1 = model->addOperand(&type18);
+ // Phase 2, operations
+ static float param3_init[] = {0.3f};
+ model->setOperandValue(param3, param3_init, sizeof(float) * 1);
+ static float param4_init[] = {0.4f};
+ model->setOperandValue(param4, param4_init, sizeof(float) * 1);
+ static int32_t param5_init[] = {5};
+ model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores1, roi1, batchSplit1, param3, param4, param5}, {scoresOut1, roiOut1, classesOut1, batchSplitOut1});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {scores1, roi1, batchSplit1},
+ {scoresOut1, roiOut1, classesOut1, batchSplitOut1});
+ assert(model->isValid());
+}
+
+inline bool is_ignored_dynamic_output_shape_2(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_dynamic_output_shape_relaxed_2(Model *model) {
+ OperandType type0(Type::TENSOR_FLOAT32, {19, 3});
+ OperandType type1(Type::TENSOR_FLOAT32, {19, 12});
+ OperandType type16(Type::TENSOR_FLOAT32, {0});
+ OperandType type17(Type::TENSOR_FLOAT32, {0, 0});
+ OperandType type18(Type::TENSOR_INT32, {0});
+ OperandType type2(Type::TENSOR_INT32, {2});
+ OperandType type6(Type::FLOAT32, {});
+ OperandType type7(Type::INT32, {});
+ // Phase 1, operands
+ auto scores1 = model->addOperand(&type0);
+ auto roi1 = model->addOperand(&type1);
+ auto batchSplit1 = model->addOperand(&type2);
+ auto param3 = model->addOperand(&type6);
+ auto param4 = model->addOperand(&type6);
+ auto param5 = model->addOperand(&type7);
+ auto scoresOut1 = model->addOperand(&type16);
+ auto roiOut1 = model->addOperand(&type17);
+ auto classesOut1 = model->addOperand(&type18);
+ auto batchSplitOut1 = model->addOperand(&type18);
+ // Phase 2, operations
+ static float param3_init[] = {0.3f};
+ model->setOperandValue(param3, param3_init, sizeof(float) * 1);
+ static float param4_init[] = {0.4f};
+ model->setOperandValue(param4, param4_init, sizeof(float) * 1);
+ static int32_t param5_init[] = {5};
+ model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores1, roi1, batchSplit1, param3, param4, param5}, {scoresOut1, roiOut1, classesOut1, batchSplitOut1});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {scores1, roi1, batchSplit1},
+ {scoresOut1, roiOut1, classesOut1, batchSplitOut1});
+ // Phase 4: set relaxed execution
+ model->relaxComputationFloat32toFloat16(true);
+ assert(model->isValid());
+}
+
+inline bool is_ignored_dynamic_output_shape_relaxed_2(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_dynamic_output_shape_float16_2(Model *model) {
+ OperandType type11(Type::FLOAT16, {});
+ OperandType type12(Type::TENSOR_FLOAT16, {19, 12});
+ OperandType type14(Type::TENSOR_FLOAT16, {19, 3});
+ OperandType type18(Type::TENSOR_INT32, {0});
+ OperandType type19(Type::TENSOR_FLOAT16, {0});
+ OperandType type2(Type::TENSOR_INT32, {2});
+ OperandType type20(Type::TENSOR_FLOAT16, {0, 0});
+ OperandType type7(Type::INT32, {});
+ // Phase 1, operands
+ auto scores1 = model->addOperand(&type14);
+ auto roi1 = model->addOperand(&type12);
+ auto batchSplit1 = model->addOperand(&type2);
+ auto param3 = model->addOperand(&type11);
+ auto param4 = model->addOperand(&type11);
+ auto param5 = model->addOperand(&type7);
+ auto scoresOut1 = model->addOperand(&type19);
+ auto roiOut1 = model->addOperand(&type20);
+ auto classesOut1 = model->addOperand(&type18);
+ auto batchSplitOut1 = model->addOperand(&type18);
+ // Phase 2, operations
+ static _Float16 param3_init[] = {0.30000001192092896f};
+ model->setOperandValue(param3, param3_init, sizeof(_Float16) * 1);
+ static _Float16 param4_init[] = {0.4000000059604645f};
+ model->setOperandValue(param4, param4_init, sizeof(_Float16) * 1);
+ static int32_t param5_init[] = {5};
+ model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores1, roi1, batchSplit1, param3, param4, param5}, {scoresOut1, roiOut1, classesOut1, batchSplitOut1});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {scores1, roi1, batchSplit1},
+ {scoresOut1, roiOut1, classesOut1, batchSplitOut1});
+ assert(model->isValid());
+}
+
+inline bool is_ignored_dynamic_output_shape_float16_2(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
diff --git a/runtime/test/generated/tests/box_with_nms_limit.mod.py.cpp b/runtime/test/generated/tests/box_with_nms_limit.mod.py.cpp
new file mode 100644
index 0000000..7085fee
--- /dev/null
+++ b/runtime/test/generated/tests/box_with_nms_limit.mod.py.cpp
@@ -0,0 +1,95 @@
+// clang-format off
+// Generated file (from: box_with_nms_limit.mod.py). Do not edit
+#include "../../TestGenerated.h"
+
+namespace box_with_nms_limit {
+// Generated box_with_nms_limit test
+#include "generated/examples/box_with_nms_limit.example.cpp"
+// Generated model constructor
+#include "generated/models/box_with_nms_limit.model.cpp"
+} // namespace box_with_nms_limit
+
+TEST_F(GeneratedTests, box_with_nms_limit) {
+ execute(box_with_nms_limit::CreateModel,
+ box_with_nms_limit::is_ignored,
+ box_with_nms_limit::get_examples());
+}
+
+TEST_F(GeneratedTests, box_with_nms_limit_relaxed) {
+ execute(box_with_nms_limit::CreateModel_relaxed,
+ box_with_nms_limit::is_ignored_relaxed,
+ box_with_nms_limit::get_examples_relaxed());
+}
+
+TEST_F(GeneratedTests, box_with_nms_limit_float16) {
+ execute(box_with_nms_limit::CreateModel_float16,
+ box_with_nms_limit::is_ignored_float16,
+ box_with_nms_limit::get_examples_float16());
+}
+
+#if 0
+TEST_F(DynamicOutputShapeTests, box_with_nms_limit_dynamic_output_shape) {
+ execute(box_with_nms_limit::CreateModel_dynamic_output_shape,
+ box_with_nms_limit::is_ignored_dynamic_output_shape,
+ box_with_nms_limit::get_examples_dynamic_output_shape());
+}
+
+#endif
+#if 0
+TEST_F(DynamicOutputShapeTests, box_with_nms_limit_dynamic_output_shape_relaxed) {
+ execute(box_with_nms_limit::CreateModel_dynamic_output_shape_relaxed,
+ box_with_nms_limit::is_ignored_dynamic_output_shape_relaxed,
+ box_with_nms_limit::get_examples_dynamic_output_shape_relaxed());
+}
+
+#endif
+#if 0
+TEST_F(DynamicOutputShapeTests, box_with_nms_limit_dynamic_output_shape_float16) {
+ execute(box_with_nms_limit::CreateModel_dynamic_output_shape_float16,
+ box_with_nms_limit::is_ignored_dynamic_output_shape_float16,
+ box_with_nms_limit::get_examples_dynamic_output_shape_float16());
+}
+
+#endif
+TEST_F(GeneratedTests, box_with_nms_limit_2) {
+ execute(box_with_nms_limit::CreateModel_2,
+ box_with_nms_limit::is_ignored_2,
+ box_with_nms_limit::get_examples_2());
+}
+
+TEST_F(GeneratedTests, box_with_nms_limit_relaxed_2) {
+ execute(box_with_nms_limit::CreateModel_relaxed_2,
+ box_with_nms_limit::is_ignored_relaxed_2,
+ box_with_nms_limit::get_examples_relaxed_2());
+}
+
+TEST_F(GeneratedTests, box_with_nms_limit_float16_2) {
+ execute(box_with_nms_limit::CreateModel_float16_2,
+ box_with_nms_limit::is_ignored_float16_2,
+ box_with_nms_limit::get_examples_float16_2());
+}
+
+#if 0
+TEST_F(DynamicOutputShapeTests, box_with_nms_limit_dynamic_output_shape_2) {
+ execute(box_with_nms_limit::CreateModel_dynamic_output_shape_2,
+ box_with_nms_limit::is_ignored_dynamic_output_shape_2,
+ box_with_nms_limit::get_examples_dynamic_output_shape_2());
+}
+
+#endif
+#if 0
+TEST_F(DynamicOutputShapeTests, box_with_nms_limit_dynamic_output_shape_relaxed_2) {
+ execute(box_with_nms_limit::CreateModel_dynamic_output_shape_relaxed_2,
+ box_with_nms_limit::is_ignored_dynamic_output_shape_relaxed_2,
+ box_with_nms_limit::get_examples_dynamic_output_shape_relaxed_2());
+}
+
+#endif
+#if 0
+TEST_F(DynamicOutputShapeTests, box_with_nms_limit_dynamic_output_shape_float16_2) {
+ execute(box_with_nms_limit::CreateModel_dynamic_output_shape_float16_2,
+ box_with_nms_limit::is_ignored_dynamic_output_shape_float16_2,
+ box_with_nms_limit::get_examples_dynamic_output_shape_float16_2());
+}
+
+#endif
diff --git a/runtime/test/specs/V1_2/box_with_nms_limit.mod.py b/runtime/test/specs/V1_2/box_with_nms_limit.mod.py
new file mode 100644
index 0000000..8ae4a52
--- /dev/null
+++ b/runtime/test/specs/V1_2/box_with_nms_limit.mod.py
@@ -0,0 +1,176 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# TEST 1: BOX_WITH_NMS_LIMIT, score_threshold = 0.3, nms_threshold = 0.4, max_detections = -1 (no per-image cap: images keep 5 and 7 detections; contrast TEST 2 below, where a cap of 5 truncates image 1)
+model = Model()
+i1 = Input("scores", "TENSOR_FLOAT32", "{19, 3}") # scores: 19 rois x 3 classes
+i2 = Input("roi", "TENSOR_FLOAT32", "{19, 12}") # roi: 19 rois x 12 values (one 4-coordinate box per class)
+i3 = Input("batchSplit", "TENSOR_INT32", "{2}") # batchSplit: roi count per image (entries sum to 19)
+
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{12}") # scores out: one score per kept detection
+o2 = Output("roiOut", "TENSOR_FLOAT32", "{12, 4}") # roi out: one box per kept detection
+o3 = Output("classesOut", "TENSOR_INT32", "{12}") # classes out: class index per kept detection
+o4 = Output("batchSplitOut", "TENSOR_INT32", "{2}") # batch split out: kept detections per image
+model = model.Operation("BOX_WITH_NMS_LIMIT",
+ i1, i2, i3, 0.3, 0.4, -1).To(o1, o2, o3, o4)
+
+input0 = {
+ i1: [ # scores: one row of 3 per-class scores per roi
+ 0.90, 0.95, 0.75,
+ 0.80, 0.70, 0.85,
+ 0.60, 0.90, 0.95,
+ 0.90, 0.65, 0.90,
+ 0.80, 0.85, 0.80,
+ 0.60, 0.60, 0.20,
+ 0.60, 0.80, 0.40,
+ 0.90, 0.55, 0.60,
+ 0.90, 0.75, 0.70,
+ 0.80, 0.70, 0.85,
+ 0.90, 0.95, 0.75,
+ 0.80, 0.85, 0.80,
+ 0.60, 0.90, 0.95,
+ 0.60, 0.60, 0.20,
+ 0.50, 0.90, 0.80,
+ 0.90, 0.75, 0.70,
+ 0.90, 0.65, 0.90,
+ 0.90, 0.55, 0.60,
+ 0.60, 0.80, 0.40
+ ],
+ i2: [ # roi: per row, 3 boxes (classes 0..2), 4 coords each -- presumably [x1, y1, x2, y2]; confirm against op spec
+ 1, 1, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10,
+ 2, 2, 11, 11, 1, 1, 11, 11, 1, 1, 11, 11,
+ 3, 3, 12, 12, 2, 2, 12, 12, 2, 2, 12, 12,
+ 4, 4, 13, 13, 3, 3, 13, 13, 3, 3, 13, 13,
+ 5, 5, 14, 14, 4, 4, 14, 14, 4, 4, 14, 14,
+ 6, 6, 15, 15, 5, 5, 15, 15, 5, 5, 15, 15,
+ 7, 7, 16, 16, 6, 6, 16, 16, 6, 6, 16, 16,
+ 8, 8, 17, 17, 7, 7, 17, 17, 7, 7, 17, 17,
+ 9, 9, 18, 18, 8, 8, 18, 18, 8, 8, 18, 18,
+ 2, 2, 11, 11, 2, 2, 12, 12, 2, 2, 12, 12,
+ 1, 1, 10, 10, 1, 1, 11, 11, 1, 1, 11, 11,
+ 5, 5, 14, 14, 5, 5, 15, 15, 5, 5, 15, 15,
+ 3, 3, 12, 12, 3, 3, 13, 13, 3, 3, 13, 13,
+ 6, 6, 15, 15, 6, 6, 16, 16, 6, 6, 16, 16,
+ 0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 2, 2,
+ 9, 9, 18, 18, 9, 9, 19, 19, 9, 9, 19, 19,
+ 4, 4, 13, 13, 4, 4, 14, 14, 4, 4, 14, 14,
+ 8, 8, 17, 17, 8, 8, 18, 18, 8, 8, 18, 18,
+ 7, 7, 16, 16, 7, 7, 17, 17, 7, 7, 17, 17
+ ],
+ i3: [9, 10] # batch split: rois 0-8 belong to image 0, rois 9-18 to image 1
+}
+
+output0 = {
+ o1: [0.95, 0.85, 0.75, 0.95, 0.7, 0.95, 0.9, 0.85, 0.75, 0.95, 0.8, 0.7], # per image, grouped by class and sorted by decreasing score; all above score_threshold 0.3
+ o2: [ # box of each kept detection, row-aligned with o1/o3
+ 0, 0, 10, 10,
+ 4, 4, 14, 14,
+ 8, 8, 18, 18,
+ 2, 2, 12, 12,
+ 8, 8, 18, 18,
+ 1, 1, 11, 11,
+ 0, 0, 2, 2,
+ 5, 5, 15, 15,
+ 9, 9, 19, 19,
+ 3, 3, 13, 13,
+ 0, 0, 2, 2,
+ 9, 9, 19, 19
+ ],
+ o3: [1, 1, 1, 2, 2, 1, 1, 1, 1, 2, 2, 2], # NOTE(review): class 0 never appears in the output -- presumably treated as background; confirm against op spec
+ o4: [5, 7], # 5 detections kept for image 0, 7 for image 1 (no cap with max_detections = -1)
+}
+
+Example((input0, output0)).AddVariations("relaxed", "float16") # also emit relaxed-precision and float16 test variants
+
+
+# TEST 2: BOX_WITH_NMS_LIMIT, score_threshold = 0.3, nms_threshold = 0.4, max_detections = 5 (same inputs as TEST 1; the cap truncates image 1 from 7 detections down to its 5 highest-scoring)
+model = Model()
+i1 = Input("scores", "TENSOR_FLOAT32", "{19, 3}") # scores: 19 rois x 3 classes
+i2 = Input("roi", "TENSOR_FLOAT32", "{19, 12}") # roi: 19 rois x 12 values (one 4-coordinate box per class)
+i3 = Input("batchSplit", "TENSOR_INT32", "{2}") # batchSplit: roi count per image (entries sum to 19)
+
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{10}") # scores out: one score per kept detection (2 images x at most 5)
+o2 = Output("roiOut", "TENSOR_FLOAT32", "{10, 4}") # roi out: one box per kept detection
+o3 = Output("classesOut", "TENSOR_INT32", "{10}") # classes out: class index per kept detection
+o4 = Output("batchSplitOut", "TENSOR_INT32", "{2}") # batch split out: kept detections per image
+model = model.Operation("BOX_WITH_NMS_LIMIT",
+ i1, i2, i3, 0.3, 0.4, 5).To(o1, o2, o3, o4)
+
+input0 = {
+ i1: [ # scores: identical to TEST 1
+ 0.90, 0.95, 0.75,
+ 0.80, 0.70, 0.85,
+ 0.60, 0.90, 0.95,
+ 0.90, 0.65, 0.90,
+ 0.80, 0.85, 0.80,
+ 0.60, 0.60, 0.20,
+ 0.60, 0.80, 0.40,
+ 0.90, 0.55, 0.60,
+ 0.90, 0.75, 0.70,
+ 0.80, 0.70, 0.85,
+ 0.90, 0.95, 0.75,
+ 0.80, 0.85, 0.80,
+ 0.60, 0.90, 0.95,
+ 0.60, 0.60, 0.20,
+ 0.50, 0.90, 0.80,
+ 0.90, 0.75, 0.70,
+ 0.90, 0.65, 0.90,
+ 0.90, 0.55, 0.60,
+ 0.60, 0.80, 0.40
+ ],
+ i2: [ # roi: identical to TEST 1
+ 1, 1, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10,
+ 2, 2, 11, 11, 1, 1, 11, 11, 1, 1, 11, 11,
+ 3, 3, 12, 12, 2, 2, 12, 12, 2, 2, 12, 12,
+ 4, 4, 13, 13, 3, 3, 13, 13, 3, 3, 13, 13,
+ 5, 5, 14, 14, 4, 4, 14, 14, 4, 4, 14, 14,
+ 6, 6, 15, 15, 5, 5, 15, 15, 5, 5, 15, 15,
+ 7, 7, 16, 16, 6, 6, 16, 16, 6, 6, 16, 16,
+ 8, 8, 17, 17, 7, 7, 17, 17, 7, 7, 17, 17,
+ 9, 9, 18, 18, 8, 8, 18, 18, 8, 8, 18, 18,
+ 2, 2, 11, 11, 2, 2, 12, 12, 2, 2, 12, 12,
+ 1, 1, 10, 10, 1, 1, 11, 11, 1, 1, 11, 11,
+ 5, 5, 14, 14, 5, 5, 15, 15, 5, 5, 15, 15,
+ 3, 3, 12, 12, 3, 3, 13, 13, 3, 3, 13, 13,
+ 6, 6, 15, 15, 6, 6, 16, 16, 6, 6, 16, 16,
+ 0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 2, 2,
+ 9, 9, 18, 18, 9, 9, 19, 19, 9, 9, 19, 19,
+ 4, 4, 13, 13, 4, 4, 14, 14, 4, 4, 14, 14,
+ 8, 8, 17, 17, 8, 8, 18, 18, 8, 8, 18, 18,
+ 7, 7, 16, 16, 7, 7, 17, 17, 7, 7, 17, 17
+ ],
+ i3: [9, 10] # batch split: rois 0-8 belong to image 0, rois 9-18 to image 1
+}
+
+output0 = {
+ o1: [0.95, 0.85, 0.75, 0.95, 0.7, 0.95, 0.9, 0.85, 0.95, 0.8], # image 1 keeps only its 5 highest-scoring detections of TEST 1's 7, due to max_detections = 5
+ o2: [ # box of each kept detection, row-aligned with o1/o3
+ 0, 0, 10, 10,
+ 4, 4, 14, 14,
+ 8, 8, 18, 18,
+ 2, 2, 12, 12,
+ 8, 8, 18, 18,
+ 1, 1, 11, 11,
+ 0, 0, 2, 2,
+ 5, 5, 15, 15,
+ 3, 3, 13, 13,
+ 0, 0, 2, 2
+ ],
+ o3: [1, 1, 1, 2, 2, 1, 1, 1, 2, 2], # NOTE(review): class 0 never appears in the output -- presumably treated as background; confirm against op spec
+ o4: [5, 5], # both images capped at 5 detections
+}
+
+Example((input0, output0)).AddVariations("relaxed", "float16") # also emit relaxed-precision and float16 test variants