Merge "Disable one non-sensible RGG test case on CONV_2D."
diff --git a/nn/runtime/test/fuzzing/RandomGraphGenerator.cpp b/nn/runtime/test/fuzzing/RandomGraphGenerator.cpp
index 2ad183b..6c21b7e 100644
--- a/nn/runtime/test/fuzzing/RandomGraphGenerator.cpp
+++ b/nn/runtime/test/fuzzing/RandomGraphGenerator.cpp
@@ -52,7 +52,7 @@
 bool RandomOperand::createEdgeIfValid(const RandomOperand& other) const {
     if (other.type != RandomOperandType::INPUT) return false;
     if (dataType != other.dataType || dimensions.size() != other.dimensions.size() ||
-        scale != other.scale || zeroPoint != other.zeroPoint)
+        scale != other.scale || zeroPoint != other.zeroPoint || doNotConnect || other.doNotConnect)
         return false;
     return RandomVariableNetwork::get()->setEqualIfCompatible(dimensions, other.dimensions);
 }
@@ -320,17 +320,26 @@
     EXPECT_LE(mse, criterion.mse);
 }
 
-void expectBooleanEqual(const RandomOperand& op, const OperandBuffer& test) {
+// For boolean values, we expect that the number of mismatches does not exceed a certain ratio.
+void expectBooleanNearlyEqual(const RandomOperand& op, const OperandBuffer& test,
+                              float allowedErrorRatio) {
     const bool8* actual = reinterpret_cast<const bool8*>(test.data());
     const bool8* expected = reinterpret_cast<const bool8*>(op.buffer.data());
     uint32_t len = op.getNumberOfElements();
     uint32_t numErrors = 0;
+    std::stringstream errorMsg;
     for (uint32_t i = 0; i < len; i++) {
-        SCOPED_TRACE(testing::Message() << "When comparing element " << i);
-        if (numErrors < kMaxNumberOfPrintedErrors) EXPECT_EQ(expected[i], actual[i]);
-        if (expected[i] != actual[i]) numErrors++;
+        if (expected[i] != actual[i]) {
+            if (numErrors < kMaxNumberOfPrintedErrors)
+                errorMsg << "    Expected: " << expected[i] << ", actual: " << actual[i]
+                         << ", when comparing element " << i << "\n";
+            numErrors++;
+        }
     }
-    EXPECT_EQ(numErrors, 0u);
+    // When |len| is small, std::ceil intentionally rounds allowedErrorCount up to at least 1,
+    // which allows for greater tolerance on small outputs.
+    uint32_t allowedErrorCount = static_cast<uint32_t>(std::ceil(allowedErrorRatio * len));
+    EXPECT_LE(numErrors, allowedErrorCount) << errorMsg.str();
 }
 
 void RandomGraph::checkResults(const std::vector<OperandBuffer>& buffers,
@@ -340,35 +349,38 @@
     int i = 0;
     for (const auto& op : mOperands) {
         if (op->type == RandomOperandType::OUTPUT) {
-            SCOPED_TRACE(testing::Message() << "When comparing output " << op->ioIndex
-                                            << " of type " << toString(op->dataType));
-            switch (op->dataType) {
-                case Type::TENSOR_FLOAT32:
-                    expectNear<float>(*op, buffers[i], criteria.float32);
-                    break;
-                case Type::TENSOR_FLOAT16:
-                    expectNear<_Float16>(*op, buffers[i], criteria.float16);
-                    break;
-                case Type::TENSOR_INT32:
-                    expectNear<int32_t>(*op, buffers[i], criteria.int32);
-                    break;
-                case Type::TENSOR_QUANT8_ASYMM:
-                    expectNear<uint8_t>(*op, buffers[i], criteria.quant8Asymm);
-                    break;
-                case Type::TENSOR_QUANT8_SYMM:
-                    expectNear<int8_t>(*op, buffers[i], criteria.quant8Symm);
-                    break;
-                case Type::TENSOR_QUANT16_ASYMM:
-                    expectNear<uint16_t>(*op, buffers[i], criteria.quant16Asymm);
-                    break;
-                case Type::TENSOR_QUANT16_SYMM:
-                    expectNear<int16_t>(*op, buffers[i], criteria.quant16Symm);
-                    break;
-                case Type::TENSOR_BOOL8:
-                    expectBooleanEqual(*op, buffers[i]);
-                    break;
-                default:
-                    NN_FUZZER_CHECK(false) << "Data type not supported.";
+            SCOPED_TRACE(testing::Message()
+                         << "When comparing output " << op->ioIndex << " (op" << op->opIndex << ")"
+                         << " of type " << toString(op->dataType));
+            if (!op->doNotCheckAccuracy) {
+                switch (op->dataType) {
+                    case Type::TENSOR_FLOAT32:
+                        expectNear<float>(*op, buffers[i], criteria.float32);
+                        break;
+                    case Type::TENSOR_FLOAT16:
+                        expectNear<_Float16>(*op, buffers[i], criteria.float16);
+                        break;
+                    case Type::TENSOR_INT32:
+                        expectNear<int32_t>(*op, buffers[i], criteria.int32);
+                        break;
+                    case Type::TENSOR_QUANT8_ASYMM:
+                        expectNear<uint8_t>(*op, buffers[i], criteria.quant8Asymm);
+                        break;
+                    case Type::TENSOR_QUANT8_SYMM:
+                        expectNear<int8_t>(*op, buffers[i], criteria.quant8Symm);
+                        break;
+                    case Type::TENSOR_QUANT16_ASYMM:
+                        expectNear<uint16_t>(*op, buffers[i], criteria.quant16Asymm);
+                        break;
+                    case Type::TENSOR_QUANT16_SYMM:
+                        expectNear<int16_t>(*op, buffers[i], criteria.quant16Symm);
+                        break;
+                    case Type::TENSOR_BOOL8:
+                        expectBooleanNearlyEqual(*op, buffers[i], /*allowedErrorRatio=*/0.01);
+                        break;
+                    default:
+                        NN_FUZZER_CHECK(false) << "Data type not supported.";
+                }
             }
             i++;
         }
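
A side note on the new tolerance rule in expectBooleanNearlyEqual, as a minimal standalone sketch (the helper name below is illustrative, not part of the change): because std::ceil rounds the product up, any output with fewer than 1/allowedErrorRatio elements still tolerates one mismatch.

// Illustrative sketch of the mismatch allowance computed above.
#include <cmath>
#include <cstdint>

uint32_t allowedErrors(float allowedErrorRatio, uint32_t numElements) {
    return static_cast<uint32_t>(std::ceil(allowedErrorRatio * numElements));
}

// allowedErrors(0.01f, 50)   == 1   -- ceil(0.5) rounds up, extra slack for small outputs
// allowedErrors(0.01f, 1000) == 10  -- ceil(10.0) == 10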
diff --git a/nn/runtime/test/fuzzing/RandomGraphGenerator.h b/nn/runtime/test/fuzzing/RandomGraphGenerator.h
index 5972d79..c3e2b67 100644
--- a/nn/runtime/test/fuzzing/RandomGraphGenerator.h
+++ b/nn/runtime/test/fuzzing/RandomGraphGenerator.h
@@ -54,6 +54,14 @@
     // The index of the input/output as specified in model->identifyInputsAndOutputs(...).
     int32_t ioIndex = -1;
 
+    // If set to true, this operand will be ignored during the accuracy checking step.
+    bool doNotCheckAccuracy = false;
+
+    // If set to true, this operand will not be connected to another operation. For example, if
+    // this operand is an operation output, it will not be used as an input to another operation
+    // and will eventually end up as a model output.
+    bool doNotConnect = false;
+
     RandomOperand(const OperandSignature& op, Type dataType, uint32_t rank);
 
     // Resize the underlying operand buffer.
diff --git a/nn/runtime/test/fuzzing/TestRandomGraph.cpp b/nn/runtime/test/fuzzing/TestRandomGraph.cpp
index e47ca18..81e1ea5 100644
--- a/nn/runtime/test/fuzzing/TestRandomGraph.cpp
+++ b/nn/runtime/test/fuzzing/TestRandomGraph.cpp
@@ -422,10 +422,10 @@
         .float32 = {.atol = 1e-3f, .rtol = 1e-3f, .bias = 2e-5f, .mse = 1e-7f},
         .float16 = {.atol = 1.0f, .rtol = 1.0f, .bias = 5e-3f, .mse = 1e-4f},
         .int32 = {.atol = 1},
-        .quant8Asymm = {.atol = 8, .bias = 1, .mse = 1},
-        .quant8Symm = {.atol = 8, .bias = 1, .mse = 1},
-        .quant16Asymm = {.atol = 8, .bias = 1, .mse = 1},
-        .quant16Symm = {.atol = 8, .bias = 1, .mse = 1}};
+        .quant8Asymm = {.atol = 10, .bias = 1.5, .mse = 1.5},
+        .quant8Symm = {.atol = 10, .bias = 1.5, .mse = 1.5},
+        .quant16Asymm = {.atol = 10, .bias = 1.5, .mse = 1.5},
+        .quant16Symm = {.atol = 10, .bias = 1.5, .mse = 1.5}};
 
 /*-- NNAPI 1.0 Operations ---------------------------------------------------*/
 
@@ -574,19 +574,19 @@
         .float32 = {.atol = 1e-2f, .rtol = 1e-2f, .bias = 2e-5f, .mse = 1e-7f},
         .float16 = {.atol = 1.0f, .rtol = 1.0f, .bias = 5e-3f, .mse = 1e-4f},
         .int32 = {.atol = 1},
-        .quant8Asymm = {.atol = 8, .bias = 1, .mse = 1},
-        .quant8Symm = {.atol = 8, .bias = 1, .mse = 1},
-        .quant16Asymm = {.atol = 8, .bias = 1, .mse = 1},
-        .quant16Symm = {.atol = 8, .bias = 1, .mse = 1}};
+        .quant8Asymm = {.atol = 12, .bias = 2, .mse = 2},
+        .quant8Symm = {.atol = 12, .bias = 2, .mse = 2},
+        .quant16Asymm = {.atol = 12, .bias = 2, .mse = 2},
+        .quant16Symm = {.atol = 12, .bias = 2, .mse = 2}};
 
 const AccuracyCriteria kLargeGraphCriteria = {
         .float32 = {.atol = 1e-1f, .rtol = 1e-1f, .bias = 1e-2f, .mse = 1e-4f},
         .float16 = {.atol = 1.0f, .rtol = 1.0f, .bias = 1e-1f, .mse = 5e-2f},
         .int32 = {.atol = 1},
-        .quant8Asymm = {.atol = 10, .bias = 2, .mse = 2},
-        .quant8Symm = {.atol = 10, .bias = 2, .mse = 2},
-        .quant16Asymm = {.atol = 10, .bias = 2, .mse = 2},
-        .quant16Symm = {.atol = 10, .bias = 2, .mse = 2}};
+        .quant8Asymm = {.atol = 12, .bias = 2, .mse = 2},
+        .quant8Symm = {.atol = 12, .bias = 2, .mse = 2},
+        .quant16Asymm = {.atol = 12, .bias = 2, .mse = 2},
+        .quant16Symm = {.atol = 12, .bias = 2, .mse = 2}};
 
 // Due to the limitation of the random graph generator, graphs generated with mixed-type or
 // mixed-rank operations are likely to result in a disconnected network. Thus, we filter the
diff --git a/nn/runtime/test/fuzzing/operation_signatures/Broadcast.cpp b/nn/runtime/test/fuzzing/operation_signatures/Broadcast.cpp
index 88a5494..e98ba46 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/Broadcast.cpp
+++ b/nn/runtime/test/fuzzing/operation_signatures/Broadcast.cpp
@@ -45,6 +45,12 @@
         float minScale = op->inputs[0]->scale * op->inputs[1]->scale;
         op->outputs[0]->scale = getUniform(minScale, minScale * 5);
     }
+
+    // DIV and POW may produce Inf output values. We should not connect this output tensor to the
+    // input of another operation.
+    if (op->opType == ANEURALNETWORKS_DIV || op->opType == ANEURALNETWORKS_POW) {
+        op->outputs[0]->doNotConnect = true;
+    }
 }
 
 // For broadcast operations with fused activation.
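
A quick illustration of why the DIV and POW outputs are marked doNotConnect above (standalone sketch, not part of the patch): with random operands, DIV can divide by zero and POW can overflow the float range, and both cases produce IEEE-754 Inf.

#include <cmath>
#include <cstdio>

int main() {
    float zero = 0.0f;                         // a random DIV divisor can be zero
    float divResult = 1.0f / zero;             // Inf under IEEE-754 arithmetic
    float powResult = std::pow(10.0f, 50.0f);  // 1e50 exceeds the float max (~3.4e38) -> Inf
    std::printf("%d %d\n", std::isinf(divResult), std::isinf(powResult));  // prints "1 1"
}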
diff --git a/nn/runtime/test/fuzzing/operation_signatures/Elementwise.cpp b/nn/runtime/test/fuzzing/operation_signatures/Elementwise.cpp
index 548fbb1..dc825bf 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/Elementwise.cpp
+++ b/nn/runtime/test/fuzzing/operation_signatures/Elementwise.cpp
@@ -51,14 +51,34 @@
 
 DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(ABS, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16);
 DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(EXP, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16);
-DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(LOG, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16);
 DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(NEG, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16,
                                         Type::TENSOR_INT32);
-DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(RSQRT, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16);
 DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(SIN, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16);
-DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(SQRT, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16);
 DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(LOGICAL_NOT, V1_2, Type::TENSOR_BOOL8);
 
+// LOG, SQRT, and RSQRT may produce NaN output values. We should not connect the output tensor to
+// the input of another operation.
+static void elementwiseOpWithDisconnectedOutput(Type type, uint32_t rank, RandomOperation* op) {
+    sameShapeOpConstructor(type, rank, op);
+    op->outputs[0]->doNotConnect = true;
+}
+
+#define DEFINE_ELEMENTWISE_SIGNATURE_WITH_DISCONNECTED_OUTPUT(op, ver, ...)     \
+    DEFINE_OPERATION_SIGNATURE(op##_##ver){.opType = ANEURALNETWORKS_##op,      \
+                                           .supportedDataTypes = {__VA_ARGS__}, \
+                                           .supportedRanks = {1, 2, 3, 4, 5},   \
+                                           .version = HalVersion::ver,          \
+                                           .inputs = {INPUT_DEFAULT},           \
+                                           .outputs = {OUTPUT_DEFAULT},         \
+                                           .constructor = elementwiseOpWithDisconnectedOutput};
+
+DEFINE_ELEMENTWISE_SIGNATURE_WITH_DISCONNECTED_OUTPUT(LOG, V1_2, Type::TENSOR_FLOAT32,
+                                                      Type::TENSOR_FLOAT16);
+DEFINE_ELEMENTWISE_SIGNATURE_WITH_DISCONNECTED_OUTPUT(RSQRT, V1_2, Type::TENSOR_FLOAT32,
+                                                      Type::TENSOR_FLOAT16);
+DEFINE_ELEMENTWISE_SIGNATURE_WITH_DISCONNECTED_OUTPUT(SQRT, V1_2, Type::TENSOR_FLOAT32,
+                                                      Type::TENSOR_FLOAT16);
+
 // Quantized operations with special output quantization parameters.
 #define DEFINE_ELEMENTWISE_WITH_QUANT_OUTPUT_SIGNATURE(op, ver, s, z, ...)      \
     DEFINE_OPERATION_SIGNATURE(op##_##ver){.opType = ANEURALNETWORKS_##op,      \
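
For reference, the NaN cases that motivate marking the LOG, SQRT, and RSQRT outputs as doNotConnect (standalone sketch, not part of the patch): each of these operations yields NaN for negative inputs, which the generator can produce.

#include <cmath>
#include <cstdio>

int main() {
    float x = -1.0f;                          // random inputs may be negative
    float logResult = std::log(x);            // log of a negative value  -> NaN
    float sqrtResult = std::sqrt(x);          // sqrt of a negative value -> NaN
    float rsqrtResult = 1.0f / std::sqrt(x);  // RSQRT inherits the NaN
    std::printf("%d %d %d\n", std::isnan(logResult), std::isnan(sqrtResult),
                std::isnan(rsqrtResult));     // prints "1 1 1"
}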
diff --git a/nn/runtime/test/fuzzing/operation_signatures/Normalization.cpp b/nn/runtime/test/fuzzing/operation_signatures/Normalization.cpp
index 4acfea8..59dc0ca 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/Normalization.cpp
+++ b/nn/runtime/test/fuzzing/operation_signatures/Normalization.cpp
@@ -71,6 +71,9 @@
     if (op->inputs.size() > 1) {
         op->inputs[1]->setScalarValue<int32_t>(getUniform<int32_t>(-rank, rank - 1));
     }
+    // L2_NORMALIZATION may produce NaN output values when the input is all zeros. We should not
+    // connect the output tensor to the input of another operation.
+    op->outputs[0]->doNotConnect = true;
 }
 
 DEFINE_OPERATION_SIGNATURE(L2_NORMALIZATION_V1_0){.opType = ANEURALNETWORKS_L2_NORMALIZATION,
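
The all-zero case called out in the comment above, as a standalone sketch (not part of the patch): every element of a zero vector normalizes to 0 / sqrt(0) = 0 / 0, which is NaN.

#include <cmath>
#include <cstdio>

int main() {
    float input[3] = {0.0f, 0.0f, 0.0f};  // an all-zero input vector
    float norm = std::sqrt(input[0] * input[0] + input[1] * input[1] + input[2] * input[2]);
    float normalized = input[0] / norm;   // 0.0f / 0.0f -> NaN
    std::printf("%d\n", std::isnan(normalized));  // prints "1"
}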
diff --git a/nn/runtime/test/fuzzing/operation_signatures/Reduce.cpp b/nn/runtime/test/fuzzing/operation_signatures/Reduce.cpp
index 38dcdcb..8d3a380 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/Reduce.cpp
+++ b/nn/runtime/test/fuzzing/operation_signatures/Reduce.cpp
@@ -52,6 +52,12 @@
         }
     }
     setSameQuantization(op->outputs[0], op->inputs[0]);
+
+    // REDUCE_PROD may produce Inf output values. We should not connect the output tensor to the
+    // input of another operation.
+    if (op->opType == ANEURALNETWORKS_REDUCE_PROD) {
+        op->outputs[0]->doNotConnect = true;
+    }
 }
 
 #define DEFINE_MEAN_SIGNATURE(ver, ...)                                   \
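
Similarly for REDUCE_PROD (standalone sketch, not part of the patch): multiplying enough moderately sized random values overflows the float range, and the running product saturates at Inf.

#include <cmath>
#include <cstdio>

int main() {
    float prod = 1.0f;
    for (int i = 0; i < 100; ++i) prod *= 10.0f;  // overflows past ~3.4e38 and stays at Inf
    std::printf("%d\n", std::isinf(prod));        // prints "1"
}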
diff --git a/nn/runtime/test/fuzzing/operation_signatures/Selection.cpp b/nn/runtime/test/fuzzing/operation_signatures/Selection.cpp
index 8b401f4..432a488 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/Selection.cpp
+++ b/nn/runtime/test/fuzzing/operation_signatures/Selection.cpp
@@ -180,6 +180,10 @@
     op->outputs[0]->dimensions.back() = k;
     op->outputs[1]->dimensions.back() = k;
     setSameQuantization(op->outputs[0], op->inputs[0]);
+
+    // As the sorting is not required to be stable, we should not check the second output (indices).
+    op->outputs[1]->doNotCheckAccuracy = true;
+    op->outputs[1]->doNotConnect = true;
 }
 
 DEFINE_OPERATION_SIGNATURE(TOPK_V2_V1_2){
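
Why the indices output of TOPK_V2 is excluded from accuracy checking: when the input contains ties, an unstable sort may legally return the tied indices in either order, so only the selected values are well-defined. A hypothetical value-based check (checkTopKByValue is not part of the test harness) would look like this:

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical helper: validate TOPK_V2 indices by the values they select rather than by exact
// index equality, since ties make the index order implementation-defined.
bool checkTopKByValue(const std::vector<float>& input, const std::vector<int32_t>& indices,
                      const std::vector<float>& expectedValues) {
    if (indices.size() != expectedValues.size()) return false;
    for (std::size_t i = 0; i < indices.size(); ++i) {
        if (input[indices[i]] != expectedValues[i]) return false;
    }
    return true;
}

// For input {5, 3, 5} with k = 2, both index outputs {0, 2} and {2, 0} select the values {5, 5},
// so both pass this check, while an element-wise index comparison would reject one of them.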