Merge "Improve documentation related to execution."
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp
index 2c3287a..5af3255 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp
@@ -44,6 +44,21 @@
 // in frameworks/ml/nn/runtime/tests/generated/
 #include "all_generated_V1_2_vts_tests.cpp"
 
+// Generated from spec/strided_slice_invalid_output_dims.mod.py.
+// TODO(b/132155416): Make this part of all_generated_V1_2_vts_tests.cpp.
+namespace strided_slice_invalid_output_dims {
+#include "generated/strided_slice_invalid_output_dims.example.cpp"
+#include "generated/strided_slice_invalid_output_dims.model.cpp"
+}  // namespace strided_slice_invalid_output_dims
+
+// TODO(b/132155416): Make this part of all_generated_V1_2_vts_tests.cpp.
+TEST_F(ValidationTest, strided_slice_invalid_output_dims) {
+    const Model model = strided_slice_invalid_output_dims::createTestModel();
+    const std::vector<Request> requests =
+            createRequests(strided_slice_invalid_output_dims::get_examples());
+    validateFailure(model, requests);
+}
+
 }  // namespace functional
 }  // namespace vts
 }  // namespace V1_2
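
Note on the namespace wrapper above: every generated spec defines the same free functions (createTestModel, get_examples, is_ignored), so pulling more than one spec into a single translation unit requires a distinct namespace per spec. A minimal standalone illustration of the idiom, with placeholder names rather than the real generated symbols:

    #include <iostream>
    #include <string>

    // Each "generated" fragment defines a function with the same name; the
    // per-spec namespace keeps the definitions from colliding.
    namespace spec_a {
    std::string createTestModel() { return "model A"; }
    }  // namespace spec_a

    namespace spec_b {
    std::string createTestModel() { return "model B"; }  // same name, no ODR clash
    }  // namespace spec_b

    int main() {
        std::cout << spec_a::createTestModel() << "\n"
                  << spec_b::createTestModel() << "\n";
        return 0;
    }
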
diff --git a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
index 9703c2d..e935aaa 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
@@ -274,6 +274,22 @@
     }
 }
 
+void ValidationTest::validateRequestFailure(const sp<IPreparedModel>& preparedModel,
+                                            const std::vector<Request>& requests) {
+    for (const Request& request : requests) {
+        SCOPED_TRACE("Expecting request to fail [executeSynchronously]");
+        Return<void> executeStatus = preparedModel->executeSynchronously(
+                request, MeasureTiming::NO,
+                [](ErrorStatus error, const hidl_vec<OutputShape>& outputShapes,
+                   const Timing& timing) {
+                    ASSERT_NE(ErrorStatus::NONE, error);
+                    EXPECT_EQ(outputShapes.size(), 0);
+                    EXPECT_TRUE(badTiming(timing));
+                });
+        ASSERT_TRUE(executeStatus.isOk());
+    }
+}
+
 }  // namespace functional
 }  // namespace vts
 }  // namespace V1_2
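
The callback added above asserts that a failed execution reports no output shapes and "bad" timing. badTiming() is an existing VTS helper that this patch does not show; the sketch below captures the convention it is assumed to check (both timing fields set to UINT64_MAX, meaning no measurement is available), and is a stand-in rather than the real helper:

    #include <cstdint>

    // Local stand-in for the 1.2 Timing struct: durations in microseconds,
    // UINT64_MAX when no measurement is available.
    struct Timing {
        uint64_t timeOnDevice;
        uint64_t timeInDriver;
    };

    // Assumed behavior of the VTS badTiming() helper: a failed or unmeasured
    // execution should report UINT64_MAX for both fields.
    static bool badTiming(Timing timing) {
        return timing.timeOnDevice == UINT64_MAX && timing.timeInDriver == UINT64_MAX;
    }

    int main() {
        const Timing failedTiming = {UINT64_MAX, UINT64_MAX};
        return badTiming(failedTiming) ? 0 : 1;
    }
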
diff --git a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
index 4ddefe8..666f9b5b 100644
--- a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
@@ -140,6 +140,20 @@
     validateBurst(preparedModel, requests);
 }
 
+void ValidationTest::validateFailure(const Model& model, const std::vector<Request>& requests) {
+    // TODO: Should this always succeed?
+    //       What if the invalid input is part of the model (i.e., a parameter)?
+    validateModel(model);
+
+    sp<IPreparedModel> preparedModel;
+    ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
+    if (preparedModel == nullptr) {
+        return;
+    }
+
+    validateRequestFailure(preparedModel, requests);
+}
+
 sp<IPreparedModel> getPreparedModel_1_2(
     const sp<V1_2::implementation::PreparedModelCallback>& callback) {
     sp<V1_0::IPreparedModel> preparedModelV1_0 = callback->getPreparedModel();
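
validateFailure deliberately treats a null prepared model as "skip" rather than "fail": createPreparedModel may report that the driver cannot prepare the model at all, and ASSERT_NO_FATAL_FAILURE is what lets a fatal assertion inside that helper abort this caller as well. A small self-contained GTest sketch of the idiom; the helper and its parameters are illustrative, not the real VTS functions:

    #include <gtest/gtest.h>

    namespace {

    // Stand-in for createPreparedModel(): a fatal ASSERT_* inside it aborts only
    // this function, so callers wrap the call in ASSERT_NO_FATAL_FAILURE. A
    // nullptr result models "driver cannot prepare this model".
    void prepareOrFail(bool callSucceeded, bool driverSupportsModel, int** preparedModel) {
        static int model = 0;
        ASSERT_TRUE(callSucceeded);  // fatal failure propagates to the caller
        *preparedModel = driverSupportsModel ? &model : nullptr;
    }

    TEST(ValidateFailureSketch, SkipWhenModelCannotBePrepared) {
        int* preparedModel = nullptr;
        ASSERT_NO_FATAL_FAILURE(
                prepareOrFail(/*callSucceeded=*/true, /*driverSupportsModel=*/false,
                              &preparedModel));
        if (preparedModel == nullptr) {
            return;  // nothing to execute against; a skip, not a test failure
        }
        FAIL() << "unreachable in this configuration";
    }

    }  // namespace
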
diff --git a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
index 8d1acbe..80e810a 100644
--- a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
@@ -73,11 +73,14 @@
 class ValidationTest : public NeuralnetworksHidlTest {
    protected:
      void validateEverything(const Model& model, const std::vector<Request>& requests);
+     void validateFailure(const Model& model, const std::vector<Request>& requests);
 
    private:
      void validateModel(const Model& model);
      void validateRequests(const sp<IPreparedModel>& preparedModel,
                            const std::vector<Request>& requests);
+     void validateRequestFailure(const sp<IPreparedModel>& preparedModel,
+                                 const std::vector<Request>& requests);
      void validateBurst(const sp<IPreparedModel>& preparedModel,
                         const std::vector<Request>& requests);
 };
diff --git a/neuralnetworks/1.2/vts/functional/generated/strided_slice_invalid_output_dims.example.cpp b/neuralnetworks/1.2/vts/functional/generated/strided_slice_invalid_output_dims.example.cpp
new file mode 100644
index 0000000..0640833
--- /dev/null
+++ b/neuralnetworks/1.2/vts/functional/generated/strided_slice_invalid_output_dims.example.cpp
@@ -0,0 +1,116 @@
+// clang-format off
+// Generated file (from: strided_slice_invalid_output_dims.mod.py). Do not edit
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {2, 3}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}}},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {3}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {1.0f, 2.0f, 3.0f}}},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples;
+};
+
+std::vector<MixedTypedExample>& get_examples_dynamic_output_shape() {
+static std::vector<MixedTypedExample> examples_dynamic_output_shape = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {2, 3}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}}},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {3}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {1.0f, 2.0f, 3.0f}}},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_dynamic_output_shape;
+};
+
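
For readers without frameworks/ml checked out: the designated initializers in this generated file target the MixedTyped/MixedTypedExample harness types from tools/test_generator/include/TestHarness.h. Below is a simplified, hedged stand-in showing only the fields this file actually populates; the real header carries more operand maps and metadata:

    #include <cstdint>
    #include <map>
    #include <utility>
    #include <vector>

    // Operand index -> dimensions, as used by .operandDimensions above.
    using OperandDimensions = std::map<int, std::vector<uint32_t>>;

    struct MixedTyped {
        OperandDimensions operandDimensions;
        std::map<int, std::vector<float>> float32Operands;
        std::map<int, std::vector<int32_t>> int32Operands;
        // ... further maps for QUANT8_ASYMM, FLOAT16, BOOL8, and so on.
    };

    struct MixedTypedExample {
        std::pair<MixedTyped, MixedTyped> operands;  // {inputs, outputs}
    };

    int main() {
        // Mirrors the first example above: a {2, 3} float input and a {3} output.
        MixedTypedExample example{
                {MixedTyped{{{0, {2, 3}}}, {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}}}, {}},
                 MixedTyped{{{0, {3}}}, {{0, {1.0f, 2.0f, 3.0f}}}, {}}}};
        return example.operands.first.float32Operands.at(0).size() == 6 ? 0 : 1;
    }
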
diff --git a/neuralnetworks/1.2/vts/functional/generated/strided_slice_invalid_output_dims.model.cpp b/neuralnetworks/1.2/vts/functional/generated/strided_slice_invalid_output_dims.model.cpp
new file mode 100644
index 0000000..106655a
--- /dev/null
+++ b/neuralnetworks/1.2/vts/functional/generated/strided_slice_invalid_output_dims.model.cpp
@@ -0,0 +1,216 @@
+// clang-format off
+// Generated file (from: strided_slice_invalid_output_dims.mod.py). Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {2, 3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 8},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 16, .length = 8},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 24, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 28, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 32, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {3},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::STRIDED_SLICE,
+            .inputs = {0, 1, 2, 3, 4, 5, 6},
+            .outputs = {7},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {7};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_dynamic_output_shape() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {2, 3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 8},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 16, .length = 8},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 24, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 28, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 32, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::STRIDED_SLICE,
+            .inputs = {0, 1, 2, 3, 4, 5, 6},
+            .outputs = {7},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {7};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_dynamic_output_shape(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
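
The operandValues blob in this file packs every CONSTANT_COPY operand back-to-back; each operand's .location {offset, length} indexes into it, and the payloads are little-endian int32. The standalone snippet below decodes the 36 bytes and recovers the spec parameters (begins {0, 0}, ends {2, 3}, strides {1, 1}, masks 0/0/1); it assumes a little-endian host, matching the blob's layout:

    #include <cstdint>
    #include <cstring>
    #include <iostream>
    #include <vector>

    int main() {
        // Same bytes as operandValues above.
        const std::vector<uint8_t> operandValues = {
                0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 1, 0,
                0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0};

        // Decode one constant operand given its .location offset and length.
        auto decode = [&](const char* name, size_t offset, size_t length) {
            std::cout << name << ":";
            for (size_t i = 0; i < length; i += sizeof(int32_t)) {
                int32_t value;
                std::memcpy(&value, operandValues.data() + offset + i, sizeof(value));
                std::cout << " " << value;
            }
            std::cout << "\n";
        };

        decode("begins", 0, 8);
        decode("ends", 8, 8);  // ends[0] == 2 is what makes the slice invalid
        decode("strides", 16, 8);
        decode("beginMask", 24, 4);
        decode("endMask", 28, 4);
        decode("shrinkAxisMask", 32, 4);
        return 0;
    }
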
diff --git a/neuralnetworks/1.2/vts/functional/spec/strided_slice_invalid_output_dims.mod.py b/neuralnetworks/1.2/vts/functional/spec/strided_slice_invalid_output_dims.mod.py
new file mode 100644
index 0000000..e8d30f3
--- /dev/null
+++ b/neuralnetworks/1.2/vts/functional/spec/strided_slice_invalid_output_dims.mod.py
@@ -0,0 +1,43 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This test makes sure that executing STRIDED_SLICE results in a failure when
+# the output dimensions do not match shrinkAxisMask.
+#
+# The test generator does not support generating tests resulting in execution
+# failure, so the GTest part of this test has been written by hand.
+# TODO(b/132155416): Move this under frameworks/ml/nn/runtime/test/specs/V1_2.
+#
+# Based on strided_slice_float_11.mod.py.
+
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{2, 3}")
+begins = Parameter("begins", "TENSOR_INT32", "{2}", [0, 0])
+# The value "2" below makes the test invalid. See http://b/79856511#comment2.
+ends = Parameter("ends", "TENSOR_INT32", "{2}", [2, 3])
+strides = Parameter("strides", "TENSOR_INT32", "{2}", [1, 1])
+beginMask = Int32Scalar("beginMask", 0)
+endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 1)
+
+output = Output("output", "TENSOR_FLOAT32", "{3}")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
+Example({
+    i1: [1, 2, 3, 4, 5, 6],
+    output: [1, 2, 3],
+})
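
Why the spec is invalid, worked through: with stride 1 the slice keeps end - begin = 2 rows along axis 0, but shrinkAxisMask bit 0 promises that axis collapses to a single element, which is the only way the declared {3} output can be correct. The small check below mirrors that arithmetic; it reflects the validation rule the test targets, not any particular driver's implementation:

    #include <iostream>

    int main() {
        const int begin = 0, end = 2, stride = 1;  // values from the spec above
        const int extent = (end - begin + stride - 1) / stride;  // elements kept on axis 0
        const bool shrinkAxis0 = true;             // shrinkAxisMask == 1

        std::cout << "axis 0 extent: " << extent << "\n";
        if (shrinkAxis0 && extent != 1) {
            std::cout << "invalid: cannot shrink an axis that keeps " << extent
                      << " elements; the declared {3} output does not match\n";
        }
        return 0;
    }
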
diff --git a/wifi/1.2/default/wifi_chip.cpp b/wifi/1.2/default/wifi_chip.cpp
index 8c2d3cb..9a31d50 100644
--- a/wifi/1.2/default/wifi_chip.cpp
+++ b/wifi/1.2/default/wifi_chip.cpp
@@ -111,16 +111,16 @@
 bool removeOldFilesInternal() {
     time_t now = time(0);
     const time_t delete_files_before = now - kMaxRingBufferFileAgeSeconds;
-    DIR* dir_dump = opendir(kTombstoneFolderPath);
+    std::unique_ptr<DIR, decltype(&closedir)> dir_dump(
+        opendir(kTombstoneFolderPath), closedir);
     if (!dir_dump) {
         PLOG(ERROR) << "Failed to open directory";
         return false;
     }
-    unique_fd dir_auto_closer(dirfd(dir_dump));
     struct dirent* dp;
     bool success = true;
     std::list<std::pair<const time_t, std::string>> valid_files;
-    while ((dp = readdir(dir_dump))) {
+    while ((dp = readdir(dir_dump.get()))) {
         if (dp->d_type != DT_REG) {
             continue;
         }
@@ -242,13 +242,13 @@
 size_t cpioArchiveFilesInDir(int out_fd, const char* input_dir) {
     struct dirent* dp;
     size_t n_error = 0;
-    DIR* dir_dump = opendir(input_dir);
+    std::unique_ptr<DIR, decltype(&closedir)> dir_dump(opendir(input_dir),
+                                                       closedir);
     if (!dir_dump) {
         PLOG(ERROR) << "Failed to open directory";
         return ++n_error;
     }
-    unique_fd dir_auto_closer(dirfd(dir_dump));
-    while ((dp = readdir(dir_dump))) {
+    while ((dp = readdir(dir_dump.get()))) {
         if (dp->d_type != DT_REG) {
             continue;
         }