Audit object lifetime management: Fix documentation, add tests, add defensive code

When we designed the API, we documented that
ANeuralNetworksExecution_free could be called on an
ANeuralNetworksExecution with an execution in flight, but we never
implemented support for that, so the behavior would have been
undefined.  This CL changes the documentation to forbid this and adds
some defensive code to detect an attempt to free an in-flight
execution and respond by logging an error and ignoring the free.

Other documentation changes:
- Fix typo in ANeuralNetworksCompilation_free
- Clarify use of ANeuralNetworksEvent_free
- Clarify use of ANeuralNetworksExecution_free
- Clarify use of ANeuralNetworksMemory_free

Adds tests to confirm that we can call ANeuralNetworks*_free() on a
nullptr or an unfinished object without crashing.

Test: NeuralNetworksTest_static

Test: To verify that we gracefully handle
      ANeuralNetworksExecution_free with an execution in flight, added
      "sleep(5)" after ExecutionBuilder field mStarted is set to true;
      modified test IntrospectionControlTest.SimpleAddModel to add
      ANeuralNetworksExecution_free between
      ANeuralNetworksExecution_startCompute and
      ANeuralNetworksEvent_wait; ran "NeuralNetworksTest_static
      --gtest_filter=IntrospectionControlTest.SimpleAddModel 1024";
      and verified that test passes and that logcat contains expected
      message.

Bug: 123242704
Bug: 129494032
Change-Id: I59183c70b4f5794fdc122cfbc50ffd383ae066c3
Merged-In: I59183c70b4f5794fdc122cfbc50ffd383ae066c3
(cherry picked from commit 9ce80efa23fb0a598163b564411e560128684721)
diff --git a/nn/runtime/ExecutionBuilder.h b/nn/runtime/ExecutionBuilder.h
index 6b73071..f904ed2 100644
--- a/nn/runtime/ExecutionBuilder.h
+++ b/nn/runtime/ExecutionBuilder.h
@@ -108,6 +108,8 @@
     hal::ErrorStatus finish(hal::ErrorStatus error,
                             const std::vector<hal::OutputShape>& outputShapes);
 
+    bool inFlight() const { return mStarted && !mFinished; }
+
    private:
     // If a callback is provided, then this is asynchronous. If a callback is
     // not provided (i.e., is nullptr), then this is synchronous.
diff --git a/nn/runtime/NeuralNetworks.cpp b/nn/runtime/NeuralNetworks.cpp
index 987c7f8..04a4606 100644
--- a/nn/runtime/NeuralNetworks.cpp
+++ b/nn/runtime/NeuralNetworks.cpp
@@ -993,7 +993,6 @@
 void ANeuralNetworksCompilation_free(ANeuralNetworksCompilation* compilation) {
     NNTRACE_RT(NNTRACE_PHASE_TERMINATION, "ANeuralNetworksCompilation_free");
     // No validation.  Free of nullptr is valid.
-    // TODO specification says that a compilation-in-flight can be deleted
     CompilationBuilder* c = reinterpret_cast<CompilationBuilder*>(compilation);
     delete c;
 }
@@ -1047,9 +1046,13 @@
 
 void ANeuralNetworksExecution_free(ANeuralNetworksExecution* execution) {
     NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "ANeuralNetworksExecution_free");
-    // TODO specification says that an execution-in-flight can be deleted
-    // No validation.  Free of nullptr is valid.
+    // Free of nullptr is valid.
     ExecutionBuilder* r = reinterpret_cast<ExecutionBuilder*>(execution);
+    if (r && r->inFlight()) {
+        LOG(ERROR) << "ANeuralNetworksExecution_free passed an in-flight ANeuralNetworksExecution"
+                   << " and is therefore ignored";
+        return;
+    }
     delete r;
 }
 
diff --git a/nn/runtime/include/NeuralNetworks.h b/nn/runtime/include/NeuralNetworks.h
index 3a46e2d..8779a35 100644
--- a/nn/runtime/include/NeuralNetworks.h
+++ b/nn/runtime/include/NeuralNetworks.h
@@ -5037,6 +5037,15 @@
  * of the element type byte size, e.g., a tensor with
  * {@link ANEURALNETWORKS_TENSOR_FLOAT32} type must be aligned on 4-byte boundary.
  *
+ * It is the application's responsibility to ensure that there are no uses of
+ * the memory after calling {@link ANeuralNetworksMemory_free}. This includes
+ * any model which references this memory because of a call to
+ * {@link ANeuralNetworksModel_setOperandValueFromMemory}, any compilation
+ * created using such a model, any execution object or burst object created
+ * using such a compilation, or any execution which references this memory
+ * because of a call to {@link ANeuralNetworksExecution_setInputFromMemory} or
+ * {@link ANeuralNetworksExecution_setOutputFromMemory}.
+ *
  * Available since API level 27.
  */
 typedef struct ANeuralNetworksMemory ANeuralNetworksMemory;
@@ -5157,6 +5166,13 @@
  * modifies an execution at a given time. It is however safe for more than one
  * thread to use {@link ANeuralNetworksEvent_wait} at the same time.</p>
  *
+ * <p>It is also the application's responsibility to ensure that the execution
+ * either has never been scheduled or has completed (i.e., that
+ * {@link ANeuralNetworksExecution_burstCompute},
+ * {@link ANeuralNetworksExecution_compute}, or
+ * {@link ANeuralNetworksEvent_wait} has returned) before calling
+ * {@link ANeuralNetworksExecution_free}.</p>
+ *
  * <p>It is also the application's responsibility to ensure that there are no other
  * uses of the execution after calling {@link ANeuralNetworksExecution_free}.</p>
  *
@@ -5767,7 +5783,8 @@
  *
  * Available since API level 27.
  *
- * @param memory The memory object to be freed.
+ * @param memory The memory object to be freed. Passing NULL is acceptable and
+ *               results in no operation.
  */
 void ANeuralNetworksMemory_free(ANeuralNetworksMemory* memory) __INTRODUCED_IN(27);
 
@@ -6109,7 +6126,7 @@
  * Destroy a compilation.
  *
  * The compilation need not have been finished by a call to
- * {@link ANeuralNetworksModel_finish}.
+ * {@link ANeuralNetworksCompilation_finish}.
  *
  * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
  *
@@ -6182,12 +6199,15 @@
 /**
  * Destroy an execution.
  *
- * <p>If called on an execution for which
- * {@link ANeuralNetworksExecution_startCompute} has been called, the
- * function will return immediately but will mark the execution to be deleted
- * once the computation completes. The related {@link ANeuralNetworksEvent}
- * will be signaled and the {@link ANeuralNetworksEvent_wait} will return
- * ANEURALNETWORKS_ERROR_DELETED.
+ * <p>The execution need not have been scheduled by a call to
+ * {@link ANeuralNetworksExecution_burstCompute},
+ * {@link ANeuralNetworksExecution_compute}, or
+ * {@link ANeuralNetworksExecution_startCompute}; but if it has been scheduled,
+ * then the application must not call {@link ANeuralNetworksExecution_free}
+ * until the execution has completed (i.e.,
+ * {@link ANeuralNetworksExecution_burstCompute},
+ * {@link ANeuralNetworksExecution_compute}, or
+ * {@link ANeuralNetworksEvent_wait} has returned).
  *
  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
  *
@@ -6437,6 +6457,9 @@
  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
  *
  * Available since API level 27.
+ *
+ * @param event The event object to be destroyed. Passing NULL is acceptable and
+ *              results in no operation.
  */
 void ANeuralNetworksEvent_free(ANeuralNetworksEvent* event) __INTRODUCED_IN(27);
 
diff --git a/nn/runtime/test/Android.bp b/nn/runtime/test/Android.bp
index caff595..00d3f7b 100644
--- a/nn/runtime/test/Android.bp
+++ b/nn/runtime/test/Android.bp
@@ -60,6 +60,7 @@
         // Changes to this list must be reflected in cts/tests/tests/neuralnetworks/Android.mk
         // to ensure CTS tests coverage.
         "generated/tests/*.cpp",
+        "TestFree.cpp",
         "TestGenerated.cpp",
         "TestMemory.cpp",
         "TestOperandExtraParams.cpp",
diff --git a/nn/runtime/test/TestFree.cpp b/nn/runtime/test/TestFree.cpp
new file mode 100644
index 0000000..dedf55e
--- /dev/null
+++ b/nn/runtime/test/TestFree.cpp
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This file tests that various abnormal uses of ANeuralNetworks*_free() don't crash.
+//
+// Limitation: It doesn't set various combinations of properties on objects before
+// freeing those objects.
+
+#include "NeuralNetworks.h"
+
+#include <gtest/gtest.h>
+
+#include <vector>
+
+namespace {
+
+ANeuralNetworksModel* createUnfinishedModel() {
+    ANeuralNetworksModel* model = nullptr;
+    EXPECT_EQ(ANeuralNetworksModel_create(&model), ANEURALNETWORKS_NO_ERROR);
+
+    uint32_t dimensions[] = {1};
+    ANeuralNetworksOperandType type = {
+            .type = ANEURALNETWORKS_TENSOR_FLOAT32, .dimensionCount = 1, .dimensions = dimensions};
+    EXPECT_EQ(ANeuralNetworksModel_addOperand(model, &type), ANEURALNETWORKS_NO_ERROR);
+    EXPECT_EQ(ANeuralNetworksModel_addOperand(model, &type), ANEURALNETWORKS_NO_ERROR);
+
+    const uint32_t inList[]{0};
+    const uint32_t outList[]{1};
+    EXPECT_EQ(
+            ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_FLOOR, 1, inList, 1, outList),
+            ANEURALNETWORKS_NO_ERROR);
+    EXPECT_EQ(ANeuralNetworksModel_identifyInputsAndOutputs(model, 1, inList, 1, outList),
+              ANEURALNETWORKS_NO_ERROR);
+
+    return model;
+}
+
+ANeuralNetworksModel* createFinishedModel() {
+    ANeuralNetworksModel* const model = createUnfinishedModel();
+    EXPECT_EQ(ANeuralNetworksModel_finish(model), ANEURALNETWORKS_NO_ERROR);
+    return model;
+}
+
+std::vector<ANeuralNetworksDevice*> createDeviceList() {
+    std::vector<ANeuralNetworksDevice*> devices;
+
+    uint32_t numDevices = 0;
+    EXPECT_EQ(ANeuralNetworks_getDeviceCount(&numDevices), ANEURALNETWORKS_NO_ERROR);
+    for (uint32_t devIndex = 0; devIndex < numDevices; ++devIndex) {
+        ANeuralNetworksDevice* device = nullptr;
+        const int getDeviceStatus = ANeuralNetworks_getDevice(devIndex, &device);
+        EXPECT_EQ(getDeviceStatus, ANEURALNETWORKS_NO_ERROR);
+        if (getDeviceStatus == ANEURALNETWORKS_NO_ERROR) {
+            devices.push_back(device);
+        }
+    }
+
+    return devices;
+}
+
+TEST(Free, Null) {
+    ANeuralNetworksBurst_free(nullptr);
+    ANeuralNetworksCompilation_free(nullptr);
+    ANeuralNetworksEvent_free(nullptr);
+    ANeuralNetworksExecution_free(nullptr);
+    ANeuralNetworksMemory_free(nullptr);
+    ANeuralNetworksModel_free(nullptr);
+}
+
+TEST(Free, UnfinishedModel) {
+    ANeuralNetworksModel* const model = createUnfinishedModel();
+    ANeuralNetworksModel_free(model);
+}
+
+TEST(Free, UnfinishedCompilation) {
+    ANeuralNetworksModel* const model = createFinishedModel();
+
+    ANeuralNetworksCompilation* compilation = nullptr;
+    ASSERT_EQ(ANeuralNetworksCompilation_create(model, &compilation), ANEURALNETWORKS_NO_ERROR);
+    ANeuralNetworksCompilation_free(compilation);
+
+    ANeuralNetworksModel_free(model);
+}
+
+TEST(Free, UnfinishedCompilationForDevices) {
+    ANeuralNetworksModel* const model = createFinishedModel();
+
+    const auto devices = createDeviceList();
+
+    ANeuralNetworksCompilation* compilation = nullptr;
+    ASSERT_EQ(ANeuralNetworksCompilation_createForDevices(model, devices.data(), devices.size(),
+                                                          &compilation),
+              ANEURALNETWORKS_NO_ERROR);
+    ANeuralNetworksCompilation_free(compilation);
+
+    ANeuralNetworksModel_free(model);
+}
+
+TEST(Free, UnscheduledExecution) {
+    ANeuralNetworksModel* const model = createFinishedModel();
+
+    ANeuralNetworksCompilation* compilation = nullptr;
+    ASSERT_EQ(ANeuralNetworksCompilation_create(model, &compilation), ANEURALNETWORKS_NO_ERROR);
+    ASSERT_EQ(ANeuralNetworksCompilation_finish(compilation), ANEURALNETWORKS_NO_ERROR);
+
+    ANeuralNetworksExecution* execution = nullptr;
+    ASSERT_EQ(ANeuralNetworksExecution_create(compilation, &execution), ANEURALNETWORKS_NO_ERROR);
+    ANeuralNetworksExecution_free(execution);
+
+    ANeuralNetworksCompilation_free(compilation);
+
+    ANeuralNetworksModel_free(model);
+}
+
+}  // namespace