Snap for 4680931 from bb3ebd306cf045b2370450188d3f50a0634d8179 to qt-release

Change-Id: If07d456b3507987ecc8244b8dcbaa4fe2b06ce3d
diff --git a/automotive/evs/1.0/vts/functional/Android.bp b/automotive/evs/1.0/vts/functional/Android.bp
index 6ac2458..5d696fc 100644
--- a/automotive/evs/1.0/vts/functional/Android.bp
+++ b/automotive/evs/1.0/vts/functional/Android.bp
@@ -23,20 +23,13 @@
         "FormatConvert.cpp"
     ],
 
-    defaults: [
-        "hidl_defaults",
-    ],
+    defaults: ["VtsHalTargetTestDefaults"],
 
     shared_libs: [
-        "android.hardware.automotive.evs@1.0",
-        "liblog",
-        "libutils",
         "libui",
-        "libhidlbase",
-        "libhidltransport",
     ],
 
-    static_libs: ["VtsHalHidlTargetTestBase"],
+    static_libs: ["android.hardware.automotive.evs@1.0"],
 
     cflags: [
         "-O0",
diff --git a/automotive/vehicle/2.0/default/impl/vhal_v2_0/DefaultConfig.h b/automotive/vehicle/2.0/default/impl/vhal_v2_0/DefaultConfig.h
index c1c511f..e54de00 100644
--- a/automotive/vehicle/2.0/default/impl/vhal_v2_0/DefaultConfig.h
+++ b/automotive/vehicle/2.0/default/impl/vhal_v2_0/DefaultConfig.h
@@ -429,7 +429,7 @@
 
     {.config = {.prop = toInt(VehicleProperty::AP_POWER_BOOTUP_REASON),
                 .access = VehiclePropertyAccess::READ,
-                .changeMode = VehiclePropertyChangeMode::ON_CHANGE},
+                .changeMode = VehiclePropertyChangeMode::STATIC},
      .initialValue = {.int32Values = {toInt(VehicleApPowerBootupReason::USER_POWER_ON)}}},
 
     {
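
Note on the hunk above: marking AP_POWER_BOOTUP_REASON as STATIC means its value is fixed for the lifetime of a boot, so clients read it once instead of subscribing for updates. A minimal client-side sketch of such a one-shot read, assuming the generated android.hardware.automotive.vehicle@2.0 C++ bindings (the helper name readBootupReason is illustrative and not part of this patch):

    #include <android/hardware/automotive/vehicle/2.0/IVehicle.h>

    using namespace android::hardware::automotive::vehicle::V2_0;
    using android::sp;

    // Read the STATIC bootup-reason property once; no subscription is needed
    // because a STATIC property does not change until the next boot.
    void readBootupReason(const sp<IVehicle>& vehicle) {
        VehiclePropValue request = {};
        request.prop = static_cast<int32_t>(VehicleProperty::AP_POWER_BOOTUP_REASON);
        auto ret = vehicle->get(request, [](StatusCode status, const VehiclePropValue& value) {
            if (status == StatusCode::OK && value.value.int32Values.size() == 1) {
                // int32Values[0] carries a VehicleApPowerBootupReason,
                // e.g. VehicleApPowerBootupReason::USER_POWER_ON.
            }
        });
        if (!ret.isOk()) {
            // Transport-level failure; handle or log as appropriate.
        }
    }
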
diff --git a/automotive/vehicle/2.0/types.hal b/automotive/vehicle/2.0/types.hal
index 3001213..87daedc 100644
--- a/automotive/vehicle/2.0/types.hal
+++ b/automotive/vehicle/2.0/types.hal
@@ -510,6 +510,7 @@
      *
      * @change_mode VehiclePropertyChangeMode:ON_CHANGE
      * @access VehiclePropertyAccess:READ
+     * @data_enum VehicleTurnSignal
      */
     TURN_SIGNAL_STATE = (
         0x0408
@@ -522,6 +523,7 @@
      *
      * @change_mode VehiclePropertyChangeMode:ON_CHANGE
      * @access VehiclePropertyAccess:READ
+     * @data_enum VehicleIgnitionState
      */
     IGNITION_STATE = (
         0x0409
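
The @data_enum annotations added above document which HAL enum gives meaning to the raw int32 each property carries. A small decoding sketch, assuming the @2.0 C++ bindings (helper names are illustrative; VehicleTurnSignal and VehicleIgnitionState are the enums named by the annotations):

    // The property payload is a plain int32; @data_enum says which enum interprets it.
    VehicleTurnSignal decodeTurnSignal(const VehiclePropValue& v) {
        return static_cast<VehicleTurnSignal>(v.value.int32Values[0]);
    }
    VehicleIgnitionState decodeIgnitionState(const VehiclePropValue& v) {
        return static_cast<VehicleIgnitionState>(v.value.int32Values[0]);
    }
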
diff --git a/confirmationui/support/include/android/hardware/confirmationui/support/msg_formatting.h b/confirmationui/support/include/android/hardware/confirmationui/support/msg_formatting.h
index 0d03591..6558799 100644
--- a/confirmationui/support/include/android/hardware/confirmationui/support/msg_formatting.h
+++ b/confirmationui/support/include/android/hardware/confirmationui/support/msg_formatting.h
@@ -105,6 +105,7 @@
     PromptUserConfirmation,
     DeliverSecureInputEvent,
     Abort,
+    Vendor,
 };
 
 template <Command cmd>
@@ -115,6 +116,7 @@
 DECLARE_COMMAND(PromptUserConfirmation);
 DECLARE_COMMAND(DeliverSecureInputEvent);
 DECLARE_COMMAND(Abort);
+DECLARE_COMMAND(Vendor);
 
 using PromptUserConfirmationMsg = Message<PromptUserConfirmation_t, hidl_string, hidl_vec<uint8_t>,
                                           hidl_string, hidl_vec<UIOption>>;
@@ -166,7 +168,7 @@
 }
 inline void zero(const volatile uint8_t*, const volatile uint8_t*) {}
 // This odd alignment function aligns the stream position to a 4byte and never 8byte boundary
-// It is to accommodate the 4 byte size field which is then followed by 8byte alligned data.
+// It is to accommodate the 4 byte size field which is then followed by 8byte aligned data.
 template <typename T>
 StreamState<T> unalign(StreamState<T> s) {
     uint8_t unalignment = uintptr_t(s.pos_) & 0x3;
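
By analogy with the existing commands (PromptUserConfirmation_t is used above in this file), DECLARE_COMMAND(Vendor) presumably introduces a matching Vendor_t tag type, which lets vendor extensions declare their own framed message types. A hypothetical sketch only; the payload shape is an assumption, not something this patch defines:

    // Hypothetical vendor message: a Vendor-tagged frame carrying an opaque byte blob,
    // declared the same way as PromptUserConfirmationMsg above. Payload layout is assumed.
    using VendorExtensionMsg = Message<Vendor_t, hidl_vec<uint8_t>>;
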
diff --git a/current.txt b/current.txt
index 7fcbcf1..5c53b92 100644
--- a/current.txt
+++ b/current.txt
@@ -294,7 +294,7 @@
 3b17c1fdfc389e0abe626c37054954b07201127d890c2bc05d47613ec1f4de4f android.hardware.automotive.evs@1.0::types
 b3caf524c46a47d67e6453a34419e1881942d059e146cda740502670e9a752c3 android.hardware.automotive.vehicle@2.0::IVehicle
 80fb4156fa91ce86e49bd2cabe215078f6b69591d416a09e914532eae6712052 android.hardware.automotive.vehicle@2.0::IVehicleCallback
-442de3a3d3819ff8b8bfe9ec710592ca8af7c16bfdb5eb8911b898b8f12b2bb0 android.hardware.automotive.vehicle@2.0::types
+4ff0dcfb938a5df283eef47de33b4e1284fab73f584cfc0c94e97317bdb7bf26 android.hardware.automotive.vehicle@2.0::types
 32cc50cc2a7658ec613c0c2dd2accbf6a05113b749852879e818b8b7b438db19 android.hardware.bluetooth.a2dp@1.0::IBluetoothAudioHost
 ff4be64d7992f8bec97dff37f35450e79b3430c61f85f54322ce45bef229dc3b android.hardware.bluetooth.a2dp@1.0::IBluetoothAudioOffload
 27f22d2e873e6201f9620cf4d8e2facb25bd0dd30a2b911e441b4600d560fa62 android.hardware.bluetooth.a2dp@1.0::types
diff --git a/graphics/mapper/2.0/utils/vts/Android.bp b/graphics/mapper/2.0/utils/vts/Android.bp
index 1aa3185..e43011f 100644
--- a/graphics/mapper/2.0/utils/vts/Android.bp
+++ b/graphics/mapper/2.0/utils/vts/Android.bp
@@ -16,17 +16,13 @@
 
 cc_library_static {
     name: "android.hardware.graphics.mapper@2.0-vts",
-    defaults: ["hidl_defaults"],
+    defaults: ["VtsHalTargetTestDefaults"],
     srcs: ["MapperVts.cpp"],
     cflags: [
         "-O0",
         "-g",
     ],
-    shared_libs: [
-        "libutils",
-    ],
     static_libs: [
-        "VtsHalHidlTargetTestBase",
         "android.hardware.graphics.allocator@2.0",
         "android.hardware.graphics.mapper@2.0",
     ],
diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1BasicTest.cpp b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1BasicTest.cpp
index 17f6744..10591dc 100644
--- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1BasicTest.cpp
+++ b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1BasicTest.cpp
@@ -286,6 +286,169 @@
     EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
 }
 
+class NeuralnetworksInputsOutputsTest
+    : public NeuralnetworksHidlTest,
+      public ::testing::WithParamInterface<std::tuple<bool, bool>> {
+   protected:
+    virtual void SetUp() { NeuralnetworksHidlTest::SetUp(); }
+    virtual void TearDown() { NeuralnetworksHidlTest::TearDown(); }
+    V1_1::Model createModel(const std::vector<uint32_t>& inputs,
+                            const std::vector<uint32_t>& outputs) {
+        // We set up the operands as floating-point with no designated
+        // model inputs and outputs, and then patch type and lifetime
+        // later on in this function.
+
+        std::vector<Operand> operands = {
+            {
+                .type = OperandType::TENSOR_FLOAT32,
+                .dimensions = {1},
+                .numberOfConsumers = 1,
+                .scale = 0.0f,
+                .zeroPoint = 0,
+                .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+                .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            },
+            {
+                .type = OperandType::TENSOR_FLOAT32,
+                .dimensions = {1},
+                .numberOfConsumers = 1,
+                .scale = 0.0f,
+                .zeroPoint = 0,
+                .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+                .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            },
+            {
+                .type = OperandType::INT32,
+                .dimensions = {},
+                .numberOfConsumers = 1,
+                .scale = 0.0f,
+                .zeroPoint = 0,
+                .lifetime = OperandLifeTime::CONSTANT_COPY,
+                .location = {.poolIndex = 0, .offset = 0, .length = sizeof(int32_t)},
+            },
+            {
+                .type = OperandType::TENSOR_FLOAT32,
+                .dimensions = {1},
+                .numberOfConsumers = 0,
+                .scale = 0.0f,
+                .zeroPoint = 0,
+                .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+                .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            },
+        };
+
+        const std::vector<Operation> operations = {{
+            .type = OperationType::ADD, .inputs = {0, 1, 2}, .outputs = {3},
+        }};
+
+        std::vector<uint8_t> operandValues;
+        int32_t activation[1] = {static_cast<int32_t>(FusedActivationFunc::NONE)};
+        operandValues.insert(operandValues.end(), reinterpret_cast<const uint8_t*>(&activation[0]),
+                             reinterpret_cast<const uint8_t*>(&activation[1]));
+
+        if (kQuantized) {
+            for (auto& operand : operands) {
+                if (operand.type == OperandType::TENSOR_FLOAT32) {
+                    operand.type = OperandType::TENSOR_QUANT8_ASYMM;
+                    operand.scale = 1.0f;
+                    operand.zeroPoint = 0;
+                }
+            }
+        }
+
+        auto patchLifetime = [&operands](const std::vector<uint32_t>& operandIndexes,
+                                         OperandLifeTime lifetime) {
+            for (uint32_t index : operandIndexes) {
+                operands[index].lifetime = lifetime;
+            }
+        };
+        if (kInputHasPrecedence) {
+            patchLifetime(outputs, OperandLifeTime::MODEL_OUTPUT);
+            patchLifetime(inputs, OperandLifeTime::MODEL_INPUT);
+        } else {
+            patchLifetime(inputs, OperandLifeTime::MODEL_INPUT);
+            patchLifetime(outputs, OperandLifeTime::MODEL_OUTPUT);
+        }
+
+        return {
+            .operands = operands,
+            .operations = operations,
+            .inputIndexes = inputs,
+            .outputIndexes = outputs,
+            .operandValues = operandValues,
+            .pools = {},
+        };
+    }
+    void check(const std::string& name,
+               bool expectation,  // true = success
+               const std::vector<uint32_t>& inputs, const std::vector<uint32_t>& outputs) {
+        SCOPED_TRACE(name + " (HAL calls should " + (expectation ? "succeed" : "fail") + ", " +
+                     (kInputHasPrecedence ? "input" : "output") + " precedence, " +
+                     (kQuantized ? "quantized" : "float") + ")");
+
+        V1_1::Model model = createModel(inputs, outputs);
+
+        // ensure that getSupportedOperations_1_1() checks model validity
+        ErrorStatus supportedOpsErrorStatus = ErrorStatus::GENERAL_FAILURE;
+        Return<void> supportedOpsReturn = device->getSupportedOperations_1_1(
+            model, [&model, &supportedOpsErrorStatus](ErrorStatus status,
+                                                      const hidl_vec<bool>& supported) {
+                supportedOpsErrorStatus = status;
+                if (status == ErrorStatus::NONE) {
+                    ASSERT_EQ(supported.size(), model.operations.size());
+                }
+            });
+        ASSERT_TRUE(supportedOpsReturn.isOk());
+        ASSERT_EQ(supportedOpsErrorStatus,
+                  (expectation ? ErrorStatus::NONE : ErrorStatus::INVALID_ARGUMENT));
+
+        // ensure that prepareModel_1_1() checks model validity
+        sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback;
+        ASSERT_NE(preparedModelCallback.get(), nullptr);
+        Return<ErrorStatus> prepareLaunchReturn =
+            device->prepareModel_1_1(model, preparedModelCallback);
+        ASSERT_TRUE(prepareLaunchReturn.isOk());
+        ASSERT_TRUE(prepareLaunchReturn == ErrorStatus::NONE ||
+                    prepareLaunchReturn == ErrorStatus::INVALID_ARGUMENT);
+        bool preparationOk = (prepareLaunchReturn == ErrorStatus::NONE);
+        if (preparationOk) {
+            preparedModelCallback->wait();
+            preparationOk = (preparedModelCallback->getStatus() == ErrorStatus::NONE);
+        }
+
+        if (preparationOk) {
+            ASSERT_TRUE(expectation);
+        } else {
+            // Preparation can fail for reasons other than an invalid model --
+            // for example, perhaps not all operations are supported, or perhaps
+            // the device hit some kind of capacity limit.
+            bool invalid = prepareLaunchReturn == ErrorStatus::INVALID_ARGUMENT ||
+                           preparedModelCallback->getStatus() == ErrorStatus::INVALID_ARGUMENT;
+            ASSERT_NE(expectation, invalid);
+        }
+    }
+
+    // Indicates whether an operand that appears in both the inputs
+    // and outputs vector should have lifetime appropriate for input
+    // rather than for output.
+    const bool kInputHasPrecedence = std::get<0>(GetParam());
+
+    // Indicates whether we should test TENSOR_QUANT8_ASYMM rather
+    // than TENSOR_FLOAT32.
+    const bool kQuantized = std::get<1>(GetParam());
+};
+
+TEST_P(NeuralnetworksInputsOutputsTest, Validate) {
+    check("Ok", true, {0, 1}, {3});
+    check("InputIsOutput", false, {0, 1}, {3, 0});
+    check("OutputIsInput", false, {0, 1, 3}, {3});
+    check("DuplicateInputs", false, {0, 1, 0}, {3});
+    check("DuplicateOutputs", false, {0, 1}, {3, 3});
+}
+
+INSTANTIATE_TEST_CASE_P(Flavor, NeuralnetworksInputsOutputsTest,
+                        ::testing::Combine(::testing::Bool(), ::testing::Bool()));
+
 }  // namespace functional
 }  // namespace vts
 }  // namespace V1_1
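
For reference, the ::testing::Combine(::testing::Bool(), ::testing::Bool()) instantiation above runs the Validate test under all four (kInputHasPrecedence, kQuantized) combinations; individual instances can be selected with gtest's name pattern Flavor/NeuralnetworksInputsOutputsTest.Validate/*.

    // Parameter tuples generated by the instantiation above:
    //   kInputHasPrecedence=false, kQuantized=false  -> float tensors,     MODEL_OUTPUT lifetime wins
    //   kInputHasPrecedence=false, kQuantized=true   -> quantized tensors, MODEL_OUTPUT lifetime wins
    //   kInputHasPrecedence=true,  kQuantized=false  -> float tensors,     MODEL_INPUT lifetime wins
    //   kInputHasPrecedence=true,  kQuantized=true   -> quantized tensors, MODEL_INPUT lifetime wins
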
diff --git a/wifi/1.0/vts/functional/Android.bp b/wifi/1.0/vts/functional/Android.bp
index b2f76a3..6522f4d 100644
--- a/wifi/1.0/vts/functional/Android.bp
+++ b/wifi/1.0/vts/functional/Android.bp
@@ -16,7 +16,7 @@
 
 cc_library_static {
     name: "VtsHalWifiV1_0TargetTestUtil",
-    defaults: ["hidl_defaults"],
+    defaults: ["VtsHalTargetTestDefaults"],
     srcs: [
         "wifi_hidl_call_util_selftest.cpp",
         "wifi_hidl_test.cpp",
@@ -26,16 +26,9 @@
         "."
     ],
     shared_libs: [
-        "libbase",
-        "liblog",
-        "libcutils",
-        "libhidlbase",
-        "libhidltransport",
         "libnativehelper",
-        "libutils",
-        "android.hardware.wifi@1.0",
     ],
-    static_libs: ["VtsHalHidlTargetTestBase"],
+    static_libs: ["android.hardware.wifi@1.0"],
 }
 
 cc_test {