Merge "Use design capacity instead of full charge capacity"
diff --git a/current.txt b/current.txt
index 860a034..d94f159 100644
--- a/current.txt
+++ b/current.txt
@@ -578,7 +578,7 @@
 5f6d3097ba84cb63c430787123f4de1b31c11f90b531b98eae9a8623a5ae962a android.hardware.neuralnetworks@1.1::types
 fb382e986c10b8fbb797a8546e8f9ea6d1107bfe6f3fb7e57f6bbbf1f807a906 android.hardware.neuralnetworks@1.2::IDevice
 40e71cd693de5b832325c5d8f081f2ff20a7ba2b89d401cee5b4b3eb0e241681 android.hardware.neuralnetworks@1.2::IPreparedModel
-7f7ef383268c95a1b8fe4e55c662bc806bb0ac11a154f6b049a113a44b0f024f android.hardware.neuralnetworks@1.2::types
+6c29d6fdd5445911df5456b3b84b949cdd59fca0c0b5507662f26a5cac0cf5e5 android.hardware.neuralnetworks@1.2::types
 a785a57447a81e9c130eef6904c3a5c256076c6a04588c40620ebd6fa2660d77 android.hardware.radio@1.2::types
 1a6e2bd289f22931c526b21916910f1d4c436b7acb9556e4243de4ce8e6cc2e4 android.hardware.soundtrigger@2.0::ISoundTriggerHwCallback
 fd65298e1e09e0e3c781ab18305920d757dbe55a3b459ce17814ec5cf6dfee99 android.hardware.wifi@1.0::IWifiP2pIface
@@ -625,12 +625,12 @@
 adb0efdf1462e9b2e742c0dcadd598666aac551f178be06e755bfcdf5797abd0 android.hardware.keymaster@4.1::IOperation
 ddcf89cd8ee2df0d32aee55050826446fb64f7aafde0a7cd946c64f61b1a364c android.hardware.keymaster@4.1::types
 65c16331e57f6dd68b3971f06f78fe9e3209afb60630c31705aa355f9a52bf0d android.hardware.neuralnetworks@1.3::IBuffer
-9b41dd49e2dcc2ecb4243d03f8421d72494ada5cf2945bff88f0019eeca56923 android.hardware.neuralnetworks@1.3::IDevice
+9db064ee44268a876be0367ff771e618362d39ec603b6ecab17e1575725fcd87 android.hardware.neuralnetworks@1.3::IDevice
 4167dc3ad35e9cd0d2057d4868c7675ae2c3c9d05bbd614c1f5dccfa5fd68797 android.hardware.neuralnetworks@1.3::IExecutionCallback
 2fa3679ad7c94b5e88724adcd560c561041068a4ca565c63830e68101988746a android.hardware.neuralnetworks@1.3::IFencedExecutionCallback
 237b23b126a66f3432658020fed78cdd06ba6297459436fe6bae0ba753370833 android.hardware.neuralnetworks@1.3::IPreparedModel
 0439a1fbbec7f16e5e4c653d85ac685d51bfafbae15b8f8cca530acdd7d6a8ce android.hardware.neuralnetworks@1.3::IPreparedModelCallback
-3646950b10f7cacdafca13609b0e18496cea942f3bdfe920494661856eff48bb android.hardware.neuralnetworks@1.3::types
+abbc4e1a969881c9f8ab587add5b5e75b08df834c9c969c013ae38cb4bb16f6a android.hardware.neuralnetworks@1.3::types
 3e01d4446cd69fd1c48f8572efd97487bc179564b32bd795800b97bbe10be37b android.hardware.wifi@1.4::IWifi
 a64467bae843569f0d465c5be7f0c7a5b987985b55a3ef4794dd5afc68538650 android.hardware.wifi.supplicant@1.3::ISupplicant
 44445b8a03d7b9e68b2fbd954672c18a8fce9e32851b0692f4f4ab3407f86ecb android.hardware.wifi.supplicant@1.3::ISupplicantStaIface
diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal
index e867120..993a105 100644
--- a/neuralnetworks/1.2/types.hal
+++ b/neuralnetworks/1.2/types.hal
@@ -3165,7 +3165,7 @@
      * * 8: An {@link OperandType::INT32} scalar, specifying the stride when
      *      walking through input in the ‘height’ dimension.
      * * 9: An {@link OperandType::INT32} scalar, specifying the number of
-            groups.
+     *      groups.
      * * 10: An {@link OperandType::INT32} scalar, and has to be one of the
      *       {@link FusedActivationFunc} values. Specifies the activation to
      *       invoke on the result.
diff --git a/neuralnetworks/1.3/IDevice.hal b/neuralnetworks/1.3/IDevice.hal
index 4931539..79f9c32 100644
--- a/neuralnetworks/1.3/IDevice.hal
+++ b/neuralnetworks/1.3/IDevice.hal
@@ -372,5 +372,5 @@
      */
     allocate(BufferDesc desc, vec<IPreparedModel> preparedModels, vec<BufferRole> inputRoles,
              vec<BufferRole> outputRoles)
-            generates (ErrorStatus status, IBuffer buffer, int32_t token);
+            generates (ErrorStatus status, IBuffer buffer, uint32_t token);
 };
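
For context, the sketch below shows how a caller might exercise the updated allocate() signature through the HIDL-generated C++ proxy for android.hardware.neuralnetworks@1.3. It is a minimal illustration, not driver or framework code: device, preparedModel, inputRoles, and outputRoles are placeholders for values the caller already has, and the only behavioral point is that the token now comes back as uint32_t.

#include <android/hardware/neuralnetworks/1.3/IDevice.h>

using ::android::sp;
using ::android::hardware::hidl_vec;
using namespace ::android::hardware::neuralnetworks;

// Illustrative only: request a driver-managed buffer and capture the
// (now unsigned) token that identifies it in later Request structures.
void allocateDeviceMemory(const sp<V1_3::IDevice>& device,
                          const sp<V1_3::IPreparedModel>& preparedModel,
                          const hidl_vec<V1_3::BufferRole>& inputRoles,
                          const hidl_vec<V1_3::BufferRole>& outputRoles) {
    V1_3::ErrorStatus status;
    sp<V1_3::IBuffer> buffer;
    uint32_t token = 0;
    const auto ret = device->allocate(
            {}, {preparedModel}, inputRoles, outputRoles,
            [&](V1_3::ErrorStatus error, const sp<V1_3::IBuffer>& buf, uint32_t tok) {
                status = error;
                buffer = buf;
                token = tok;
            });
    // ret.isOk() reports transport errors; status reports driver-side errors.
}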
diff --git a/neuralnetworks/1.3/types.hal b/neuralnetworks/1.3/types.hal
index ed577e4..c5dc08c 100644
--- a/neuralnetworks/1.3/types.hal
+++ b/neuralnetworks/1.3/types.hal
@@ -22,9 +22,9 @@
 import @1.0::RequestArgument;
 import @1.2::Model.ExtensionNameAndPrefix;
 import @1.2::Model.ExtensionTypeEncoding;
+import @1.2::Operand.ExtraParams;
 import @1.2::OperandType;
 import @1.2::OperationType;
-import @1.2::SymmPerChannelQuantParams;
 
 import android.hidl.safe_union@1.0::Monostate;
 
@@ -3253,7 +3253,7 @@
      * * 8: An {@link OperandType::INT32} scalar, specifying the stride when
      *      walking through input in the ‘height’ dimension.
      * * 9: An {@link OperandType::INT32} scalar, specifying the number of
-            groups.
+     *      groups.
      * * 10: An {@link OperandType::INT32} scalar, and has to be one of the
      *       {@link FusedActivationFunc} values. Specifies the activation to
      *       invoke on the result.
@@ -5343,27 +5343,7 @@
     /**
      * Additional parameters specific to a particular operand type.
      */
-    safe_union ExtraParams {
-       /**
-        * No additional parameters.
-        */
-       Monostate none;
-
-       /**
-        * Symmetric per-channel quantization parameters.
-        *
-        * Only applicable to operands of type TENSOR_QUANT8_SYMM_PER_CHANNEL.
-        */
-       SymmPerChannelQuantParams channelQuant;
-
-       /**
-        * Extension operand parameters.
-        *
-        * The framework treats this as an opaque data blob.
-        * The format is up to individual extensions.
-        */
-       vec<uint8_t> extension;
-    } extraParams;
+    @1.2::Operand.ExtraParams extraParams;
 };
 
 /**
@@ -5551,7 +5531,7 @@
          * Specifies a driver-managed buffer. It is the token returned from IDevice::allocate,
          * and is specific to the IDevice object.
          */
-        int32_t token;
+        uint32_t token;
     };
 
     /**
@@ -5573,7 +5553,7 @@
      * Time point of the steady clock (as from std::chrono::steady_clock)
      * measured in nanoseconds.
      */
-    uint64_t nanoseconds;
+    uint64_t nanosecondsSinceEpoch;
 };
 
 /**
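
To make the two type-level changes above concrete, here is a hedged sketch using the generated C++ types: a 1.3 Operand now carries the reused @1.2::Operand.ExtraParams, and the renamed OptionalTimePoint field is set through the more descriptive nanosecondsSinceEpoch() accessor. The helper names and quantization values are illustrative only.

#include <limits>

#include <android/hardware/neuralnetworks/1.2/types.h>
#include <android/hardware/neuralnetworks/1.3/types.h>

using namespace ::android::hardware::neuralnetworks;

// Sketch only: build a per-channel quantized operand with the reused 1.2 type.
V1_3::Operand makePerChannelOperand() {
    V1_2::Operand::ExtraParams extraParams;
    extraParams.channelQuant(V1_2::SymmPerChannelQuantParams{
            .scales = {0.5f, 0.25f}, .channelDim = 0});

    V1_3::Operand operand;
    operand.type = V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL;
    operand.extraParams = extraParams;  // same ExtraParams type as a 1.2 operand
    return operand;
}

// The renamed time-point field keeps the 1.2 semantics; only the name changed.
V1_3::OptionalTimePoint unlimitedDeadline() {
    V1_3::OptionalTimePoint deadline;
    deadline.nanosecondsSinceEpoch(std::numeric_limits<uint64_t>::max());
    return deadline;
}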
diff --git a/neuralnetworks/1.3/types.t b/neuralnetworks/1.3/types.t
index d4351ec..3d0d02d 100644
--- a/neuralnetworks/1.3/types.t
+++ b/neuralnetworks/1.3/types.t
@@ -24,9 +24,9 @@
 import @1.0::RequestArgument;
 import @1.2::Model.ExtensionNameAndPrefix;
 import @1.2::Model.ExtensionTypeEncoding;
+import @1.2::Operand.ExtraParams;
 import @1.2::OperandType;
 import @1.2::OperationType;
-import @1.2::SymmPerChannelQuantParams;
 
 import android.hidl.safe_union@1.0::Monostate;
 
@@ -319,27 +319,7 @@
     /**
      * Additional parameters specific to a particular operand type.
      */
-    safe_union ExtraParams {
-       /**
-        * No additional parameters.
-        */
-       Monostate none;
-
-       /**
-        * Symmetric per-channel quantization parameters.
-        *
-        * Only applicable to operands of type TENSOR_QUANT8_SYMM_PER_CHANNEL.
-        */
-       SymmPerChannelQuantParams channelQuant;
-
-       /**
-        * Extension operand parameters.
-        *
-        * The framework treats this as an opaque data blob.
-        * The format is up to individual extensions.
-        */
-       vec<uint8_t> extension;
-    } extraParams;
+    @1.2::Operand.ExtraParams extraParams;
 };
 
 /**
@@ -527,7 +507,7 @@
          * Specifies a driver-managed buffer. It is the token returned from IDevice::allocate,
          * and is specific to the IDevice object.
          */
-        int32_t token;
+        uint32_t token;
     };
 
     /**
@@ -549,7 +529,7 @@
      * Time point of the steady clock (as from std::chrono::steady_clock)
      * measured in nanoseconds.
      */
-    uint64_t nanoseconds;
+    uint64_t nanosecondsSinceEpoch;
 };
 
 /**
diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
index 8ea0b7e..82f34ff 100644
--- a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
@@ -122,15 +122,15 @@
     // Return {IBuffer object, token} if successful.
     // Return {nullptr, 0} if device memory is not supported.
     template <IOType ioType>
-    std::pair<sp<IBuffer>, int32_t> allocate(uint32_t index) {
-        std::pair<sp<IBuffer>, int32_t> buffer;
+    std::pair<sp<IBuffer>, uint32_t> allocate(uint32_t index) {
+        std::pair<sp<IBuffer>, uint32_t> buffer;
         allocateInternal<ioType>(index, &buffer);
         return buffer;
     }
 
   private:
     template <IOType ioType>
-    void allocateInternal(uint32_t index, std::pair<sp<IBuffer>, int32_t>* result) {
+    void allocateInternal(uint32_t index, std::pair<sp<IBuffer>, uint32_t>* result) {
         ASSERT_NE(result, nullptr);
 
         // Prepare arguments.
@@ -145,14 +145,14 @@
         // Allocate device memory.
         ErrorStatus status;
         sp<IBuffer> buffer;
-        int32_t token;
-        const auto ret = kDevice->allocate(
-                {}, {kPreparedModel}, inputRoles, outputRoles,
-                [&status, &buffer, &token](ErrorStatus error, const sp<IBuffer>& buf, int32_t tok) {
-                    status = error;
-                    buffer = buf;
-                    token = tok;
-                });
+        uint32_t token;
+        auto cb = [&status, &buffer, &token](ErrorStatus error, const sp<IBuffer>& buf,
+                                             uint32_t tok) {
+            status = error;
+            buffer = buf;
+            token = tok;
+        };
+        const auto ret = kDevice->allocate({}, {kPreparedModel}, inputRoles, outputRoles, cb);
 
         // Check allocation results.
         ASSERT_TRUE(ret.isOk());
@@ -217,7 +217,7 @@
             constRefSize += op.data.alignedSize();
         }
 
-        Operand::ExtraParams extraParams;
+        V1_2::Operand::ExtraParams extraParams;
         if (op.type == TestOperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
             extraParams.channelQuant(SymmPerChannelQuantParams{
                     .scales = op.channelQuant.scales, .channelDim = op.channelQuant.channelDim});
@@ -317,7 +317,7 @@
     // - [2+i, 2+i+o): Output device memories
     DeviceMemoryAllocator allocator(device, preparedModel, testModel);
     std::vector<sp<IBuffer>> buffers;
-    std::vector<int32_t> tokens;
+    std::vector<uint32_t> tokens;
 
     // Model inputs.
     hidl_vec<RequestArgument> inputs(testModel.inputIndexes.size());
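
A short illustrative sketch of how the helper above is consumed after the signedness change (the IOType::INPUT enumerator and the index are assumed from the surrounding test utilities; buffers and tokens are the vectors declared in this function):

// Illustrative use of DeviceMemoryAllocator::allocate<>() with the new token type.
const auto [buffer, token] = allocator.allocate<IOType::INPUT>(/*index=*/0);
if (buffer != nullptr) {
    buffers.push_back(buffer);  // std::vector<sp<IBuffer>>
    tokens.push_back(token);    // std::vector<uint32_t> after this change
}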
diff --git a/neuralnetworks/1.3/vts/functional/QualityOfServiceTests.cpp b/neuralnetworks/1.3/vts/functional/QualityOfServiceTests.cpp
index 62ffcda..2f1e05c 100644
--- a/neuralnetworks/1.3/vts/functional/QualityOfServiceTests.cpp
+++ b/neuralnetworks/1.3/vts/functional/QualityOfServiceTests.cpp
@@ -64,11 +64,11 @@
                     std::chrono::time_point_cast<std::chrono::nanoseconds>(currentTime);
             const uint64_t nanosecondsSinceEpoch =
                     currentTimeInNanoseconds.time_since_epoch().count();
-            deadline.nanoseconds(nanosecondsSinceEpoch);
+            deadline.nanosecondsSinceEpoch(nanosecondsSinceEpoch);
         } break;
         case DeadlineBoundType::UNLIMITED: {
             uint64_t unlimited = std::numeric_limits<uint64_t>::max();
-            deadline.nanoseconds(unlimited);
+            deadline.nanosecondsSinceEpoch(unlimited);
         } break;
     }
     return deadline;
diff --git a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
index 1245432..0a35e2d 100644
--- a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
@@ -50,7 +50,7 @@
 
     OptionalTimePoint deadline;
     if (testDeadline) {
-        deadline.nanoseconds(std::numeric_limits<uint64_t>::max());
+        deadline.nanosecondsSinceEpoch(std::numeric_limits<uint64_t>::max());
     }
 
     sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
diff --git a/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp
index a29d158..2a4269f 100644
--- a/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp
@@ -61,7 +61,7 @@
 
     OptionalTimePoint deadline;
     if (testDeadline) {
-        deadline.nanoseconds(std::numeric_limits<uint64_t>::max());
+        deadline.nanosecondsSinceEpoch(std::numeric_limits<uint64_t>::max());
     }
 
     // asynchronous
diff --git a/power/aidl/android/hardware/power/Boost.aidl b/power/aidl/android/hardware/power/Boost.aidl
index 162a36a..c992fd3 100644
--- a/power/aidl/android/hardware/power/Boost.aidl
+++ b/power/aidl/android/hardware/power/Boost.aidl
@@ -29,6 +29,13 @@
     INTERACTION,
 
     /**
+     * This boost indicates that the framework is likely to provide a new
+     * display frame soon. This implies that the device should ensure that the
+     * display processing path is powered up and ready to receive that update.
+     */
+    DISPLAY_UPDATE_IMMINENT,
+
+    /**
     * The hints below are currently not sent by the Android framework, but OEMs
     * might choose to implement them for power/perf optimizations.
      */
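
As a hedged illustration of how the new boost would be requested, the sketch below uses the same cpp-backend proxy as the VTS test and assumes the existing setBoost(Boost, int durationMs) method; the helper name and the durationMs value of 0 are illustrative.

#include <android/hardware/power/Boost.h>
#include <android/hardware/power/IPower.h>

using android::sp;
using android::hardware::power::Boost;
using android::hardware::power::IPower;

// Illustrative only: hint the HAL that a display frame is about to be produced.
void onFrameExpected(const sp<IPower>& power) {
    bool supported = false;
    if (power->isBoostSupported(Boost::DISPLAY_UPDATE_IMMINENT, &supported).isOk() &&
        supported) {
        power->setBoost(Boost::DISPLAY_UPDATE_IMMINENT, 0 /* durationMs: HAL default */);
    }
}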
diff --git a/power/aidl/android/hardware/power/IPower.aidl b/power/aidl/android/hardware/power/IPower.aidl
index 9fb3fc0..2c4bd86 100644
--- a/power/aidl/android/hardware/power/IPower.aidl
+++ b/power/aidl/android/hardware/power/IPower.aidl
@@ -43,7 +43,7 @@
      */
     boolean isModeSupported(in Mode type);
 
-   /**
+    /**
     * setBoost() indicates the device may need to boost some resources, as the
     * load is likely to increase before the kernel governors can react.
      * Depending on the boost, it may be appropriate to raise the frequencies of
diff --git a/power/aidl/android/hardware/power/Mode.aidl b/power/aidl/android/hardware/power/Mode.aidl
index 1792add..ae113e3 100644
--- a/power/aidl/android/hardware/power/Mode.aidl
+++ b/power/aidl/android/hardware/power/Mode.aidl
@@ -39,6 +39,50 @@
     SUSTAINED_PERFORMANCE,
 
     /**
+     * Sets the device to a fixed performance level which can be sustained under
+     * normal indoor conditions for at least 10 minutes.
+     *
+     * This is similar to sustained performance mode, except that whereas
+     * sustained performance mode puts an upper bound on performance in the
+     * interest of long-term stability, fixed performance mode puts both upper
+     * and lower bounds on performance such that any workload run while in a
+     * fixed performance mode should complete in a repeatable amount of time
+     * (except if the device is under thermal throttling).
+     *
+     * This mode is not intended for general purpose use, but rather to enable
+     * games and other performance-sensitive applications to reduce the number
+     * of variables during profiling and performance debugging. As such, while
+     * it is valid to set the device to minimum clocks for all subsystems in
+     * this mode, it is preferable to attempt to make the relative performance
+     * of the CPU, GPU, and other subsystems match typical usage, even if the
+     * frequencies have to be reduced to provide sustainability.
+     *
+     * To calibrate this mode, follow these steps:
+     *
+     * 1) Build and push the HWUI macrobench as described in
+     *    //frameworks/base/libs/hwui/tests/macrobench/how_to_run.txt
+     * 2) Run the macrobench as follows:
+     *    while true; do \
+     *      adb shell /data/benchmarktest/hwuimacro/hwuimacro shadowgrid2 -c 200 -r 10; \
+     *    done
+     * 3) Determine a fixed set of device clocks such that the loop in (2) can
+     *    run for at least 10 minutes, starting from an idle device on a desk
+     *    at room temperature (roughly 22 Celsius), without hitting thermal
+     *    throttling.
+     * 4) After setting those clocks, set the system property
+     *    ro.power.fixed_performance_scale_factor to a value N, where N is the
+     *    number of times the loop from (2) runs during the 10 minute test
+     *    cycle. It is expected that in FIXED_PERFORMANCE mode, unless there is
+     *    thermal throttling, the loop will run N to N+1 times (inclusive).
+     *
+     * After calibrating this, while in FIXED_PERFORMANCE mode, the macrobench
+     * results obtained while running the loop in (2) should be consistent both
+     * within a given run and from the first run in the 10 minute window through
+     * the last run in the window.
+     */
+    FIXED_PERFORMANCE,
+
+    /**
      * This mode indicates VR Mode is activated or not. VR mode is intended
      * to provide minimum guarantee for performance for the amount of time the
      * device can sustain it.
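
A hedged client-side sketch of the new mode follows: it assumes the existing setMode(Mode, boolean) method on IPower and reads back the calibration property named in step (4) above; the helper name is illustrative, and the property is only meaningful once the calibration procedure has been performed.

#include <android-base/properties.h>
#include <android/hardware/power/IPower.h>
#include <android/hardware/power/Mode.h>

using android::sp;
using android::base::GetUintProperty;
using android::hardware::power::IPower;
using android::hardware::power::Mode;

// Illustrative only: enter FIXED_PERFORMANCE and read the calibrated scale factor.
void enterFixedPerformance(const sp<IPower>& power) {
    bool supported = false;
    if (power->isModeSupported(Mode::FIXED_PERFORMANCE, &supported).isOk() && supported) {
        power->setMode(Mode::FIXED_PERFORMANCE, true);
        // N from calibration step (4): expected shadowgrid2 iterations per 10-minute run.
        const uint64_t scaleFactor =
                GetUintProperty<uint64_t>("ro.power.fixed_performance_scale_factor", 0);
        (void)scaleFactor;  // a profiler would use this to normalize its results
    }
}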
diff --git a/power/aidl/vts/VtsHalPowerTargetTest.cpp b/power/aidl/vts/VtsHalPowerTargetTest.cpp
index c0e0858..25a385e 100644
--- a/power/aidl/vts/VtsHalPowerTargetTest.cpp
+++ b/power/aidl/vts/VtsHalPowerTargetTest.cpp
@@ -16,6 +16,7 @@
 #include <aidl/Gtest.h>
 #include <aidl/Vintf.h>
 
+#include <android-base/properties.h>
 #include <android/hardware/power/Boost.h>
 #include <android/hardware/power/IPower.h>
 #include <android/hardware/power/Mode.h>
@@ -27,6 +28,7 @@
 using android::ProcessState;
 using android::sp;
 using android::String16;
+using android::base::GetUintProperty;
 using android::binder::Status;
 using android::hardware::power::Boost;
 using android::hardware::power::IPower;
@@ -77,7 +79,7 @@
     for (const auto& mode : kInvalidModes) {
         bool supported;
         ASSERT_TRUE(power->isModeSupported(mode, &supported).isOk());
-        // Should return false for values outsides enum
+        // Should return false for values outside enum
         ASSERT_FALSE(supported);
     }
 }
@@ -103,11 +105,27 @@
     for (const auto& boost : kInvalidBoosts) {
         bool supported;
         ASSERT_TRUE(power->isBoostSupported(boost, &supported).isOk());
-        // Should return false for values outsides enum
+        // Should return false for values outside enum
         ASSERT_FALSE(supported);
     }
 }
 
+// FIXED_PERFORMANCE mode is required for all devices which ship on Android 11
+// or later
+TEST_P(PowerAidl, hasFixedPerformance) {
+    auto apiLevel = GetUintProperty<uint64_t>("ro.product.first_api_level", 0);
+    if (apiLevel == 0) {
+        apiLevel = GetUintProperty<uint64_t>("ro.build.version.sdk", 0);
+    }
+    ASSERT_NE(apiLevel, 0);
+
+    if (apiLevel >= 30) {
+        bool supported;
+        ASSERT_TRUE(power->isModeSupported(Mode::FIXED_PERFORMANCE, &supported).isOk());
+        ASSERT_TRUE(supported);
+    }
+}
+
 INSTANTIATE_TEST_SUITE_P(Power, PowerAidl,
                          testing::ValuesIn(android::getAidlHalInstanceNames(IPower::descriptor)),
                          android::PrintInstanceNameToString);