Merge "Port AudioCapabilities to native." into main
diff --git a/Android.bp b/Android.bp
index afb1341..d882ea3 100644
--- a/Android.bp
+++ b/Android.bp
@@ -130,11 +130,11 @@
             imports: ["android.hardware.audio.core-V2"],
         },
     ],
-    frozen: true,
+    frozen: false,
 
 }
 
-latest_av_audio_types_aidl = "av-audio-types-aidl-V1"
+latest_av_audio_types_aidl = "av-audio-types-aidl-V2"
 
 cc_defaults {
     name: "latest_av_audio_types_aidl_ndk_shared",
diff --git a/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/Android.bp b/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/Android.bp
index 16ea15e..6e55a16 100644
--- a/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/Android.bp
+++ b/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/Android.bp
@@ -64,10 +64,6 @@
         "libfwdlock-decoder",
     ],
 
-    whole_static_libs: [
-        "libc++fs",
-    ],
-
     local_include_dirs: ["include"],
 
     relative_install_path: "drm",
diff --git a/media/audioaidlconversion/include/media/AidlConversionEffect.h b/media/audioaidlconversion/include/media/AidlConversionEffect.h
index b03d06b..e51bf8b 100644
--- a/media/audioaidlconversion/include/media/AidlConversionEffect.h
+++ b/media/audioaidlconversion/include/media/AidlConversionEffect.h
@@ -72,9 +72,6 @@
                 MAKE_EXTENSION_PARAMETER_ID(_effect, _tag##Tag, _extId);                          \
         aidl::android::hardware::audio::effect::Parameter _aidlParam;                             \
         RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(_id, &_aidlParam))); \
-        aidl::android::hardware::audio::effect::VendorExtension _ext =                            \
-                VALUE_OR_RETURN_STATUS(GET_PARAMETER_SPECIFIC_FIELD(                              \
-                        _aidlParam, _effect, _tag, _effect::vendor, VendorExtension));            \
         return VALUE_OR_RETURN_STATUS(                                                            \
                 aidl::android::aidl2legacy_Parameter_EffectParameterWriter(_aidlParam, _param));  \
     }
diff --git a/media/codec2/hal/client/GraphicsTracker.cpp b/media/codec2/hal/client/GraphicsTracker.cpp
index f80809a..95f5a6e 100644
--- a/media/codec2/hal/client/GraphicsTracker.cpp
+++ b/media/codec2/hal/client/GraphicsTracker.cpp
@@ -673,6 +673,15 @@
             ALOGW("BQ might not be ready for dequeueBuffer()");
             return C2_BLOCKING;
         }
+        bool cacheExpired = false;
+        {
+            std::unique_lock<std::mutex> l(mLock);
+            cacheExpired = (mBufferCache.get() != cache.get());
+        }
+        if (cacheExpired) {
+            ALOGW("a new BQ is configured. dequeueBuffer() error %d", (int)status);
+            return C2_BLOCKING;
+        }
         ALOGE("BQ in inconsistent status. dequeueBuffer() error %d", (int)status);
         return C2_CORRUPTED;
     }
diff --git a/media/codec2/hal/common/MultiAccessUnitHelper.cpp b/media/codec2/hal/common/MultiAccessUnitHelper.cpp
index b1fa82f..55bf7a9 100644
--- a/media/codec2/hal/common/MultiAccessUnitHelper.cpp
+++ b/media/codec2/hal/common/MultiAccessUnitHelper.cpp
@@ -497,9 +497,10 @@
                     // This is to take care of the last bytes and to decide to send with
                     // FLAG_INCOMPLETE or not.
                     if ((frame->mWview
-                            && (frame->mWview->offset() > frame->mLargeFrameTuning.thresholdSize))
+                            && (frame->mWview->offset() >= frame->mLargeFrameTuning.thresholdSize))
                             || frame->mComponentFrameIds.empty()) {
                         if (frame->mLargeWork) {
+                            frame->mLargeWork->result = C2_OK;
                             finalizeWork(*frame);
                             addOutWork(frame->mLargeWork);
                             frame->reset();
@@ -558,12 +559,15 @@
         c2_status_t ret = C2_OK;
         if (frame.mLargeWork == nullptr) {
             frame.mLargeWork.reset(new C2Work);
+            frame.mLargeWork->result = C2_OK;
+            frame.mLargeWork->input.flags = (C2FrameData::flags_t)0;
             frame.mLargeWork->input.ordinal = frame.inOrdinal;
             frame.mLargeWork->input.ordinal.frameIndex = frame.inOrdinal.frameIndex;
         }
         if (allocateWorket) {
             if (frame.mLargeWork->worklets.size() == 0) {
                 frame.mLargeWork->worklets.emplace_back(new C2Worklet);
+                frame.mLargeWork->worklets.back()->output.flags = (C2FrameData::flags_t)0;
             }
         }
         if (allocateBuffer) {
@@ -611,6 +615,9 @@
         if (c2ret != C2_OK) {
             return c2ret;
         }
+        uint32_t flags = work->input.flags;
+        flags |= frame.mLargeWork->input.flags;
+        frame.mLargeWork->input.flags = (C2FrameData::flags_t)flags;
         C2FrameData& outputFramedata = frame.mLargeWork->worklets.front()->output;
         if (!(*worklet)->output.configUpdate.empty()) {
             for (auto& configUpdate : (*worklet)->output.configUpdate) {
@@ -678,6 +685,9 @@
                         }
                     }
                     allocateWork(frame, true, true);
+                    uint32_t flags = work->input.flags;
+                    flags |= frame.mLargeWork->input.flags;
+                    frame.mLargeWork->input.flags = (C2FrameData::flags_t)flags;
                     C2ReadView rView = blocks.front().map().get();
                     if (rView.error()) {
                         LOG(ERROR) << "Buffer read view error";
@@ -744,7 +754,8 @@
     }
     LOG(DEBUG) << "Finalizing work with input Idx "
             << frame.mLargeWork->input.ordinal.frameIndex.peekull()
-            << " timestamp " << timeStampUs;
+            << " timestamp " << timeStampUs
+            << " inFlags " << inFlags;
     uint32_t finalFlags = 0;
     if ((!forceComplete)
             && (frame.mLargeWork->result == C2_OK)
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
index 39aadd7..0aae23c 100644
--- a/media/codec2/sfplugin/CCodec.cpp
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -2605,18 +2605,31 @@
             c2_status_t status = GetCodec2BlockPool(C2BlockPool::BASIC_LINEAR, nullptr, &pool);
 
             if (status == C2_OK) {
+                int width, height;
+                config->mInputFormat->findInt32("width", &width);
+                config->mInputFormat->findInt32("height", &height);
+                // The length of the qp-map corresponds to the number of 16x16 blocks in one frame
+                int expectedMapSize = ((width + 15) / 16) * ((height + 15) / 16);
                 size_t mapSize = qpOffsetMap->size();
-                std::shared_ptr<C2LinearBlock> block;
-                status = pool->fetchLinearBlock(mapSize,
-                        C2MemoryUsage{C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE}, &block);
-                if (status == C2_OK && !block->map().get().error()) {
-                    C2WriteView wView = block->map().get();
-                    uint8_t* outData = wView.data();
-                    memcpy(outData, qpOffsetMap->data(), mapSize);
-                    C2InfoBuffer info = C2InfoBuffer::CreateLinearBuffer(
-                            kParamIndexQpOffsetMapBuffer,
-                            block->share(0, mapSize, C2Fence()));
-                    mChannel->setInfoBuffer(std::make_shared<C2InfoBuffer>(info));
+                if (mapSize >= expectedMapSize) {
+                    std::shared_ptr<C2LinearBlock> block;
+                    status = pool->fetchLinearBlock(
+                            expectedMapSize,
+                            C2MemoryUsage{C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE},
+                            &block);
+                    if (status == C2_OK && !block->map().get().error()) {
+                        C2WriteView wView = block->map().get();
+                        uint8_t* outData = wView.data();
+                        memcpy(outData, qpOffsetMap->data(), expectedMapSize);
+                        C2InfoBuffer info = C2InfoBuffer::CreateLinearBuffer(
+                                kParamIndexQpOffsetMapBuffer,
+                                block->share(0, expectedMapSize, C2Fence()));
+                        mChannel->setInfoBuffer(std::make_shared<C2InfoBuffer>(info));
+                    }
+                } else {
+                    ALOGE("Ignoring param key %s as buffer size %zu is less than expected "
+                          "buffer size %d",
+                          PARAMETER_KEY_QP_OFFSET_MAP, mapSize, expectedMapSize);
                 }
             }
             params->removeEntryByName(PARAMETER_KEY_QP_OFFSET_MAP);
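The bounds check added above derives the expected QP-offset map size from the input format, one byte per 16x16 block, rounding each dimension up. A minimal standalone sketch of that arithmetic, with illustrative frame dimensions (not taken from this patch):

    #include <cstdio>

    // Expected qp-offset-map length: one entry per 16x16 block, with the frame
    // dimensions rounded up to a multiple of 16 (mirrors the check above).
    static int expectedQpMapSize(int width, int height) {
        return ((width + 15) / 16) * ((height + 15) / 16);
    }

    int main() {
        // Illustrative numbers: a 1920x1080 frame spans 120x68 blocks, so a
        // valid qp-offset-map buffer must hold at least 8160 bytes.
        std::printf("%d\n", expectedQpMapSize(1920, 1080));  // prints 8160
        return 0;
    }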
diff --git a/media/codec2/sfplugin/CCodecConfig.cpp b/media/codec2/sfplugin/CCodecConfig.cpp
index db59227..36725ec 100644
--- a/media/codec2/sfplugin/CCodecConfig.cpp
+++ b/media/codec2/sfplugin/CCodecConfig.cpp
@@ -1890,6 +1890,9 @@
         if (mDomain == (IS_VIDEO | IS_ENCODER)) {
             AString qpOffsetRects;
             if (params->findString(PARAMETER_KEY_QP_OFFSET_RECTS, &qpOffsetRects)) {
+                int width, height;
+                mInputFormat->findInt32("width", &width);
+                mInputFormat->findInt32("height", &height);
                 std::vector<C2QpOffsetRectStruct> c2QpOffsetRects;
                 char mutableStrQpOffsetRects[strlen(qpOffsetRects.c_str()) + 1];
                 strcpy(mutableStrQpOffsetRects, qpOffsetRects.c_str());
@@ -1899,11 +1902,17 @@
                     if (sscanf(box, "%d,%d-%d,%d=%d", &top, &left, &bottom, &right, &offset) == 5) {
                         left = c2_max(0, left);
                         top = c2_max(0, top);
+                        right = c2_min(right, width);
+                        bottom = c2_min(bottom, height);
                         if (right > left && bottom > top) {
                             C2Rect rect(right - left, bottom - top);
                             rect.at(left, top);
                             c2QpOffsetRects.push_back(C2QpOffsetRectStruct(rect, offset));
+                        } else {
+                            ALOGE("Rects configuration %s is not valid.", box);
                         }
+                    } else {
+                        ALOGE("Rects configuration %s doesn't follow the string pattern.", box);
                     }
                     box = strtok(nullptr, ";");
                 }
diff --git a/media/codec2/vndk/include/C2BqBufferPriv.h b/media/codec2/vndk/include/C2BqBufferPriv.h
index 1e8dd40..806932c 100644
--- a/media/codec2/vndk/include/C2BqBufferPriv.h
+++ b/media/codec2/vndk/include/C2BqBufferPriv.h
@@ -44,7 +44,77 @@
  *
  * If a specific HGBP is configured, the HGBP acts as an allocator for creating graphic blocks.
  *
- * TODO: add more ducumentation(graphic block life-cycle, waitable object and workaounds)
+ *
+ * HGBP/IGBP and the BlockPool
+ *
+ * GraphicBuffer(s) from BufferQueue(IGBP/IGBC) are managed by slot id.
+ * A created GraphicBuffer occupies a slot (so the GraphicBuffer has a slot-id).
+ * A GraphicBuffer is produced, consumed and recycled based on its slot-id
+ * w.r.t. the BufferQueue.
+ *
+ * HGBP::dequeueBuffer() returns a slot id whose slot has an available GraphicBuffer.
+ * If necessary, HGBP allocates a new GraphicBuffer for the slot and indicates
+ * via a return flag that a new buffer has been allocated.
+ * To retrieve the GraphicBuffer, HGBP::requestBuffer() must be called with the
+ * slot id. In order to save HGBP remote calls, the blockpool caches the
+ * allocated GraphicBuffer(s) along with the slot information.
+ *
+ * The blockpool provides a C2GraphicBlock upon \fetchGraphicBlock().
+ * The C2GraphicBlock has a native handle, which is extracted from a GraphicBuffer
+ * and then cloned so that its life-cycle is independent of the GraphicBuffer. The
+ * GraphicBuffer is allocated by HGBP::dequeueBuffer() and retrieved by
+ * HGBP::requestBuffer() if an HGBP is configured.
+ *
+ *
+ * Life-cycle of C2GraphicBlock
+ *
+ * The decoder HAL writes a decoded frame into a C2GraphicBlock. Upon
+ * completion, the component sends the block to the client in the remote process
+ * (i.e. to MediaCodec). The remote process renders the frame into the output surface
+ * via IGBP::queueBuffer() (note: this call is not hidlized).
+ *
+ * If the decoder HAL destroys the C2GraphicBlock without transferring it to the
+ * client, the destroy request goes to the BlockPool. The BlockPool then
+ * releases the associated GraphicBuffer from its slot back to
+ * the HGBP for recycling via HGBP::cancelBuffer().
+ *
+ *
+ * Clearing the Cache (GraphicBuffer)
+ *
+ * When the output surface is switched to a new surface, the GraphicBuffers from
+ * the old surface are either migrated or cleared.
+ *
+ * The GraphicBuffer(s) still in use are migrated to a new surface during
+ * configuration via HGBP::attachBuffer(). The GraphicBuffer(s) not in use are
+ * cleared from the cache inside the BlockPool.
+ *
+ * When the surface is switched to a null surface, all the
+ * GraphicBuffers in the cache are cleared.
+ *
+ *
+ * Workaround w.r.t. b/322731059 (Deferring cleaning the cache)
+ *
+ * Some vendor devices have issues with graphic buffer lifecycle management,
+ * where the graphic buffers get released even though the cloned native handles
+ * in the remote process have not been closed yet. On those devices this led to
+ * rare crashes when the cache was cleared early.
+ *
+ * We work around the crash by deferring the clearing of the cache.
+ * The workaround is not enabled by default, and can be enabled via a
+ * system property as shown below:
+ *
+ *        'debug.codec2.bqpool_dealloc_after_stop' = 1
+ *
+ * Configuring the debug flag causes \::setDeferDeallocationAfterStop() to be
+ * called after the blockpool is created, which enables the deferral.
+ *
+ * With the deferral enabled, clearing the GraphicBuffers is delayed until
+ *  1) \::clearDeferredBlocks() is called,
+ *        typically after the HAL processes a stop() request,
+ *  2) or a new ::fetchGraphicBlock() is called.
+ *
+ *  Since the deferral delays the deallocation, it results in more memory
+ *  consumption during that brief period.
  */
 class C2BufferQueueBlockPool : public C2BlockPool {
 public:
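The workaround documented above is driven by a debug system property. A sketch of how a caller might opt in after creating the pool; the header paths, the use of property_get_bool, and the no-argument signatures of the two methods are assumptions based on the comment, not taken from this patch:

    #include <memory>

    #include <cutils/properties.h>   // property_get_bool (path assumed)
    #include <C2BqBufferPriv.h>      // C2BufferQueueBlockPool (path assumed)

    // Sketch only: enable the deferred-deallocation workaround described in the
    // header comment when the debug property is set.
    void maybeDeferDeallocation(const std::shared_ptr<C2BufferQueueBlockPool>& pool) {
        if (property_get_bool("debug.codec2.bqpool_dealloc_after_stop", false)) {
            pool->setDeferDeallocationAfterStop();
        }
    }

    // Later, typically after the HAL has processed a stop() request:
    //     pool->clearDeferredBlocks();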
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index 9d9b574..f6ddc3e 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -574,7 +574,7 @@
      * For privacy, the following usages can not be recorded: AAUDIO_VOICE_COMMUNICATION*,
      * AAUDIO_USAGE_NOTIFICATION*, AAUDIO_USAGE_ASSISTANCE* and {@link #AAUDIO_USAGE_ASSISTANT}.
      *
-     * On <a href="/reference/android/os/Build.VERSION_CODES#Q">Build.VERSION_CODES</a>,
+     * On <a href="/reference/android/os/Build.VERSION_CODES#Q">Q</a>,
      * this means only {@link #AAUDIO_USAGE_MEDIA} and {@link #AAUDIO_USAGE_GAME} may be captured.
      *
      * See <a href="/reference/android/media/AudioAttributes.html#ALLOW_CAPTURE_BY_ALL">
diff --git a/media/libaaudio/src/core/AAudioStreamParameters.cpp b/media/libaaudio/src/core/AAudioStreamParameters.cpp
index 3e51575..67fc668 100644
--- a/media/libaaudio/src/core/AAudioStreamParameters.cpp
+++ b/media/libaaudio/src/core/AAudioStreamParameters.cpp
@@ -23,13 +23,6 @@
 
 using namespace aaudio;
 
-// TODO These defines should be moved to a central place in audio.
-#define SAMPLES_PER_FRAME_MIN        1
-#define SAMPLES_PER_FRAME_MAX        FCC_LIMIT
-#define SAMPLE_RATE_HZ_MIN           8000
-// HDMI supports up to 32 channels at 1536000 Hz.
-#define SAMPLE_RATE_HZ_MAX           1600000
-
 void AAudioStreamParameters::copyFrom(const AAudioStreamParameters &other) {
     mSamplesPerFrame      = other.mSamplesPerFrame;
     mSampleRate           = other.mSampleRate;
@@ -73,8 +66,8 @@
 }
 
 aaudio_result_t AAudioStreamParameters::validate() const {
-    if (mSamplesPerFrame != AAUDIO_UNSPECIFIED
-        && (mSamplesPerFrame < SAMPLES_PER_FRAME_MIN || mSamplesPerFrame > SAMPLES_PER_FRAME_MAX)) {
+    if (mSamplesPerFrame != AAUDIO_UNSPECIFIED && (mSamplesPerFrame < CHANNEL_COUNT_MIN_AAUDIO ||
+                                                   mSamplesPerFrame > CHANNEL_COUNT_MAX_AAUDIO)) {
         ALOGD("channelCount out of range = %d", mSamplesPerFrame);
         return AAUDIO_ERROR_OUT_OF_RANGE;
     }
@@ -105,8 +98,8 @@
     aaudio_result_t result = isFormatValid (mAudioFormat);
     if (result != AAUDIO_OK) return result;
 
-    if (mSampleRate != AAUDIO_UNSPECIFIED
-        && (mSampleRate < SAMPLE_RATE_HZ_MIN || mSampleRate > SAMPLE_RATE_HZ_MAX)) {
+    if (mSampleRate != AAUDIO_UNSPECIFIED &&
+        (mSampleRate < SAMPLE_RATE_HZ_MIN_AAUDIO || mSampleRate > SAMPLE_RATE_HZ_MAX_IEC610937)) {
         ALOGD("sampleRate out of range = %d", mSampleRate);
         return AAUDIO_ERROR_INVALID_RATE;
     }
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.cpp b/media/libaaudio/src/core/AudioStreamBuilder.cpp
index ac4e2b3..4f32883 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.cpp
+++ b/media/libaaudio/src/core/AudioStreamBuilder.cpp
@@ -46,15 +46,6 @@
 #define AAUDIO_MMAP_POLICY_DEFAULT             AAUDIO_POLICY_NEVER
 #define AAUDIO_MMAP_EXCLUSIVE_POLICY_DEFAULT   AAUDIO_POLICY_NEVER
 
-// These values are for a pre-check before we ask the lower level service to open a stream.
-// So they are just outside the maximum conceivable range of value,
-// on the edge of being ridiculous.
-// TODO These defines should be moved to a central place in audio.
-#define SAMPLES_PER_FRAME_MIN        1
-#define SAMPLES_PER_FRAME_MAX        FCC_LIMIT
-#define SAMPLE_RATE_HZ_MIN           8000
-// HDMI supports up to 32 channels at 1536000 Hz.
-#define SAMPLE_RATE_HZ_MAX           1600000
 #define FRAMES_PER_DATA_CALLBACK_MIN 1
 #define FRAMES_PER_DATA_CALLBACK_MAX (1024 * 1024)
 
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index 91bc700..7af6eb1 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -130,11 +130,7 @@
 }
 
 AudioRecord::AudioRecord(const AttributionSourceState &client)
-    : mActive(false), mStatus(NO_INIT), mClientAttributionSource(client),
-      mSessionId(AUDIO_SESSION_ALLOCATE), mPreviousPriority(ANDROID_PRIORITY_NORMAL),
-      mPreviousSchedulingGroup(SP_DEFAULT), mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
-      mRoutedDeviceId(AUDIO_PORT_HANDLE_NONE), mSelectedMicDirection(MIC_DIRECTION_UNSPECIFIED),
-      mSelectedMicFieldDimension(MIC_FIELD_DIMENSION_DEFAULT)
+    : mClientAttributionSource(client)
 {
 }
 
@@ -154,13 +150,7 @@
         audio_port_handle_t selectedDeviceId,
         audio_microphone_direction_t selectedMicDirection,
         float microphoneFieldDimension)
-    : mActive(false),
-      mStatus(NO_INIT),
-      mClientAttributionSource(client),
-      mSessionId(AUDIO_SESSION_ALLOCATE),
-      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
-      mPreviousSchedulingGroup(SP_DEFAULT),
-      mProxy(nullptr)
+    : mClientAttributionSource(client)
 {
     uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(mClientAttributionSource.uid));
     pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(mClientAttributionSource.pid));
@@ -199,9 +189,6 @@
 }
 
 void AudioRecord::stopAndJoinCallbacks() {
-    // Prevent nullptr crash if it did not open properly.
-    if (mStatus != NO_ERROR) return;
-
     // Make sure that callback function exits in the case where
     // it is looping on buffer empty condition in obtainBuffer().
     // Otherwise the callback thread will never exit.
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 34f2a1c..1a575a7 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -233,25 +233,9 @@
     return NO_ERROR;
 }
 
-AudioTrack::AudioTrack() : AudioTrack(AttributionSourceState())
-{
-}
-
 AudioTrack::AudioTrack(const AttributionSourceState& attributionSource)
-    : mStatus(NO_INIT),
-      mState(STATE_STOPPED),
-      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
-      mPreviousSchedulingGroup(SP_DEFAULT),
-      mPausedPosition(0),
-      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
-      mRoutedDeviceId(AUDIO_PORT_HANDLE_NONE),
-      mClientAttributionSource(attributionSource),
-      mAudioTrackCallback(new AudioTrackCallback())
+    : mClientAttributionSource(attributionSource)
 {
-    mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
-    mAttributes.usage = AUDIO_USAGE_UNKNOWN;
-    mAttributes.flags = AUDIO_FLAG_NONE;
-    strcpy(mAttributes.tags, "");
 }
 
 AudioTrack::AudioTrack(
@@ -271,21 +255,12 @@
         bool doNotReconnect,
         float maxRequiredSpeed,
         audio_port_handle_t selectedDeviceId)
-    : mStatus(NO_INIT),
-      mState(STATE_STOPPED),
-      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
-      mPreviousSchedulingGroup(SP_DEFAULT),
-      mPausedPosition(0),
-      mAudioTrackCallback(new AudioTrackCallback())
 {
-    mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
-
-    // make_unique does not aggregate init until c++20
-    mSetParams = std::unique_ptr<SetParams>{
-            new SetParams{streamType, sampleRate, format, channelMask, frameCount, flags, callback,
-                          notificationFrames, 0 /*sharedBuffer*/, false /*threadCanCallJava*/,
-                          sessionId, transferType, offloadInfo, attributionSource, pAttributes,
-                          doNotReconnect, maxRequiredSpeed, selectedDeviceId}};
+    mSetParams = std::make_unique<SetParams>(
+        streamType, sampleRate, format, channelMask, frameCount, flags, callback,
+        notificationFrames, nullptr /*sharedBuffer*/, false /*threadCanCallJava*/,
+        sessionId, transferType, offloadInfo, attributionSource, pAttributes,
+        doNotReconnect, maxRequiredSpeed, selectedDeviceId);
 }
 
 namespace {
@@ -344,13 +319,6 @@
         const audio_attributes_t* pAttributes,
         bool doNotReconnect,
         float maxRequiredSpeed)
-    : mStatus(NO_INIT),
-      mState(STATE_STOPPED),
-      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
-      mPreviousSchedulingGroup(SP_DEFAULT),
-      mPausedPosition(0),
-      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
-      mAudioTrackCallback(new AudioTrackCallback())
 {
     mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
 
@@ -400,9 +368,6 @@
 }
 
 void AudioTrack::stopAndJoinCallbacks() {
-    // Prevent nullptr crash if it did not open properly.
-    if (mStatus != NO_ERROR) return;
-
     // Make sure that callback function exits in the case where
     // it is looping on buffer full condition in obtainBuffer().
     // Otherwise the callback thread will never exit.
@@ -919,6 +884,7 @@
     const int64_t beginNs = systemTime();
 
     AutoMutex lock(mLock);
+    if (mProxy == nullptr) return;  // not successfully initialized.
     mediametrics::Defer defer([&]() {
         mediametrics::LogItem(mMetricsId)
             .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_STOP)
diff --git a/media/libaudioclient/PolicyAidlConversion.cpp b/media/libaudioclient/PolicyAidlConversion.cpp
index a71bb18..7588582 100644
--- a/media/libaudioclient/PolicyAidlConversion.cpp
+++ b/media/libaudioclient/PolicyAidlConversion.cpp
@@ -377,6 +377,8 @@
             return AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS;
         case media::AudioPolicyForcedConfig::ENCODED_SURROUND_MANUAL:
             return AUDIO_POLICY_FORCE_ENCODED_SURROUND_MANUAL;
+        case media::AudioPolicyForcedConfig::BT_BLE:
+            return AUDIO_POLICY_FORCE_BT_BLE;
     }
     return unexpected(BAD_VALUE);
 }
@@ -416,6 +418,8 @@
             return media::AudioPolicyForcedConfig::ENCODED_SURROUND_ALWAYS;
         case AUDIO_POLICY_FORCE_ENCODED_SURROUND_MANUAL:
             return media::AudioPolicyForcedConfig::ENCODED_SURROUND_MANUAL;
+        case AUDIO_POLICY_FORCE_BT_BLE:
+            return media::AudioPolicyForcedConfig::BT_BLE;
         case AUDIO_POLICY_FORCE_CFG_CNT:
             break;
     }
diff --git a/media/libaudioclient/aidl/android/media/AudioPolicyForcedConfig.aidl b/media/libaudioclient/aidl/android/media/AudioPolicyForcedConfig.aidl
index 2255d4c..111bb2f 100644
--- a/media/libaudioclient/aidl/android/media/AudioPolicyForcedConfig.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPolicyForcedConfig.aidl
@@ -36,4 +36,5 @@
     ENCODED_SURROUND_NEVER = 13,
     ENCODED_SURROUND_ALWAYS = 14,
     ENCODED_SURROUND_MANUAL = 15,
+    BT_BLE = 16,
 }
diff --git a/media/libaudioclient/include/media/AudioRecord.h b/media/libaudioclient/include/media/AudioRecord.h
index 00f2c7a..d4479ef 100644
--- a/media/libaudioclient/include/media/AudioRecord.h
+++ b/media/libaudioclient/include/media/AudioRecord.h
@@ -681,7 +681,7 @@
 
     // Current client state:  false = stopped, true = active.  Protected by mLock.  If more states
     // are added, consider changing this to enum State { ... } mState as in AudioTrack.
-    bool                    mActive;
+    bool mActive = false;
 
     // for client callback handler
 
@@ -708,7 +708,7 @@
     Modulo<uint32_t>        mNewPosition;           // in frames
     uint32_t                mUpdatePeriod;          // in frames, zero means no EVENT_NEW_POS
 
-    status_t                mStatus;
+    status_t mStatus = NO_INIT;
 
     android::content::AttributionSourceState mClientAttributionSource; // Owner's attribution source
 
@@ -736,8 +736,8 @@
                                                     // held to read or write those bits reliably.
     audio_input_flags_t     mOrigFlags;             // as specified in constructor or set(), const
 
-    audio_session_t         mSessionId;
-    audio_port_handle_t     mPortId;                    // Id from Audio Policy Manager
+    audio_session_t mSessionId = AUDIO_SESSION_ALLOCATE;
+    audio_port_handle_t mPortId = AUDIO_PORT_HANDLE_NONE;
 
     /**
      * mLogSessionId is a string identifying this AudioRecord for the metrics service.
@@ -756,9 +756,9 @@
     sp<IMemory>             mBufferMemory;
     audio_io_handle_t       mInput = AUDIO_IO_HANDLE_NONE; // from AudioSystem::getInputforAttr()
 
-    int                     mPreviousPriority;  // before start()
-    SchedPolicy             mPreviousSchedulingGroup;
-    bool                    mAwaitBoost;    // thread should wait for priority boost before running
+    int mPreviousPriority = ANDROID_PRIORITY_NORMAL;  // before start()
+    SchedPolicy mPreviousSchedulingGroup = SP_DEFAULT;
+    bool mAwaitBoost = false;  // thread should wait for priority boost before running
 
     // The proxy should only be referenced while a lock is held because the proxy isn't
     // multi-thread safe.
@@ -799,14 +799,17 @@
 
     // For Device Selection API
     //  a value of AUDIO_PORT_HANDLE_NONE indicated default (AudioPolicyManager) routing.
-    audio_port_handle_t     mSelectedDeviceId; // Device requested by the application.
-    audio_port_handle_t     mRoutedDeviceId;   // Device actually selected by audio policy manager:
-                                              // May not match the app selection depending on other
-                                              // activity and connected devices
+
+    // Device requested by the application.
+    audio_port_handle_t     mSelectedDeviceId = AUDIO_PORT_HANDLE_NONE;
+    // Device actually selected by AudioPolicyManager: This may not match the app
+    // selection depending on other activity and connected devices
+    audio_port_handle_t     mRoutedDeviceId = AUDIO_PORT_HANDLE_NONE;
+
     wp<AudioSystem::AudioDeviceCallback> mDeviceCallback;
 
-    audio_microphone_direction_t mSelectedMicDirection;
-    float mSelectedMicFieldDimension;
+    audio_microphone_direction_t mSelectedMicDirection = MIC_DIRECTION_UNSPECIFIED;
+    float mSelectedMicFieldDimension = MIC_FIELD_DIMENSION_DEFAULT;
 
     int32_t                    mMaxSharedAudioHistoryMs = 0;
     std::string                mSharedAudioPackageName = {};
diff --git a/media/libaudioclient/include/media/AudioTrack.h b/media/libaudioclient/include/media/AudioTrack.h
index 19780ae..3a001a4 100644
--- a/media/libaudioclient/include/media/AudioTrack.h
+++ b/media/libaudioclient/include/media/AudioTrack.h
@@ -257,9 +257,7 @@
     /* Constructs an uninitialized AudioTrack. No connection with
      * AudioFlinger takes place.  Use set() after this.
      */
-                        AudioTrack();
-
-                        AudioTrack(const AttributionSourceState& attributionSourceState);
+    explicit AudioTrack(const AttributionSourceState& attributionSourceState = {});
 
     /* Creates an AudioTrack object and registers it with AudioFlinger.
      * Once created, the track needs to be started before it can be used.
@@ -1312,11 +1310,11 @@
     sp<IMemory>             mSharedBuffer;
     transfer_type           mTransfer;
     audio_offload_info_t    mOffloadInfoCopy;
-    audio_attributes_t      mAttributes;
+    audio_attributes_t mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
 
     size_t                  mFrameSize;             // frame size in bytes
 
-    status_t                mStatus;
+    status_t mStatus = NO_INIT;
 
     // can change dynamically when IAudioTrack invalidated
     uint32_t                mLatency;               // in ms
@@ -1329,7 +1327,7 @@
         STATE_PAUSED_STOPPING,
         STATE_FLUSHED,
         STATE_STOPPING,
-    }                       mState;
+    } mState = STATE_STOPPED;
 
     static constexpr const char *stateToString(State state)
     {
@@ -1459,8 +1457,8 @@
 
     mutable Mutex           mLock;
 
-    int                     mPreviousPriority;          // before start()
-    SchedPolicy             mPreviousSchedulingGroup;
+    int mPreviousPriority = ANDROID_PRIORITY_NORMAL;  // before start()
+    SchedPolicy mPreviousSchedulingGroup = SP_DEFAULT;
     bool                    mAwaitBoost;    // thread should wait for priority boost before running
 
     // The proxy should only be referenced while a lock is held because the proxy isn't
@@ -1472,14 +1470,17 @@
     sp<AudioTrackClientProxy>       mProxy;         // primary owner of the memory
 
     bool                    mInUnderrun;            // whether track is currently in underrun state
-    uint32_t                mPausedPosition;
+    uint32_t mPausedPosition = 0;
 
     // For Device Selection API
     //  a value of AUDIO_PORT_HANDLE_NONE indicated default (AudioPolicyManager) routing.
-    audio_port_handle_t    mSelectedDeviceId; // Device requested by the application.
-    audio_port_handle_t    mRoutedDeviceId;   // Device actually selected by audio policy manager:
-                                              // May not match the app selection depending on other
-                                              // activity and connected devices.
+
+    // Device requested by the application.
+    audio_port_handle_t mSelectedDeviceId = AUDIO_PORT_HANDLE_NONE;
+
+    // Device actually selected by AudioPolicyManager: This may not match the app
+    // selection depending on other activity and connected devices.
+    audio_port_handle_t mRoutedDeviceId = AUDIO_PORT_HANDLE_NONE;
 
     sp<media::VolumeHandler>       mVolumeHandler;
 
@@ -1537,7 +1538,7 @@
         Mutex mAudioTrackCbLock;
         wp<media::IAudioTrackCallback> mCallback;
     };
-    sp<AudioTrackCallback> mAudioTrackCallback;
+    sp<AudioTrackCallback> mAudioTrackCallback = sp<AudioTrackCallback>::make();
 };
 
 }; // namespace android
diff --git a/media/libaudioclient/tests/audio_test_utils.cpp b/media/libaudioclient/tests/audio_test_utils.cpp
index 745c7d1..1599839 100644
--- a/media/libaudioclient/tests/audio_test_utils.cpp
+++ b/media/libaudioclient/tests/audio_test_utils.cpp
@@ -294,6 +294,7 @@
         ts.getBestTimestamp(&position, &timeNs, ExtendedTimestamp::TIMEBASE_MONOTONIC, &location) ==
                 OK) {
         // Use audio timestamp.
+        std::lock_guard l(mMutex);
         timeUs = timeNs / 1000 -
                  (position - mNumFramesReceived + mNumFramesLost) * usPerSec / mSampleRate;
     } else {
@@ -322,6 +323,7 @@
         } else {
             numLostBytes = 0;
         }
+        std::lock_guard l(mMutex);
         const int64_t timestampUs =
                 ((1000000LL * mNumFramesReceived) + (mRecord->getSampleRate() >> 1)) /
                 mRecord->getSampleRate();
@@ -335,6 +337,7 @@
     if (buffer.size() == 0) {
         ALOGW("Nothing is available from AudioRecord callback buffer");
     } else {
+        std::lock_guard l(mMutex);
         const size_t bufferSize = buffer.size();
         const int64_t timestampUs =
                 ((1000000LL * mNumFramesReceived) + (mRecord->getSampleRate() >> 1)) /
@@ -359,17 +362,24 @@
 
 void AudioCapture::onOverrun() {
     ALOGV("received event overrun");
-    mBufferOverrun = true;
 }
 
 void AudioCapture::onMarker(uint32_t markerPosition) {
     ALOGV("received Callback at position %d", markerPosition);
-    mReceivedCbMarkerAtPosition = markerPosition;
+    {
+        std::lock_guard l(mMutex);
+        mReceivedCbMarkerAtPosition = markerPosition;
+    }
+    mMarkerCondition.notify_all();
 }
 
 void AudioCapture::onNewPos(uint32_t markerPosition) {
     ALOGV("received Callback at position %d", markerPosition);
-    mReceivedCbMarkerCount++;
+    {
+        std::lock_guard l(mMutex);
+        mReceivedCbMarkerCount = mReceivedCbMarkerCount.value_or(0) + 1;
+    }
+    mMarkerCondition.notify_all();
 }
 
 void AudioCapture::onNewIAudioRecord() {
@@ -387,20 +397,7 @@
       mFlags(flags),
       mSessionId(sessionId),
       mTransferType(transferType),
-      mAttributes(attributes) {
-    mFrameCount = 0;
-    mNotificationFrames = 0;
-    mNumFramesToRecord = 0;
-    mNumFramesReceived = 0;
-    mNumFramesLost = 0;
-    mBufferOverrun = false;
-    mMarkerPosition = 0;
-    mMarkerPeriod = 0;
-    mReceivedCbMarkerAtPosition = -1;
-    mReceivedCbMarkerCount = 0;
-    mState = REC_NO_INIT;
-    mStopRecording = false;
-}
+      mAttributes(attributes) {}
 
 AudioCapture::~AudioCapture() {
     if (mOutFileFd > 0) close(mOutFileFd);
@@ -531,25 +528,32 @@
     const int maxTries = MAX_WAIT_TIME_MS / WAIT_PERIOD_MS;
     int counter = 0;
     size_t nonContig = 0;
-    while (mNumFramesReceived < mNumFramesToRecord) {
+    int64_t numFramesReceived;
+    {
+        std::lock_guard l(mMutex);
+        numFramesReceived = mNumFramesReceived;
+    }
+    while (numFramesReceived < mNumFramesToRecord) {
         AudioRecord::Buffer recordBuffer;
         recordBuffer.frameCount = mNotificationFrames;
         status_t status = mRecord->obtainBuffer(&recordBuffer, 1, &nonContig);
         if (OK == status) {
             const int64_t timestampUs =
-                    ((1000000LL * mNumFramesReceived) + (mRecord->getSampleRate() >> 1)) /
+                    ((1000000LL * numFramesReceived) + (mRecord->getSampleRate() >> 1)) /
                     mRecord->getSampleRate();
             RawBuffer buff{-1, timestampUs, static_cast<int32_t>(recordBuffer.size())};
             memcpy(buff.mData.get(), recordBuffer.data(), recordBuffer.size());
             buffer = std::move(buff);
-            mNumFramesReceived += recordBuffer.size() / mRecord->frameSize();
+            numFramesReceived += recordBuffer.size() / mRecord->frameSize();
             mRecord->releaseBuffer(&recordBuffer);
             counter = 0;
         } else if (WOULD_BLOCK == status) {
             // if not received a buffer for MAX_WAIT_TIME_MS, something has gone wrong
-            if (counter == maxTries) return TIMED_OUT;
-            counter++;
+            if (counter++ == maxTries) status = TIMED_OUT;
         }
+        std::lock_guard l(mMutex);
+        mNumFramesReceived = numFramesReceived;
+        if (TIMED_OUT == status) return status;
     }
     return OK;
 }
@@ -577,7 +581,12 @@
 status_t AudioCapture::audioProcess() {
     RawBuffer buffer;
     status_t status = OK;
-    while (mNumFramesReceived < mNumFramesToRecord && status == OK) {
+    int64_t numFramesReceived;
+    {
+        std::lock_guard l(mMutex);
+        numFramesReceived = mNumFramesReceived;
+    }
+    while (numFramesReceived < mNumFramesToRecord && status == OK) {
         if (mTransferType == AudioRecord::TRANSFER_CALLBACK)
             status = obtainBufferCb(buffer);
         else
@@ -586,10 +595,52 @@
             const char* ptr = static_cast<const char*>(static_cast<void*>(buffer.mData.get()));
             write(mOutFileFd, ptr, buffer.mCapacity);
         }
+        std::lock_guard l(mMutex);
+        numFramesReceived = mNumFramesReceived;
     }
     return OK;
 }
 
+uint32_t AudioCapture::getMarkerPeriod() const {
+    std::lock_guard l(mMutex);
+    return mMarkerPeriod;
+}
+
+uint32_t AudioCapture::getMarkerPosition() const {
+    std::lock_guard l(mMutex);
+    return mMarkerPosition;
+}
+
+void AudioCapture::setMarkerPeriod(uint32_t markerPeriod) {
+    std::lock_guard l(mMutex);
+    mMarkerPeriod = markerPeriod;
+}
+
+void AudioCapture::setMarkerPosition(uint32_t markerPosition) {
+    std::lock_guard l(mMutex);
+    mMarkerPosition = markerPosition;
+}
+
+uint32_t AudioCapture::waitAndGetReceivedCbMarkerAtPosition() const {
+    std::unique_lock lock(mMutex);
+    android::base::ScopedLockAssertion lock_assertion(mMutex);
+    mMarkerCondition.wait_for(lock, std::chrono::seconds(3), [this]() {
+        android::base::ScopedLockAssertion lock_assertion(mMutex);
+        return mReceivedCbMarkerAtPosition.has_value();
+    });
+    return mReceivedCbMarkerAtPosition.value_or(~0);
+}
+
+uint32_t AudioCapture::waitAndGetReceivedCbMarkerCount() const {
+    std::unique_lock lock(mMutex);
+    android::base::ScopedLockAssertion lock_assertion(mMutex);
+    mMarkerCondition.wait_for(lock, std::chrono::seconds(3), [this]() {
+        android::base::ScopedLockAssertion lock_assertion(mMutex);
+        return mReceivedCbMarkerCount.has_value();
+    });
+    return mReceivedCbMarkerCount.value_or(0);
+}
+
 status_t listAudioPorts(std::vector<audio_port_v7>& portsVec) {
     int attempts = 5;
     status_t status;
diff --git a/media/libaudioclient/tests/audio_test_utils.h b/media/libaudioclient/tests/audio_test_utils.h
index 40c3365..022ecf3 100644
--- a/media/libaudioclient/tests/audio_test_utils.h
+++ b/media/libaudioclient/tests/audio_test_utils.h
@@ -146,8 +146,8 @@
     ~AudioCapture();
     size_t onMoreData(const AudioRecord::Buffer& buffer) override EXCLUDES(mMutex);
     void onOverrun() override;
-    void onMarker(uint32_t markerPosition) override;
-    void onNewPos(uint32_t newPos) override;
+    void onMarker(uint32_t markerPosition) override EXCLUDES(mMutex);
+    void onNewPos(uint32_t newPos) override EXCLUDES(mMutex);
     void onNewIAudioRecord() override;
     status_t create();
     status_t setRecordDuration(float durationInSec);
@@ -157,20 +157,19 @@
     status_t start(AudioSystem::sync_event_t event = AudioSystem::SYNC_EVENT_NONE,
                    audio_session_t triggerSession = AUDIO_SESSION_NONE);
     status_t obtainBufferCb(RawBuffer& buffer) EXCLUDES(mMutex);
-    status_t obtainBuffer(RawBuffer& buffer);
-    status_t audioProcess();
+    status_t obtainBuffer(RawBuffer& buffer) EXCLUDES(mMutex);
+    status_t audioProcess() EXCLUDES(mMutex);
     status_t stop() EXCLUDES(mMutex);
+    uint32_t getMarkerPeriod() const EXCLUDES(mMutex);
+    uint32_t getMarkerPosition() const EXCLUDES(mMutex);
+    void setMarkerPeriod(uint32_t markerPeriod) EXCLUDES(mMutex);
+    void setMarkerPosition(uint32_t markerPosition) EXCLUDES(mMutex);
+    uint32_t waitAndGetReceivedCbMarkerAtPosition() const EXCLUDES(mMutex);
+    uint32_t waitAndGetReceivedCbMarkerCount() const EXCLUDES(mMutex);
 
-    uint32_t mFrameCount;
-    uint32_t mNotificationFrames;
-    int64_t mNumFramesToRecord;
-    int64_t mNumFramesReceived;
-    int64_t mNumFramesLost;
-    uint32_t mMarkerPosition;
-    uint32_t mMarkerPeriod;
-    uint32_t mReceivedCbMarkerAtPosition;
-    uint32_t mReceivedCbMarkerCount;
-    bool mBufferOverrun;
+    uint32_t mFrameCount = 0;
+    uint32_t mNotificationFrames = 0;
+    int64_t mNumFramesToRecord = 0;
 
     enum State {
         REC_NO_INIT,
@@ -191,14 +190,23 @@
 
     size_t mMaxBytesPerCallback = 2048;
     sp<AudioRecord> mRecord;
-    State mState;
-    bool mStopRecording GUARDED_BY(mMutex);
+    State mState = REC_NO_INIT;
+    bool mStopRecording GUARDED_BY(mMutex) = false;
     std::string mFileName;
     int mOutFileFd = -1;
 
     mutable std::mutex mMutex;
     std::condition_variable mCondition;
     std::deque<RawBuffer> mBuffersReceived GUARDED_BY(mMutex);
+
+    mutable std::condition_variable mMarkerCondition;
+    uint32_t mMarkerPeriod GUARDED_BY(mMutex) = 0;
+    uint32_t mMarkerPosition GUARDED_BY(mMutex) = 0;
+    std::optional<uint32_t> mReceivedCbMarkerCount GUARDED_BY(mMutex);
+    std::optional<uint32_t> mReceivedCbMarkerAtPosition GUARDED_BY(mMutex);
+
+    int64_t mNumFramesReceived GUARDED_BY(mMutex) = 0;
+    int64_t mNumFramesLost GUARDED_BY(mMutex) = 0;
 };
 
 #endif  // AUDIO_TEST_UTILS_H_
diff --git a/media/libaudioclient/tests/audioeffect_tests.cpp b/media/libaudioclient/tests/audioeffect_tests.cpp
index 59d0c6a..791319e 100644
--- a/media/libaudioclient/tests/audioeffect_tests.cpp
+++ b/media/libaudioclient/tests/audioeffect_tests.cpp
@@ -556,8 +556,9 @@
     ASSERT_EQ(NO_ERROR, playback->loadResource("/data/local/tmp/bbb_2ch_24kHz_s16le.raw"));
     EXPECT_EQ(NO_ERROR, playback->create());
     EXPECT_EQ(NO_ERROR, playback->start());
-    EXPECT_TRUE(isEffectExistsOnAudioSession(selectedEffectType, selectedEffectUuid,
-                                             kDefaultOutputEffectPriority - 1, sessionId))
+    ASSERT_EQ(ALREADY_EXISTS,
+              isEffectExistsOnAudioSession(selectedEffectType, selectedEffectUuid,
+                                           kDefaultOutputEffectPriority - 1, sessionId))
             << "Effect should have been added. " << type;
     EXPECT_EQ(NO_ERROR, playback->waitForConsumption());
     playback->stop();
diff --git a/media/libaudioclient/tests/audiorecord_tests.cpp b/media/libaudioclient/tests/audiorecord_tests.cpp
index d122508..f2fee8b 100644
--- a/media/libaudioclient/tests/audiorecord_tests.cpp
+++ b/media/libaudioclient/tests/audiorecord_tests.cpp
@@ -28,6 +28,25 @@
 
 using namespace android;
 
+// Test that the basic constructor returns an object that doesn't crash
+// on stop() or destruction.
+
+TEST(AudioRecordTestBasic, EmptyAudioRecord) {
+    AttributionSourceState attributionSource;
+    attributionSource.packageName = "AudioRecordTest";
+    attributionSource.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(getuid()));
+    attributionSource.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(getpid()));
+    attributionSource.token = sp<BBinder>::make();
+    const auto ar = sp<AudioRecord>::make(attributionSource);
+
+    // test key commands on an unset AudioRecord.
+    EXPECT_EQ(NO_INIT, ar->initCheck());
+    EXPECT_EQ(true, ar->stopped());
+
+    // just don't crash.
+    ar->stop();
+}
+
 class AudioRecordTest : public ::testing::Test {
   public:
     void SetUp() override {
@@ -83,7 +102,10 @@
     }
 
     void TearDown() override {
-        if (mAC) ASSERT_EQ(OK, mAC->stop());
+        if (mAC) {
+            ASSERT_EQ(OK, mAC->stop());
+            mAC.clear();
+        }
     }
 };
 
@@ -149,33 +171,33 @@
 }
 
 TEST_F(AudioRecordTest, TestGetSetMarker) {
-    mAC->mMarkerPosition = (mAC->mNotificationFrames << 3) + (mAC->mNotificationFrames >> 1);
-    EXPECT_EQ(OK, mAC->getAudioRecordHandle()->setMarkerPosition(mAC->mMarkerPosition))
+    mAC->setMarkerPosition((mAC->mNotificationFrames << 3) + (mAC->mNotificationFrames >> 1));
+    EXPECT_EQ(OK, mAC->getAudioRecordHandle()->setMarkerPosition(mAC->getMarkerPosition()))
             << "setMarkerPosition() failed";
     uint32_t marker;
     EXPECT_EQ(OK, mAC->getAudioRecordHandle()->getMarkerPosition(&marker))
             << "getMarkerPosition() failed";
     EXPECT_EQ(OK, mAC->start()) << "start recording failed";
     EXPECT_EQ(OK, mAC->audioProcess()) << "audioProcess failed";
-    // TODO(b/348658586): Properly synchronize callback updates with the test thread.
-    EXPECT_EQ(marker, mAC->mMarkerPosition)
+    EXPECT_EQ(marker, mAC->getMarkerPosition())
             << "configured marker and received marker are different";
-    EXPECT_EQ(mAC->mReceivedCbMarkerAtPosition, mAC->mMarkerPosition)
+    EXPECT_EQ(mAC->waitAndGetReceivedCbMarkerAtPosition(), mAC->getMarkerPosition())
             << "configured marker and received cb marker are different";
 }
 
 TEST_F(AudioRecordTest, TestGetSetMarkerPeriodical) {
-    mAC->mMarkerPeriod = (mAC->mNotificationFrames << 3) + (mAC->mNotificationFrames >> 1);
-    EXPECT_EQ(OK, mAC->getAudioRecordHandle()->setPositionUpdatePeriod(mAC->mMarkerPeriod))
+    mAC->setMarkerPeriod((mAC->mNotificationFrames << 3) + (mAC->mNotificationFrames >> 1));
+    EXPECT_EQ(OK, mAC->getAudioRecordHandle()->setPositionUpdatePeriod(mAC->getMarkerPeriod()))
             << "setPositionUpdatePeriod() failed";
     uint32_t marker;
     EXPECT_EQ(OK, mAC->getAudioRecordHandle()->getPositionUpdatePeriod(&marker))
             << "getPositionUpdatePeriod() failed";
     EXPECT_EQ(OK, mAC->start()) << "start recording failed";
     EXPECT_EQ(OK, mAC->audioProcess()) << "audioProcess failed";
-    // TODO(b/348658586): Properly synchronize callback updates with the test thread.
-    EXPECT_EQ(marker, mAC->mMarkerPeriod) << "configured marker and received marker are different";
-    EXPECT_EQ(mAC->mReceivedCbMarkerCount, mAC->mNumFramesToRecord / mAC->mMarkerPeriod)
+    EXPECT_EQ(marker, mAC->getMarkerPeriod())
+            << "configured marker and received marker are different";
+    EXPECT_EQ(mAC->waitAndGetReceivedCbMarkerCount(),
+              mAC->mNumFramesToRecord / mAC->getMarkerPeriod())
             << "configured marker and received cb marker are different";
 }
 
@@ -202,12 +224,12 @@
         EXPECT_EQ(mSessionId, mAC->getAudioRecordHandle()->getSessionId());
     if (mTransferType != AudioRecord::TRANSFER_CALLBACK) {
         uint32_t marker;
-        mAC->mMarkerPosition = (mAC->mNotificationFrames << 3) + (mAC->mNotificationFrames >> 1);
+        mAC->setMarkerPosition((mAC->mNotificationFrames << 3) + (mAC->mNotificationFrames >> 1));
         EXPECT_EQ(INVALID_OPERATION,
-                  mAC->getAudioRecordHandle()->setMarkerPosition(mAC->mMarkerPosition));
+                  mAC->getAudioRecordHandle()->setMarkerPosition(mAC->getMarkerPosition()));
         EXPECT_EQ(OK, mAC->getAudioRecordHandle()->getMarkerPosition(&marker));
         EXPECT_EQ(INVALID_OPERATION,
-                  mAC->getAudioRecordHandle()->setPositionUpdatePeriod(mAC->mMarkerPosition));
+                  mAC->getAudioRecordHandle()->setPositionUpdatePeriod(mAC->getMarkerPosition()));
         EXPECT_EQ(OK, mAC->getAudioRecordHandle()->getPositionUpdatePeriod(&marker));
     }
     EXPECT_EQ(OK, mAC->start()) << "start recording failed";
diff --git a/media/libaudioclient/tests/audiotrack_tests.cpp b/media/libaudioclient/tests/audiotrack_tests.cpp
index cbe6dc3..cf7d926 100644
--- a/media/libaudioclient/tests/audiotrack_tests.cpp
+++ b/media/libaudioclient/tests/audiotrack_tests.cpp
@@ -25,6 +25,24 @@
 
 using namespace android;
 
+// Test that the basic constructor returns an object that doesn't crash
+// on stop() or destruction.
+
+TEST(AudioTrackTestBasic, EmptyAudioTrack) {
+    AttributionSourceState attributionSource;
+    attributionSource.packageName = "AudioTrackTest";
+    attributionSource.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(getuid()));
+    attributionSource.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(getpid()));
+    attributionSource.token = sp<BBinder>::make();
+    const auto at = sp<AudioTrack>::make(attributionSource);
+
+    EXPECT_EQ(NO_INIT, at->initCheck());
+    EXPECT_EQ(true, at->stopped());
+
+    // ensure we do not crash.
+    at->stop();
+}
+
 TEST(AudioTrackTest, TestPlayTrack) {
     const auto ap = sp<AudioPlayback>::make(44100 /* sampleRate */, AUDIO_FORMAT_PCM_16_BIT,
                                             AUDIO_CHANNEL_OUT_STEREO, AUDIO_OUTPUT_FLAG_NONE,
diff --git a/media/libaudiohal/impl/EffectConversionHelperAidl.cpp b/media/libaudiohal/impl/EffectConversionHelperAidl.cpp
index ff6126d..a13903b 100644
--- a/media/libaudiohal/impl/EffectConversionHelperAidl.cpp
+++ b/media/libaudiohal/impl/EffectConversionHelperAidl.cpp
@@ -29,6 +29,7 @@
 #include <system/audio_effects/effect_visualizer.h>
 
 #include <utils/Log.h>
+#include <Utils.h>
 
 #include "EffectConversionHelperAidl.h"
 #include "EffectProxy.h"
@@ -37,18 +38,20 @@
 namespace effect {
 
 using ::aidl::android::aidl_utils::statusTFromBinderStatus;
+using ::aidl::android::hardware::audio::common::getChannelCount;
 using ::aidl::android::hardware::audio::effect::CommandId;
 using ::aidl::android::hardware::audio::effect::Descriptor;
 using ::aidl::android::hardware::audio::effect::Flags;
 using ::aidl::android::hardware::audio::effect::IEffect;
 using ::aidl::android::hardware::audio::effect::Parameter;
 using ::aidl::android::hardware::audio::effect::State;
+using ::aidl::android::media::audio::common::AudioChannelLayout;
 using ::aidl::android::media::audio::common::AudioDeviceDescription;
 using ::aidl::android::media::audio::common::AudioMode;
 using ::aidl::android::media::audio::common::AudioSource;
-using ::android::hardware::EventFlag;
 using android::effect::utils::EffectParamReader;
 using android::effect::utils::EffectParamWriter;
+using android::hardware::EventFlag;
 
 using ::android::status_t;
 
@@ -519,5 +522,15 @@
     return OK;
 }
 
+size_t EffectConversionHelperAidl::getAudioChannelCount() const {
+    return getChannelCount(mCommon.input.base.channelMask,
+                           ~AudioChannelLayout::LAYOUT_HAPTIC_AB /* mask */);
+}
+
+size_t EffectConversionHelperAidl::getHapticChannelCount() const {
+    return getChannelCount(mCommon.input.base.channelMask,
+                           AudioChannelLayout::LAYOUT_HAPTIC_AB /* mask */);
+}
+
 }  // namespace effect
 }  // namespace android
diff --git a/media/libaudiohal/impl/EffectConversionHelperAidl.h b/media/libaudiohal/impl/EffectConversionHelperAidl.h
index 29c5a83..50b47a9 100644
--- a/media/libaudiohal/impl/EffectConversionHelperAidl.h
+++ b/media/libaudiohal/impl/EffectConversionHelperAidl.h
@@ -49,6 +49,9 @@
     ::aidl::android::hardware::audio::effect::Descriptor getDescriptor() const;
     status_t reopen();
 
+    size_t getAudioChannelCount() const;
+    size_t getHapticChannelCount() const;
+
     uint8_t mOutputAccessMode = EFFECT_BUFFER_ACCESS_WRITE;
 
   protected:
diff --git a/media/libaudiohal/impl/EffectHalAidl.cpp b/media/libaudiohal/impl/EffectHalAidl.cpp
index 0c7f78e..ea4dbf6 100644
--- a/media/libaudiohal/impl/EffectHalAidl.cpp
+++ b/media/libaudiohal/impl/EffectHalAidl.cpp
@@ -15,6 +15,7 @@
  */
 
 #include <cstddef>
+#include <cstring>
 #define LOG_TAG "EffectHalAidl"
 //#define LOG_NDEBUG 0
 
@@ -128,6 +129,7 @@
                ::aidl::android::hardware::audio::effect::getEffectTypeUuidHapticGenerator()) {
         mConversion = std::make_unique<android::effect::AidlConversionHapticGenerator>(
                 effect, sessionId, ioId, desc, mIsProxyEffect);
+        mIsHapticGenerator = true;
     } else if (typeUuid ==
                ::aidl::android::hardware::audio::effect::getEffectTypeUuidLoudnessEnhancer()) {
         mConversion = std::make_unique<android::effect::AidlConversionLoudnessEnhancer>(
@@ -200,7 +202,7 @@
                               ::android::OK == efGroup->wait(kEventFlagDataMqUpdate, &efState,
                                                              1 /* ns */, true /* retry */) &&
                               efState & kEventFlagDataMqUpdate) {
-        ALOGV("%s %s V%d receive dataMQUpdate eventFlag from HAL", __func__, effectName.c_str(),
+        ALOGD("%s %s V%d receive dataMQUpdate eventFlag from HAL", __func__, effectName.c_str(),
               halVersion);
 
         mConversion->reopen();
@@ -216,7 +218,7 @@
     }
 
     size_t available = inputQ->availableToWrite();
-    size_t floatsToWrite = std::min(available, mInBuffer->getSize() / sizeof(float));
+    const size_t floatsToWrite = std::min(available, mInBuffer->getSize() / sizeof(float));
     if (floatsToWrite == 0) {
         ALOGE("%s not able to write, floats in buffer %zu, space in FMQ %zu", __func__,
               mInBuffer->getSize() / sizeof(float), available);
@@ -248,7 +250,7 @@
     }
 
     available = outputQ->availableToRead();
-    size_t floatsToRead = std::min(available, mOutBuffer->getSize() / sizeof(float));
+    const size_t floatsToRead = std::min(available, mOutBuffer->getSize() / sizeof(float));
     if (floatsToRead == 0) {
         ALOGE("%s not able to read, buffer space %zu, floats in FMQ %zu", __func__,
               mOutBuffer->getSize() / sizeof(float), available);
@@ -257,7 +259,8 @@
 
     float *outputRawBuffer = mOutBuffer->audioBuffer()->f32;
     std::vector<float> tempBuffer;
-    if (mConversion->mOutputAccessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
+    // keep original data in the output buffer for accumulate mode or HapticGenerator effect
+    if (mConversion->mOutputAccessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE || mIsHapticGenerator) {
         tempBuffer.resize(floatsToRead);
         outputRawBuffer = tempBuffer.data();
     }
@@ -267,7 +270,31 @@
               mOutBuffer->audioBuffer());
         return INVALID_OPERATION;
     }
-    if (mConversion->mOutputAccessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
+
+    // HapticGenerator needs special handling because the generated haptic samples must be appended
+    // to the end of the audio samples. The generated haptic data comes back from the HAL in the
+    // output FMQ at the same offset as the input buffer, so we skip the audio samples in the
+    // output FMQ and append the haptic samples to the end of the input buffer.
+    if (mIsHapticGenerator) {
+        static constexpr float kHalFloatSampleLimit = 2.0f;
+        assert(floatsToRead == floatsToWrite);
+        const auto audioChNum = mConversion->getAudioChannelCount();
+        const auto audioSamples =
+                floatsToWrite * audioChNum / (audioChNum + mConversion->getHapticChannelCount());
+        // accumulate or copy input to output, haptic samples remains all zero
+        if (mConversion->mOutputAccessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
+            accumulate_float(mOutBuffer->audioBuffer()->f32, mInBuffer->audioBuffer()->f32,
+                             audioSamples);
+        } else {
+            memcpy_to_float_from_float_with_clamping(mOutBuffer->audioBuffer()->f32,
+                                                     mInBuffer->audioBuffer()->f32, audioSamples,
+                                                     kHalFloatSampleLimit);
+        }
+        // append the haptic samples at the end of the input audio samples
+        memcpy_to_float_from_float_with_clamping(mInBuffer->audioBuffer()->f32 + audioSamples,
+                                                 outputRawBuffer + audioSamples,
+                                                 floatsToRead - audioSamples, kHalFloatSampleLimit);
+    } else if (mConversion->mOutputAccessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
         accumulate_float(mOutBuffer->audioBuffer()->f32, outputRawBuffer, floatsToRead);
     }
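The audio/haptic split above follows from the channel layout: in each frame the audio channels come first and the haptic channels are appended, so the audio portion of the buffer is totalFloats * audioCh / (audioCh + hapticCh). A standalone sketch of that arithmetic with illustrative numbers (stereo audio plus one haptic channel):

    #include <cstddef>
    #include <cstdio>

    // For an interleaved buffer carrying audio + haptic channels, compute how many
    // floats are audio samples; the remainder are haptic samples (as in the split above).
    static size_t audioSampleCount(size_t totalFloats, size_t audioCh, size_t hapticCh) {
        return totalFloats * audioCh / (audioCh + hapticCh);
    }

    int main() {
        // Illustrative: 480 frames of 2 audio channels + 1 haptic channel
        // = 1440 floats total, of which 960 are audio and 480 are haptic.
        const size_t total = 480 * (2 + 1);
        const size_t audio = audioSampleCount(total, 2, 1);
        std::printf("audio floats: %zu, haptic floats: %zu\n", audio, total - audio);
        return 0;
    }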
 
diff --git a/media/libaudiohal/impl/EffectHalAidl.h b/media/libaudiohal/impl/EffectHalAidl.h
index bbcb7e2..4f7de7c 100644
--- a/media/libaudiohal/impl/EffectHalAidl.h
+++ b/media/libaudiohal/impl/EffectHalAidl.h
@@ -73,6 +73,7 @@
     const int32_t mSessionId;
     const int32_t mIoId;
     const bool mIsProxyEffect;
+    bool mIsHapticGenerator = false;
 
     std::unique_ptr<EffectConversionHelperAidl> mConversion;
 
diff --git a/media/libaudiohal/impl/EffectsFactoryHalAidl.cpp b/media/libaudiohal/impl/EffectsFactoryHalAidl.cpp
index 64cc7ed..2753906 100644
--- a/media/libaudiohal/impl/EffectsFactoryHalAidl.cpp
+++ b/media/libaudiohal/impl/EffectsFactoryHalAidl.cpp
@@ -42,6 +42,8 @@
 using ::aidl::android::hardware::audio::effect::Descriptor;
 using ::aidl::android::hardware::audio::effect::IFactory;
 using ::aidl::android::hardware::audio::effect::Processing;
+using ::aidl::android::media::audio::common::AudioDevice;
+using ::aidl::android::media::audio::common::AudioDeviceAddress;
 using ::aidl::android::media::audio::common::AudioSource;
 using ::aidl::android::media::audio::common::AudioStreamType;
 using ::aidl::android::media::audio::common::AudioUuid;
@@ -174,9 +176,6 @@
     if (uuid == nullptr || effect == nullptr) {
         return BAD_VALUE;
     }
-    if (sessionId == AUDIO_SESSION_DEVICE && ioId == AUDIO_IO_HANDLE_NONE) {
-        return INVALID_OPERATION;
-    }
     ALOGV("%s session %d ioId %d", __func__, sessionId, ioId);
 
     AudioUuid aidlUuid =
@@ -284,7 +283,8 @@
 
     auto getConfigProcessingWithAidlProcessing =
             [&](const auto& aidlProcess, std::vector<effectsConfig::InputStream>& preprocess,
-                std::vector<effectsConfig::OutputStream>& postprocess) {
+                std::vector<effectsConfig::OutputStream>& postprocess,
+                std::vector<effectsConfig::DeviceEffects>& deviceprocess) {
                 if (aidlProcess.type.getTag() == Processing::Type::streamType) {
                     AudioStreamType aidlType =
                             aidlProcess.type.template get<Processing::Type::streamType>();
@@ -316,6 +316,25 @@
                     effectsConfig::InputStream stream = {.type = type.value(),
                                                          .effects = std::move(effects)};
                     preprocess.emplace_back(stream);
+                } else if (aidlProcess.type.getTag() == Processing::Type::device) {
+                    AudioDevice aidlDevice =
+                            aidlProcess.type.template get<Processing::Type::device>();
+                    std::vector<std::shared_ptr<const effectsConfig::Effect>> effects;
+                    std::transform(aidlProcess.ids.begin(), aidlProcess.ids.end(),
+                                   std::back_inserter(effects), getConfigEffectWithDescriptor);
+                    audio_devices_t type;
+                    char address[AUDIO_DEVICE_MAX_ADDRESS_LEN];
+                    status_t status = ::aidl::android::aidl2legacy_AudioDevice_audio_device(
+                            aidlDevice, &type, address);
+                    if (status != NO_ERROR) {
+                        ALOGE("%s device effect has invalid device type / address", __func__);
+                        return;
+                    }
+                    effectsConfig::DeviceEffects device = {
+                            {.type = type, .effects = std::move(effects)},
+                            .address = address,
+                    };
+                    deviceprocess.emplace_back(device);
                 }
             };
 
@@ -323,17 +342,21 @@
             [&]() -> std::shared_ptr<const effectsConfig::Processings> {
                 std::vector<effectsConfig::InputStream> preprocess;
                 std::vector<effectsConfig::OutputStream> postprocess;
+                std::vector<effectsConfig::DeviceEffects> deviceprocess;
                 for (const auto& processing : mAidlProcessings) {
-                    getConfigProcessingWithAidlProcessing(processing, preprocess, postprocess);
+                    getConfigProcessingWithAidlProcessing(processing, preprocess, postprocess,
+                                                          deviceprocess);
                 }
 
-                if (0 == preprocess.size() && 0 == postprocess.size()) {
+                if (0 == preprocess.size() && 0 == postprocess.size() &&
+                    0 == deviceprocess.size()) {
                     return nullptr;
                 }
 
                 return std::make_shared<const effectsConfig::Processings>(
                         effectsConfig::Processings({.preprocess = std::move(preprocess),
-                                                    .postprocess = std::move(postprocess)}));
+                                                    .postprocess = std::move(postprocess),
+                                                    .deviceprocess = std::move(deviceprocess)}));
             }());
 
     return processings;
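For orientation, a simplified sketch (hypothetical stand-in types, not the real effectsConfig/AIDL ones) of the bucketing pattern extended above, where each Processing entry is routed into a preprocess, postprocess, or newly added device-process list according to its type tag:

    #include <string>
    #include <vector>

    // Hypothetical stand-ins for the AIDL Processing and effectsConfig types.
    enum class Tag { streamType, source, device };
    struct Processing { Tag tag; std::string payload; };
    struct InputStream { std::string type; };
    struct OutputStream { std::string type; };
    struct DeviceEffects { std::string address; };

    void bucket(const std::vector<Processing>& all,
                std::vector<InputStream>& preprocess,
                std::vector<OutputStream>& postprocess,
                std::vector<DeviceEffects>& deviceprocess) {
        for (const auto& p : all) {
            switch (p.tag) {
                case Tag::source:     preprocess.push_back({p.payload});    break;
                case Tag::streamType: postprocess.push_back({p.payload});   break;
                case Tag::device:     deviceprocess.push_back({p.payload}); break;  // new bucket
            }
        }
    }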
diff --git a/media/libaudiohal/impl/Hal2AidlMapper.cpp b/media/libaudiohal/impl/Hal2AidlMapper.cpp
index 052522f..a01ac4b 100644
--- a/media/libaudiohal/impl/Hal2AidlMapper.cpp
+++ b/media/libaudiohal/impl/Hal2AidlMapper.cpp
@@ -929,6 +929,7 @@
                 !status.isOk()) {
             ALOGE("%s: error while resetting port config %d: %s",
                     __func__, portConfigId, status.getDescription().c_str());
+            return;
         }
         mPortConfigs.erase(it);
         return;
diff --git a/media/libaudiohal/impl/StreamHalAidl.cpp b/media/libaudiohal/impl/StreamHalAidl.cpp
index 6c0dc76..94de8ea 100644
--- a/media/libaudiohal/impl/StreamHalAidl.cpp
+++ b/media/libaudiohal/impl/StreamHalAidl.cpp
@@ -84,8 +84,8 @@
           mStream(stream),
           mVendorExt(vext),
           mLastReplyLifeTimeNs(
-                  std::min(static_cast<size_t>(100),
-                          2 * mContext.getBufferDurationMs(mConfig.sample_rate))
+                  std::min(static_cast<size_t>(20),
+                           mContext.getBufferDurationMs(mConfig.sample_rate))
                   * NANOS_PER_MILLISECOND)
 {
     ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
@@ -445,6 +445,11 @@
                    state == StreamDescriptor::State::TRANSFER_PAUSED ||
                    state == StreamDescriptor::State::DRAIN_PAUSED) {
             return sendCommand(makeHalCommand<HalCommand::Tag::start>(), reply);
+        } else if (state == StreamDescriptor::State::ACTIVE ||
+                   state == StreamDescriptor::State::TRANSFERRING ||
+                   state == StreamDescriptor::State::DRAINING) {
+            ALOGD("%s: already in stream state: %s", __func__, toString(state).c_str());
+            return OK;
         } else {
             ALOGE("%s: unexpected stream state: %s (expected IDLE or one of *PAUSED states)",
                         __func__, toString(state).c_str());
@@ -791,6 +796,14 @@
 }
 
 status_t StreamOutHalAidl::drain(bool earlyNotify) {
+    if (!mStream) return NO_INIT;
+
+    if (const auto state = getState(); state == StreamDescriptor::State::IDLE) {
+        ALOGD("%p %s stream already in IDLE state", this, __func__);
+        if (mContext.isAsynchronous()) onDrainReady();
+        return OK;
+    }
+
     return StreamHalAidl::drain(earlyNotify);
 }
 
diff --git a/media/libaudiohal/impl/StreamHalAidl.h b/media/libaudiohal/impl/StreamHalAidl.h
index 9cb2cff..0587640 100644
--- a/media/libaudiohal/impl/StreamHalAidl.h
+++ b/media/libaudiohal/impl/StreamHalAidl.h
@@ -215,6 +215,11 @@
 
     ~StreamHalAidl() override;
 
+    ::aidl::android::hardware::audio::core::StreamDescriptor::State getState() {
+        std::lock_guard l(mLock);
+        return mLastReply.state;
+    }
+
     status_t getLatency(uint32_t *latency);
 
     // Always returns non-negative values.
@@ -268,10 +273,6 @@
         result.format = config.format;
         return result;
     }
-    ::aidl::android::hardware::audio::core::StreamDescriptor::State getState() {
-        std::lock_guard l(mLock);
-        return mLastReply.state;
-    }
     // Note: Since `sendCommand` takes mLock while holding mCommandReplyLock, never call
     // it with `mLock` being held.
     status_t sendCommand(
diff --git a/media/libeffects/hapticgenerator/Android.bp b/media/libeffects/hapticgenerator/Android.bp
index 396662e..e4ac38e 100644
--- a/media/libeffects/hapticgenerator/Android.bp
+++ b/media/libeffects/hapticgenerator/Android.bp
@@ -37,6 +37,14 @@
     header_libs: [
         "libaudioeffects",
     ],
+    cflags: [
+        // This is needed for the non-zero coefficients optimization for
+        // BiquadFilter. Try the biquad_filter_benchmark test in audio_utils
+        // with/without `-ffast-math` for more context.
+        "-ffast-math",
+        "-fhonor-infinities",
+        "-fhonor-nans",
+    ],
     relative_install_path: "soundfx",
 }
 
@@ -59,12 +67,6 @@
         "-O2",
         "-Wall",
         "-Werror",
-        // This is needed for the non-zero coefficients optimization for
-        // BiquadFilter. Try the biquad_filter_benchmark test in audio_utils
-        // with/without `-ffast-math` for more context.
-        "-ffast-math",
-        "-fhonor-infinities",
-        "-fhonor-nans",
         "-fvisibility=hidden",
     ],
 }
diff --git a/media/libeffects/hapticgenerator/aidl/HapticGeneratorContext.cpp b/media/libeffects/hapticgenerator/aidl/HapticGeneratorContext.cpp
index 0a04250..9b2f443 100644
--- a/media/libeffects/hapticgenerator/aidl/HapticGeneratorContext.cpp
+++ b/media/libeffects/hapticgenerator/aidl/HapticGeneratorContext.cpp
@@ -14,30 +14,47 @@
  * limitations under the License.
  */
 
-#include <cstddef>
 #define LOG_TAG "AHAL_HapticGeneratorContext"
 
-#include <Utils.h>
+#include "HapticGeneratorContext.h"
 #include <android-base/logging.h>
 #include <android-base/parsedouble.h>
 #include <android-base/properties.h>
+#include <audio_utils/primitives.h>
+#include <audio_utils/safe_math.h>
+#include <Utils.h>
 
-#include "HapticGeneratorContext.h"
+#include <cstddef>
+
+using aidl::android::hardware::audio::common::getChannelCount;
+using aidl::android::hardware::audio::common::getPcmSampleSizeInBytes;
+using aidl::android::media::audio::common::AudioChannelLayout;
 
 namespace aidl::android::hardware::audio::effect {
 
 HapticGeneratorContext::HapticGeneratorContext(int statusDepth, const Parameter::Common& common)
     : EffectContext(statusDepth, common) {
     mState = HAPTIC_GENERATOR_STATE_UNINITIALIZED;
-    mSampleRate = common.input.base.sampleRate;
-    mFrameCount = common.input.frameCount;
-    init_params(common.input.base.channelMask, common.output.base.channelMask);
+
+    mParams.mMaxVibratorScale = HapticGenerator::VibratorScale::MUTE;
+    mParams.mVibratorInfo.resonantFrequencyHz = DEFAULT_RESONANT_FREQUENCY;
+    mParams.mVibratorInfo.qFactor = DEFAULT_BSF_ZERO_Q;
+    mParams.mVibratorInfo.maxAmplitude = 0.f;
+
+    init_params(common);
+    mState = HAPTIC_GENERATOR_STATE_INITIALIZED;
 }
 
 HapticGeneratorContext::~HapticGeneratorContext() {
     mState = HAPTIC_GENERATOR_STATE_UNINITIALIZED;
 }
 
+// Override EffectContext::setCommon for HapticGenerator because init_params needs to be re-run
+RetCode HapticGeneratorContext::setCommon(const Parameter::Common& common) {
+    init_params(common);
+    return EffectContext::setCommon(common);
+}
+
 RetCode HapticGeneratorContext::enable() {
     if (mState != HAPTIC_GENERATOR_STATE_INITIALIZED) {
         return RetCode::ERROR_EFFECT_LIB_ERROR;
@@ -75,14 +92,15 @@
     for (const auto& [id, vibratorScale] : mParams.mHapticScales) {
         mParams.mMaxVibratorScale = std::max(mParams.mMaxVibratorScale, vibratorScale);
     }
+    LOG(INFO) << " HapticGenerator VibratorScale set to " << toString(mParams.mMaxVibratorScale);
     return RetCode::SUCCESS;
 }
 
-HapticGenerator::VibratorInformation HapticGeneratorContext::getHgVibratorInformation() {
+HapticGenerator::VibratorInformation HapticGeneratorContext::getHgVibratorInformation() const {
     return mParams.mVibratorInfo;
 }
 
-std::vector<HapticGenerator::HapticScale> HapticGeneratorContext::getHgHapticScales() {
+std::vector<HapticGenerator::HapticScale> HapticGeneratorContext::getHgHapticScales() const {
     std::vector<HapticGenerator::HapticScale> result;
     for (const auto& [id, vibratorScale] : mParams.mHapticScales) {
         result.push_back({id, vibratorScale});
@@ -93,6 +111,15 @@
 RetCode HapticGeneratorContext::setHgVibratorInformation(
         const HapticGenerator::VibratorInformation& vibratorInfo) {
     mParams.mVibratorInfo = vibratorInfo;
+    if (::android::audio_utils::safe_isnan(mParams.mVibratorInfo.resonantFrequencyHz)) {
+        LOG(WARNING) << __func__ << " resonantFrequencyHz reset from nan to "
+                     << DEFAULT_RESONANT_FREQUENCY;
+        mParams.mVibratorInfo.resonantFrequencyHz = DEFAULT_RESONANT_FREQUENCY;
+    }
+    if (::android::audio_utils::safe_isnan(mParams.mVibratorInfo.qFactor)) {
+        LOG(WARNING) << __func__ << " qFactor reset from nan to " << DEFAULT_BSF_ZERO_Q;
+        mParams.mVibratorInfo.qFactor = DEFAULT_BSF_ZERO_Q;
+    }
 
     if (mProcessorsRecord.bpf != nullptr) {
         mProcessorsRecord.bpf->setCoefficients(::android::audio_effect::haptic_generator::bpfCoefs(
@@ -117,15 +144,8 @@
     auto frameSize = getInputFrameSize();
     RETURN_VALUE_IF(0 == frameSize, status, "zeroFrameSize");
 
-    // The audio data must not be modified but just written to
-    // output buffer according the access mode.
-    if (in != out) {
-        for (int i = 0; i < samples; i++) {
-            out[i] = in[i];
-        }
-    }
-
     if (mState != HAPTIC_GENERATOR_STATE_ACTIVE) {
+        LOG(WARNING) << " HapticGenerator in wrong state " << mState;
         return status;
     }
 
@@ -135,7 +155,8 @@
     }
 
     // Resize buffer if the haptic sample count is greater than buffer size.
-    size_t hapticSampleCount = mFrameCount * mParams.mHapticChannelCount;
+    const size_t hapticSampleCount = mFrameCount * mParams.mHapticChannelCount;
+    const size_t audioSampleCount = mFrameCount * mParams.mAudioChannelCount;
     if (hapticSampleCount > mInputBuffer.size()) {
         // The inputBuffer and outputBuffer must have the same size, which must be at least
         // the haptic sample count.
@@ -155,45 +176,45 @@
             runProcessingChain(mInputBuffer.data(), mOutputBuffer.data(), mFrameCount);
     ::android::os::scaleHapticData(
             hapticOutBuffer, hapticSampleCount,
-            {/*level=*/static_cast<::android::os::HapticLevel>(mParams.mMaxVibratorScale) },
-            mParams.mVibratorInfo.qFactor);
+            {static_cast<::android::os::HapticLevel>(mParams.mMaxVibratorScale)} /* scale */,
+            mParams.mVibratorInfo.maxAmplitude /* limit */);
 
     // For haptic data, the haptic playback thread will copy the data from effect input
     // buffer, which contains haptic data at the end of the buffer, directly to sink buffer.
-    // In that case, copy haptic data to input buffer instead of output buffer.
-    // Note: this may not work with rpc/binder calls
-    for (size_t i = 0; i < hapticSampleCount; ++i) {
-        in[samples + i] = hapticOutBuffer[i];
-    }
-    return {STATUS_OK, samples, static_cast<int32_t>(samples + hapticSampleCount)};
+    // In AIDL only the output buffer is sent back to the audio framework via FMQ. Here the effect
+    // copies the generated haptic data to the target position in the output buffer; the framework
+    // then appends it at the same position in the input buffer.
+    memcpy_to_float_from_float_with_clamping(out + audioSampleCount, hapticOutBuffer,
+                                             hapticSampleCount, 2.f /* absMax */);
+    return {STATUS_OK, samples, samples};
 }
 
-void HapticGeneratorContext::init_params(media::audio::common::AudioChannelLayout inputChMask,
-                                         media::audio::common::AudioChannelLayout outputChMask) {
-    mParams.mMaxVibratorScale = HapticGenerator::VibratorScale::MUTE;
-    mParams.mVibratorInfo.resonantFrequencyHz = DEFAULT_RESONANT_FREQUENCY;
-    mParams.mVibratorInfo.qFactor = DEFAULT_BSF_ZERO_Q;
+void HapticGeneratorContext::init_params(const Parameter::Common& common) {
+    mSampleRate = common.input.base.sampleRate;
+    mFrameCount = common.input.frameCount;
 
     mParams.mAudioChannelCount = ::aidl::android::hardware::audio::common::getChannelCount(
-            inputChMask, ~media::audio::common::AudioChannelLayout::LAYOUT_HAPTIC_AB);
+            common.input.base.channelMask,
+            ~media::audio::common::AudioChannelLayout::LAYOUT_HAPTIC_AB);
     mParams.mHapticChannelCount = ::aidl::android::hardware::audio::common::getChannelCount(
-            outputChMask, media::audio::common::AudioChannelLayout::LAYOUT_HAPTIC_AB);
+            common.output.base.channelMask,
+            media::audio::common::AudioChannelLayout::LAYOUT_HAPTIC_AB);
     LOG_ALWAYS_FATAL_IF(mParams.mHapticChannelCount > 2, "haptic channel count is too large");
     for (int i = 0; i < mParams.mHapticChannelCount; ++i) {
         // By default, use the first audio channel to generate haptic channels.
         mParams.mHapticChannelSource[i] = 0;
     }
-
-    mState = HAPTIC_GENERATOR_STATE_INITIALIZED;
+    configure();
+    LOG(DEBUG) << " HapticGenerator init context:\n" << contextToString();
 }
 
-float HapticGeneratorContext::getDistortionOutputGain() {
+float HapticGeneratorContext::getDistortionOutputGain() const {
     float distortionOutputGain = getFloatProperty(
             "vendor.audio.hapticgenerator.distortion.output.gain", DEFAULT_DISTORTION_OUTPUT_GAIN);
     return distortionOutputGain;
 }
 
-float HapticGeneratorContext::getFloatProperty(const std::string& key, float defaultValue) {
+float HapticGeneratorContext::getFloatProperty(const std::string& key, float defaultValue) const {
     float result;
     std::string value = ::android::base::GetProperty(key, "");
     if (!value.empty() && ::android::base::ParseFloat(value, &result)) {
@@ -322,4 +343,34 @@
     return in;
 }
 
+std::string HapticGeneratorContext::paramToString(const struct HapticGeneratorParam& param) const {
+    std::stringstream ss;
+    ss << "\t\ttHapticGenerator Parameters:\n";
+    ss << "\t\t- mHapticChannelCount: " << param.mHapticChannelCount << '\n';
+    ss << "\t\t- mAudioChannelCount: " << param.mAudioChannelCount << '\n';
+    ss << "\t\t- mHapticChannelSource: " << param.mHapticChannelSource[0] << ", "
+       << param.mHapticChannelSource[1] << '\n';
+    ss << "\t\t- mMaxVibratorScale: " << ::android::internal::ToString(param.mMaxVibratorScale)
+       << '\n';
+    ss << "\t\t- mVibratorInfo: " << param.mVibratorInfo.toString() << '\n';
+    for (const auto& it : param.mHapticScales)
+        ss << "\t\t\t" << it.first << ": " << toString(it.second) << '\n';
+
+    return ss.str();
+}
+
+std::string HapticGeneratorContext::contextToString() const {
+    std::stringstream ss;
+    ss << "\t\tHapticGenerator Context:\n";
+    ss << "\t\t- state: " << mState << '\n';
+    ss << "\t\t- bpf Q: " << DEFAULT_BPF_Q << '\n';
+    ss << "\t\t- slow env normalization power: " << DEFAULT_SLOW_ENV_NORMALIZATION_POWER << '\n';
+    ss << "\t\t- distortion corner frequency: " << DEFAULT_DISTORTION_CORNER_FREQUENCY << '\n';
+    ss << "\t\t- distortion input gain: " << DEFAULT_DISTORTION_INPUT_GAIN << '\n';
+    ss << "\t\t- distortion cube threshold: " << DEFAULT_DISTORTION_CUBE_THRESHOLD << '\n';
+    ss << "\t\t- distortion output gain: " << getDistortionOutputGain() << '\n';
+    ss << "\t\tHapticGenerator Parameters:\n" << paramToString(mParams) << "\n";
+    return ss.str();
+}
+
 }  // namespace aidl::android::hardware::audio::effect
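As a side note, a small standalone sketch of the NaN-guard defaulting used in setHgVibratorInformation above; the struct and constant names here are illustrative, mirroring DEFAULT_RESONANT_FREQUENCY and DEFAULT_BSF_ZERO_Q:

    #include <cmath>
    #include <iostream>

    struct VibratorInfo { float resonantFrequencyHz; float qFactor; };

    constexpr float kDefaultResonantFrequency = 150.0f;  // mirrors DEFAULT_RESONANT_FREQUENCY
    constexpr float kDefaultZeroQ = 8.0f;                // mirrors DEFAULT_BSF_ZERO_Q

    // Reset parameters that arrive as NaN over AIDL to compile-time defaults.
    VibratorInfo sanitize(VibratorInfo info) {
        if (std::isnan(info.resonantFrequencyHz)) info.resonantFrequencyHz = kDefaultResonantFrequency;
        if (std::isnan(info.qFactor)) info.qFactor = kDefaultZeroQ;
        return info;
    }

    int main() {
        const VibratorInfo v = sanitize({NAN, NAN});
        std::cout << v.resonantFrequencyHz << " " << v.qFactor << "\n";  // prints "150 8"
        return 0;
    }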
diff --git a/media/libeffects/hapticgenerator/aidl/HapticGeneratorContext.h b/media/libeffects/hapticgenerator/aidl/HapticGeneratorContext.h
index 3a2ad1c..8a736e3 100644
--- a/media/libeffects/hapticgenerator/aidl/HapticGeneratorContext.h
+++ b/media/libeffects/hapticgenerator/aidl/HapticGeneratorContext.h
@@ -16,11 +16,13 @@
 
 #pragma once
 
-#include <vibrator/ExternalVibrationUtils.h>
-#include <map>
-
-#include "Processors.h"
 #include "effect-impl/EffectContext.h"
+#include "Processors.h"
+
+#include <vibrator/ExternalVibrationUtils.h>
+
+#include <cstddef>
+#include <map>
 
 namespace aidl::android::hardware::audio::effect {
 
@@ -39,7 +41,6 @@
     int mHapticChannelCount;
     int mAudioChannelCount;
 
-    HapticGenerator::HapticScale mHapticScale;
     std::map<int, HapticGenerator::VibratorScale> mHapticScales;
     // max intensity will be used to scale haptic data.
     HapticGenerator::VibratorScale mMaxVibratorScale;
@@ -69,13 +70,15 @@
     void reset();
 
     RetCode setHgHapticScales(const std::vector<HapticGenerator::HapticScale>& hapticScales);
-    std::vector<HapticGenerator::HapticScale> getHgHapticScales();
+    std::vector<HapticGenerator::HapticScale> getHgHapticScales() const;
 
     RetCode setHgVibratorInformation(const HapticGenerator::VibratorInformation& vibratorInfo);
-    HapticGenerator::VibratorInformation getHgVibratorInformation();
+    HapticGenerator::VibratorInformation getHgVibratorInformation() const;
 
     IEffect::Status process(float* in, float* out, int samples);
 
+    RetCode setCommon(const Parameter::Common& common) override;
+
   private:
     static constexpr float DEFAULT_RESONANT_FREQUENCY = 150.0f;
     static constexpr float DEFAULT_BSF_ZERO_Q = 8.0f;
@@ -108,15 +111,17 @@
     // intermediate buffer in the generating algorithm.
     std::vector<float> mOutputBuffer;
 
-    void init_params(media::audio::common::AudioChannelLayout inputChMask,
-                     media::audio::common::AudioChannelLayout outputChMask);
+    void init_params(const Parameter::Common& common);
     void configure();
 
-    float getDistortionOutputGain();
-    float getFloatProperty(const std::string& key, float defaultValue);
+    float getDistortionOutputGain() const;
+    float getFloatProperty(const std::string& key, float defaultValue) const;
     void addBiquadFilter(std::shared_ptr<HapticBiquadFilter> filter);
     void buildProcessingChain();
     float* runProcessingChain(float* buf1, float* buf2, size_t frameCount);
+
+    std::string paramToString(const struct HapticGeneratorParam& param) const;
+    std::string contextToString() const;
 };
 
 }  // namespace aidl::android::hardware::audio::effect
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 3ab32f0..736dd8a 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -1334,10 +1334,10 @@
         // cause out-of-memory due to large input buffer size. And audio recording
         // probably doesn't make sense in the scenario, since the slow-down factor
         // is probably huge (eg. mSampleRate=48K, mCaptureFps=240, mFrameRate=1).
-        const static int32_t SAMPLE_RATE_HZ_MAX = 192000;
+        const static int32_t kSampleRateHzMax = 192000;
         sourceSampleRate =
                 (mSampleRate * mCaptureFps + mFrameRate / 2) / mFrameRate;
-        if (sourceSampleRate < mSampleRate || sourceSampleRate > SAMPLE_RATE_HZ_MAX) {
+        if (sourceSampleRate < mSampleRate || sourceSampleRate > kSampleRateHzMax) {
             ALOGE("source sample rate out of range! "
                     "(mSampleRate %d, mCaptureFps %.2f, mFrameRate %d",
                     mSampleRate, mCaptureFps, mFrameRate);
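A worked example of the bound above, with illustrative slow-motion values: mSampleRate = 48000, mCaptureFps = 240 and mFrameRate = 30 give a rounded source rate of (48000 * 240 + 15) / 30 = 384000 Hz, which exceeds kSampleRateHzMax = 192000 and is therefore rejected. A minimal sketch of the same arithmetic:

    #include <cstdint>
    #include <cstdio>

    int main() {
        const int32_t kSampleRateHzMax = 192000;
        const int32_t sampleRate = 48000;   // illustrative slow-motion capture settings
        const double captureFps = 240.0;
        const int32_t frameRate = 30;
        const int32_t sourceSampleRate =
                (sampleRate * captureFps + frameRate / 2) / frameRate;  // 384000
        const bool inRange =
                sourceSampleRate >= sampleRate && sourceSampleRate <= kSampleRateHzMax;
        std::printf("sourceSampleRate=%d inRange=%d\n", sourceSampleRate, inRange ? 1 : 0);
        return 0;
    }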
diff --git a/media/libstagefright/ACodecBufferChannel.cpp b/media/libstagefright/ACodecBufferChannel.cpp
index ad42813..16e267b 100644
--- a/media/libstagefright/ACodecBufferChannel.cpp
+++ b/media/libstagefright/ACodecBufferChannel.cpp
@@ -50,7 +50,7 @@
 using namespace hardware::cas::native::V1_0;
 using DrmBufferType = hardware::drm::V1_0::BufferType;
 using BufferInfo = ACodecBufferChannel::BufferInfo;
-using BufferInfoIterator = std::vector<const BufferInfo>::const_iterator;
+using BufferInfoIterator = std::vector<BufferInfo>::const_iterator;
 
 ACodecBufferChannel::~ACodecBufferChannel() {
     if (mCrypto != nullptr && mDealer != nullptr && mHeapSeqNum >= 0) {
@@ -59,7 +59,7 @@
 }
 
 static BufferInfoIterator findClientBuffer(
-        const std::shared_ptr<const std::vector<const BufferInfo>> &array,
+        const std::shared_ptr<const std::vector<BufferInfo>> &array,
         const sp<MediaCodecBuffer> &buffer) {
     return std::find_if(
             array->begin(), array->end(),
@@ -67,7 +67,7 @@
 }
 
 static BufferInfoIterator findBufferId(
-        const std::shared_ptr<const std::vector<const BufferInfo>> &array,
+        const std::shared_ptr<const std::vector<BufferInfo>> &array,
         IOMX::buffer_id bufferId) {
     return std::find_if(
             array->begin(), array->end(),
@@ -97,7 +97,7 @@
 }
 
 status_t ACodecBufferChannel::queueInputBuffer(const sp<MediaCodecBuffer> &buffer) {
-    std::shared_ptr<const std::vector<const BufferInfo>> array(
+    std::shared_ptr<const std::vector<BufferInfo>> array(
             std::atomic_load(&mInputBuffers));
     BufferInfoIterator it = findClientBuffer(array, buffer);
     if (it == array->end()) {
@@ -138,7 +138,7 @@
     if (!hasCryptoOrDescrambler() || mDealer == nullptr) {
         return -ENOSYS;
     }
-    std::shared_ptr<const std::vector<const BufferInfo>> array(
+    std::shared_ptr<const std::vector<BufferInfo>> array(
             std::atomic_load(&mInputBuffers));
     BufferInfoIterator it = findClientBuffer(array, buffer);
     if (it == array->end()) {
@@ -352,7 +352,7 @@
         size_t numSubSamples,
         const sp<MediaCodecBuffer> &buffer,
         AString* errorDetailMsg) {
-    std::shared_ptr<const std::vector<const BufferInfo>> array(
+    std::shared_ptr<const std::vector<BufferInfo>> array(
             std::atomic_load(&mInputBuffers));
     BufferInfoIterator it = findClientBuffer(array, buffer);
     if (it == array->end()) {
@@ -473,7 +473,7 @@
 
 status_t ACodecBufferChannel::renderOutputBuffer(
         const sp<MediaCodecBuffer> &buffer, int64_t timestampNs) {
-    std::shared_ptr<const std::vector<const BufferInfo>> array(
+    std::shared_ptr<const std::vector<BufferInfo>> array(
             std::atomic_load(&mOutputBuffers));
     BufferInfoIterator it = findClientBuffer(array, buffer);
     if (it == array->end()) {
@@ -495,7 +495,7 @@
 }
 
 status_t ACodecBufferChannel::discardBuffer(const sp<MediaCodecBuffer> &buffer) {
-    std::shared_ptr<const std::vector<const BufferInfo>> array(
+    std::shared_ptr<const std::vector<BufferInfo>> array(
             std::atomic_load(&mInputBuffers));
     bool input = true;
     BufferInfoIterator it = findClientBuffer(array, buffer);
@@ -517,7 +517,7 @@
 }
 
 void ACodecBufferChannel::getInputBufferArray(Vector<sp<MediaCodecBuffer>> *array) {
-    std::shared_ptr<const std::vector<const BufferInfo>> inputBuffers(
+    std::shared_ptr<const std::vector<BufferInfo>> inputBuffers(
             std::atomic_load(&mInputBuffers));
     array->clear();
     for (const BufferInfo &elem : *inputBuffers) {
@@ -526,7 +526,7 @@
 }
 
 void ACodecBufferChannel::getOutputBufferArray(Vector<sp<MediaCodecBuffer>> *array) {
-    std::shared_ptr<const std::vector<const BufferInfo>> outputBuffers(
+    std::shared_ptr<const std::vector<BufferInfo>> outputBuffers(
             std::atomic_load(&mOutputBuffers));
     array->clear();
     for (const BufferInfo &elem : *outputBuffers) {
@@ -583,7 +583,7 @@
             mDecryptDestination = mDealer->allocate(destinationBufferSize);
         }
     }
-    std::vector<const BufferInfo> inputBuffers;
+    std::vector<BufferInfo> inputBuffers;
     for (const BufferAndId &elem : array) {
         sp<IMemory> sharedEncryptedBuffer;
         if (hasCryptoOrDescrambler()) {
@@ -593,22 +593,22 @@
     }
     std::atomic_store(
             &mInputBuffers,
-            std::make_shared<const std::vector<const BufferInfo>>(inputBuffers));
+            std::make_shared<const std::vector<BufferInfo>>(inputBuffers));
 }
 
 void ACodecBufferChannel::setOutputBufferArray(const std::vector<BufferAndId> &array) {
-    std::vector<const BufferInfo> outputBuffers;
+    std::vector<BufferInfo> outputBuffers;
     for (const BufferAndId &elem : array) {
         outputBuffers.emplace_back(elem.mBuffer, elem.mBufferId, nullptr);
     }
     std::atomic_store(
             &mOutputBuffers,
-            std::make_shared<const std::vector<const BufferInfo>>(outputBuffers));
+            std::make_shared<const std::vector<BufferInfo>>(outputBuffers));
 }
 
 void ACodecBufferChannel::fillThisBuffer(IOMX::buffer_id bufferId) {
     ALOGV("fillThisBuffer #%d", bufferId);
-    std::shared_ptr<const std::vector<const BufferInfo>> array(
+    std::shared_ptr<const std::vector<BufferInfo>> array(
             std::atomic_load(&mInputBuffers));
     BufferInfoIterator it = findBufferId(array, bufferId);
 
@@ -629,7 +629,7 @@
         IOMX::buffer_id bufferId,
         OMX_U32 omxFlags) {
     ALOGV("drainThisBuffer #%d", bufferId);
-    std::shared_ptr<const std::vector<const BufferInfo>> array(
+    std::shared_ptr<const std::vector<BufferInfo>> array(
             std::atomic_load(&mOutputBuffers));
     BufferInfoIterator it = findBufferId(array, bufferId);
 
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index e918b5e..15188b0 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -72,6 +72,9 @@
 static const int64_t kMaxMetadataSize = 0x4000000LL;   // 64MB max per-frame metadata size
 static const int64_t kMaxCttsOffsetTimeUs = 30 * 60 * 1000000LL;  // 30 minutes
 static const size_t kESDSScratchBufferSize = 10;  // kMaxAtomSize in Mpeg4Extractor 64MB
+// Allow up to 100 milliseconds, which is safely above the maximum delay observed in manual testing
+// between posting the fd from setNextFd() and handling it here.
+static const int64_t kFdCondWaitTimeoutNs = 100000000;
 
 static const char kMetaKey_Version[]    = "com.android.version";
 static const char kMetaKey_Manufacturer[]      = "com.android.manufacturer";
@@ -1262,9 +1265,13 @@
         return OK;
     }
 
+    // Wait for the signal only if the new file is not available.
     if (mNextFd == -1) {
-        ALOGW("No FileDescriptor for next recording");
-        return INVALID_OPERATION;
+        status_t res = mFdCond.waitRelative(mLock, kFdCondWaitTimeoutNs);
+        if (res != OK) {
+            ALOGW("No FileDescriptor for next recording");
+            return INVALID_OPERATION;
+        }
     }
 
     mSwitchPending = true;
@@ -2433,6 +2440,7 @@
         return INVALID_OPERATION;
     }
     mNextFd = dup(fd);
+    mFdCond.signal();
     return OK;
 }
 
@@ -4886,8 +4894,15 @@
             int32_t mediaTime = (mFirstSampleStartOffsetUs * mTimeScale + 5E5) / 1E6;
             int32_t firstSampleOffsetTicks =
                     (mFirstSampleStartOffsetUs * mvhdTimeScale + 5E5) / 1E6;
-            // samples before 0 don't count in for duration, hence subtract firstSampleOffsetTicks.
-            addOneElstTableEntry(tkhdDurationTicks - firstSampleOffsetTicks, mediaTime, 1, 0);
+            if (tkhdDurationTicks >= firstSampleOffsetTicks) {
+                // samples before 0 don't count in for duration, hence subtract
+                // firstSampleOffsetTicks.
+                addOneElstTableEntry(tkhdDurationTicks - firstSampleOffsetTicks, mediaTime, 1, 0);
+            } else {
+                ALOGW("The track header duration %" PRId64
+                      " is smaller than the first sample offset %" PRId64,
+                      mTrackDurationUs, mFirstSampleStartOffsetUs);
+            }
         } else {
             // Track starting at zero.
             ALOGV("No edit list entry required for this track");
diff --git a/media/libstagefright/include/ACodecBufferChannel.h b/media/libstagefright/include/ACodecBufferChannel.h
index 946d533..46a5183 100644
--- a/media/libstagefright/include/ACodecBufferChannel.h
+++ b/media/libstagefright/include/ACodecBufferChannel.h
@@ -155,8 +155,8 @@
     // obtained. Inside BufferInfo, mBufferId and mSharedEncryptedBuffer are
     // immutable objects. We write internal states of mClient/CodecBuffer when
     // the caller has given up the reference, so that access is also safe.
-    std::shared_ptr<const std::vector<const BufferInfo>> mInputBuffers;
-    std::shared_ptr<const std::vector<const BufferInfo>> mOutputBuffers;
+    std::shared_ptr<const std::vector<BufferInfo>> mInputBuffers;
+    std::shared_ptr<const std::vector<BufferInfo>> mOutputBuffers;
 
     sp<MemoryDealer> makeMemoryDealer(size_t heapSize);
 
diff --git a/media/libstagefright/include/media/stagefright/CameraSource.h b/media/libstagefright/include/media/stagefright/CameraSource.h
index fcd17b9..f42e315 100644
--- a/media/libstagefright/include/media/stagefright/CameraSource.h
+++ b/media/libstagefright/include/media/stagefright/CameraSource.h
@@ -236,7 +236,7 @@
     // Start of members protected by mBatchLock
     std::deque<uint32_t> mInflightBatchSizes;
     std::vector<native_handle_t*> mInflightReturnedHandles;
-    std::vector<const sp<IMemory>> mInflightReturnedMemorys;
+    std::vector<sp<IMemory>> mInflightReturnedMemorys;
     // End of members protected by mBatchLock
 
     void releaseQueuedFrames();
diff --git a/media/libstagefright/include/media/stagefright/MPEG4Writer.h b/media/libstagefright/include/media/stagefright/MPEG4Writer.h
index 054a4b8..ee75129 100644
--- a/media/libstagefright/include/media/stagefright/MPEG4Writer.h
+++ b/media/libstagefright/include/media/stagefright/MPEG4Writer.h
@@ -144,6 +144,7 @@
     std::mutex mFallocMutex;
     bool mPreAllocFirstTime; // Pre-allocate space for file and track headers only once per file.
     uint64_t mPrevAllTracksTotalMetaDataSizeEstimate;
+    Condition mFdCond;
 
     List<Track *> mTracks;
 
diff --git a/media/libstagefright/webm/WebmWriter.cpp b/media/libstagefright/webm/WebmWriter.cpp
index ca862b0..151ce7c 100644
--- a/media/libstagefright/webm/WebmWriter.cpp
+++ b/media/libstagefright/webm/WebmWriter.cpp
@@ -290,7 +290,7 @@
     // Max file duration limit is set
     if (mMaxFileDurationLimitUs != 0) {
         if (bitRate > 0) {
-            int64_t size2 = ((mMaxFileDurationLimitUs * bitRate * 6) / 1000 / 8000000);
+            int64_t size2 = ((mMaxFileDurationLimitUs / 1000) * bitRate * 6) / 8000000;
             if (mMaxFileSizeLimitBytes != 0 && mIsFileSizeLimitExplicitlyRequested) {
                 // When both file size and duration limits are set,
                 // we use the smaller limit of the two.
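The reordering above avoids 64-bit overflow. With illustrative values (24 h at 50 Mbps), the old intermediate durationUs * bitRate * 6 is about 2.6e19, past INT64_MAX (~9.2e18), while (durationUs / 1000) * bitRate * 6 peaks around 2.6e16 and stays in range. A small sketch of the comparison:

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    int main() {
        const int64_t durationLimitUs = 86'400'000'000LL;  // 24 h, illustrative
        const int64_t bitRate = 50'000'000LL;              // 50 Mbps, illustrative

        // Old ordering would need durationUs * bitRate * 6 ~= 2.6e19, past INT64_MAX.
        const long double unsafeIntermediate = (long double)durationLimitUs * bitRate * 6;
        std::printf("would overflow: %d\n", unsafeIntermediate > (long double)INT64_MAX ? 1 : 0);

        // Reordered form from the hunk above: divide by 1000 before multiplying.
        const int64_t size2 = ((durationLimitUs / 1000) * bitRate * 6) / 8000000;
        std::printf("size2 = %" PRId64 " bytes\n", size2);
        return 0;
    }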
diff --git a/media/module/bufferpool/2.0/AccessorImpl.cpp b/media/module/bufferpool/2.0/AccessorImpl.cpp
index b9483bf..3d7f0c7 100644
--- a/media/module/bufferpool/2.0/AccessorImpl.cpp
+++ b/media/module/bufferpool/2.0/AccessorImpl.cpp
@@ -923,7 +923,7 @@
         std::map<const std::weak_ptr<Accessor::Impl>, nsecs_t, std::owner_less<>> &accessors,
         std::mutex &mutex,
         std::condition_variable &cv) {
-    std::list<const std::weak_ptr<Accessor::Impl>> evictList;
+    std::list<std::weak_ptr<Accessor::Impl>> evictList;
     while (true) {
         int expired = 0;
         int evicted = 0;
diff --git a/media/module/extractors/mp4/MPEG4Extractor.cpp b/media/module/extractors/mp4/MPEG4Extractor.cpp
index b3707c8..f247f8c 100644
--- a/media/module/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/module/extractors/mp4/MPEG4Extractor.cpp
@@ -523,11 +523,10 @@
     }
 
     [this, &track] {
-        int64_t duration;
+        int64_t duration = track->mMdhdDurationUs;
         int32_t samplerate;
         // Only for audio track.
-        if (track->elst_needs_processing && mHeaderTimescale != 0 &&
-            AMediaFormat_getInt64(track->meta, AMEDIAFORMAT_KEY_DURATION, &duration) &&
+        if (track->elst_needs_processing && mHeaderTimescale != 0 && duration != 0 &&
             AMediaFormat_getInt32(track->meta, AMEDIAFORMAT_KEY_SAMPLE_RATE, &samplerate)) {
             // Elst has to be processed only the first time this function is called.
             track->elst_needs_processing = false;
@@ -1645,7 +1644,10 @@
                           (long long) duration, (long long) mLastTrack->timescale);
                     return ERROR_MALFORMED;
                 }
-                AMediaFormat_setInt64(mLastTrack->meta, AMEDIAFORMAT_KEY_DURATION, durationUs);
+                // Store this track's mdhd duration to calculate the padding.
+                mLastTrack->mMdhdDurationUs = (int64_t)durationUs;
+            } else {
+                mLastTrack->mMdhdDurationUs = 0;
             }
 
             uint8_t lang[2];
@@ -3907,17 +3909,18 @@
     }
 
     int32_t id;
+    int64_t duration;
 
     if (version == 1) {
         // we can get ctime value from U64_AT(&buffer[4])
         // we can get mtime value from U64_AT(&buffer[12])
         id = U32_AT(&buffer[20]);
-        // we can get duration value from U64_AT(&buffer[28])
+        duration = U64_AT(&buffer[28]);
     } else if (version == 0) {
         // we can get ctime value from U32_AT(&buffer[4])
         // we can get mtime value from U32_AT(&buffer[8])
         id = U32_AT(&buffer[12]);
-        // we can get duration value from U32_AT(&buffer[20])
+        duration = U32_AT(&buffer[20]);
     } else {
         return ERROR_UNSUPPORTED;
     }
@@ -3926,6 +3929,15 @@
         return ERROR_MALFORMED;
 
     AMediaFormat_setInt32(mLastTrack->meta, AMEDIAFORMAT_KEY_TRACK_ID, id);
+    if (duration != 0 && mHeaderTimescale != 0) {
+        long double durationUs = ((long double)duration * 1000000) / mHeaderTimescale;
+        if (durationUs < 0 || durationUs > INT64_MAX) {
+            ALOGE("cannot represent %lld * 1000000 / %lld in 64 bits",
+                  (long long) duration, (long long) mHeaderTimescale);
+            return ERROR_MALFORMED;
+        }
+        AMediaFormat_setInt64(mLastTrack->meta, AMEDIAFORMAT_KEY_DURATION, durationUs);
+    }
 
     size_t matrixOffset = dynSize + 16;
     int32_t a00 = U32_AT(&buffer[matrixOffset]);
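A worked example of the tkhd duration conversion added above, with illustrative numbers: 90000 ticks at a movie-header timescale of 90000 map to 1,000,000 us; the long double intermediate is what guards against values that cannot be represented in 64 bits.

    #include <cstdint>
    #include <cstdio>

    int main() {
        const uint64_t duration = 90000;         // tkhd duration in ticks, illustrative
        const uint32_t headerTimescale = 90000;  // mvhd timescale, illustrative
        const long double durationUs = ((long double)duration * 1000000) / headerTimescale;
        if (durationUs < 0 || durationUs > INT64_MAX) {
            std::puts("duration not representable in 64 bits");
            return 1;
        }
        std::printf("durationUs = %lld\n", (long long)durationUs);  // 1000000, i.e. 1 second
        return 0;
    }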
diff --git a/media/module/extractors/mp4/include/MPEG4Extractor.h b/media/module/extractors/mp4/include/MPEG4Extractor.h
index 542a3e6..59626f6 100644
--- a/media/module/extractors/mp4/include/MPEG4Extractor.h
+++ b/media/module/extractors/mp4/include/MPEG4Extractor.h
@@ -96,7 +96,7 @@
 
         uint8_t *mTx3gBuffer;
         size_t mTx3gSize, mTx3gFilled;
-
+        int64_t mMdhdDurationUs = 0;
 
         Track() {
             next = NULL;
diff --git a/media/ndk/include/media/NdkMediaDataSource.h b/media/ndk/include/media/NdkMediaDataSource.h
index 197e202..def142c 100644
--- a/media/ndk/include/media/NdkMediaDataSource.h
+++ b/media/ndk/include/media/NdkMediaDataSource.h
@@ -49,16 +49,16 @@
 /*
  * AMediaDataSource's callbacks will be invoked on an implementation-defined thread
  * or thread pool. No guarantees are provided about which thread(s) will be used for
- * callbacks. For example, |close| can be invoked from a different thread than the
- * thread invoking |readAt|. As such, the Implementations of AMediaDataSource callbacks
+ * callbacks. For example, `close` can be invoked from a different thread than the
+ * thread invoking `readAt`. As such, the implementations of AMediaDataSource callbacks
  * must be threadsafe.
  */
 
 /**
- * Called to request data from the given |offset|.
+ * Called to request data from the given `offset`.
  *
- * Implementations should should write up to |size| bytes into
- * |buffer|, and return the number of bytes written.
+ * Implementations should write up to `size` bytes into
+ * `buffer`, and return the number of bytes written.
  *
  * Return 0 if size is zero (thus no bytes are read).
  *
@@ -78,9 +78,9 @@
  * Called to close the data source, unblock reads, and release associated
  * resources.
  *
- * The NDK media framework guarantees that after the first |close| is
+ * The NDK media framework guarantees that after the first `close` is
  * called, no future callbacks will be invoked on the data source except
- * for |close| itself.
+ * for `close` itself.
  *
  * Closing a data source allows readAt calls that were blocked waiting
  * for I/O data to return promptly.
@@ -101,7 +101,7 @@
 
 /**
  * Called to get an estimate of the number of bytes that can be read from this data source
- * starting at |offset| without blocking for I/O.
+ * starting at `offset` without blocking for I/O.
  *
  * Return -1 when such an estimate is not possible.
  */
@@ -111,10 +111,10 @@
  * Create new media data source. Returns NULL if memory allocation
  * for the new data source object fails.
  *
- * Set the |uri| from which the data source will read,
+ * Set the `uri` from which the data source will read,
  * plus additional http headers when initiating the request.
  *
- * Headers will contain corresponding items from |key_values|
+ * Headers will contain corresponding items from `key_values`
  * in the following fashion:
  *
  * key_values[0]:key_values[1]
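Since the hunks above only reformat the AMediaDataSource documentation, here is a hedged usage sketch of the callback contract it describes, backing a data source with a FILE*. The helper names and error handling are illustrative; consult NdkMediaDataSource.h for the exact callback typedefs.

    #include <cstdio>
    #include <sys/types.h>
    #include <media/NdkMediaDataSource.h>

    // readAt: write up to `size` bytes into `buffer` starting at `offset`, return bytes written,
    // 0 when size == 0, and -1 on error or end of stream (per the contract documented above).
    static ssize_t fileReadAt(void* userdata, off64_t offset, void* buffer, size_t size) {
        FILE* f = static_cast<FILE*>(userdata);
        if (size == 0) return 0;
        if (fseeko(f, static_cast<off_t>(offset), SEEK_SET) != 0) return -1;
        const size_t n = fread(buffer, 1, size, f);
        return n > 0 ? static_cast<ssize_t>(n) : -1;
    }

    static ssize_t fileGetSize(void* userdata) {
        FILE* f = static_cast<FILE*>(userdata);
        if (fseeko(f, 0, SEEK_END) != 0) return -1;
        return static_cast<ssize_t>(ftello(f));
    }

    static void fileClose(void* userdata) {
        fclose(static_cast<FILE*>(userdata));  // after this, no further callbacks are expected
    }

    // Illustrative helper: build a data source over a local file.
    AMediaDataSource* makeFileDataSource(const char* path) {
        FILE* f = fopen(path, "rb");
        if (f == nullptr) return nullptr;
        AMediaDataSource* src = AMediaDataSource_new();
        AMediaDataSource_setUserdata(src, f);
        AMediaDataSource_setReadAt(src, fileReadAt);
        AMediaDataSource_setGetSize(src, fileGetSize);
        AMediaDataSource_setClose(src, fileClose);
        return src;
    }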
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 7933ef8..1f4e16e 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -1082,6 +1082,7 @@
         client = registerPid(clientPid);
 
         IAfPlaybackThread* effectThread = nullptr;
+        sp<IAfEffectChain> effectChain = nullptr;
         // check if an effect chain with the same session ID is present on another
         // output thread and move it here.
         for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
@@ -1094,6 +1095,10 @@
                 }
             }
         }
+        // Check if an orphan effect chain exists for this session
+        if (effectThread == nullptr) {
+            effectChain = getOrphanEffectChain_l(sessionId);
+        }
         ALOGV("createTrack() sessionId: %d", sessionId);
 
         output.sampleRate = input.config.sample_rate;
@@ -1138,6 +1143,13 @@
                     effectIds = thread->getEffectIds_l(sessionId);
                 }
             }
+            if (effectChain != nullptr) {
+                if (moveEffectChain_ll(sessionId, nullptr, thread, effectChain.get())
+                        == NO_ERROR) {
+                    effectThreadId = thread->id();
+                    effectIds = thread->getEffectIds_l(sessionId);
+                }
+            }
         }
 
         // Look for sync events awaiting for a session to be used.
@@ -3116,6 +3128,25 @@
 
 
             mPlaybackThreads.removeItem(output);
+            // Save AUDIO_SESSION_OUTPUT_MIX effects to the orphan chains.
+            // The output mix effect session is used by the AudioPolicy Manager to manage music
+            // effects and exists across all playback threads.
+            if (playbackThread->type() == IAfThreadBase::MIXER
+                    || playbackThread->type() == IAfThreadBase::OFFLOAD
+                    || playbackThread->type() == IAfThreadBase::SPATIALIZER) {
+                sp<IAfEffectChain> mixChain;
+                {
+                    audio_utils::scoped_lock sl(playbackThread->mutex());
+                    mixChain = playbackThread->getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX);
+                    if (mixChain != nullptr) {
+                        ALOGW("%s() output %d moving mix session to orphans", __func__, output);
+                        playbackThread->removeEffectChain_l(mixChain);
+                    }
+                }
+                if (mixChain != nullptr) {
+                    putOrphanEffectChain_l(mixChain);
+                }
+            }
             // save all effects to the default thread
             if (mPlaybackThreads.size()) {
                 IAfPlaybackThread* const dstThread =
@@ -3781,7 +3812,11 @@
 
 IAfPlaybackThread* AudioFlinger::primaryPlaybackThread_l() const
 {
-    audio_utils::lock_guard lock(hardwareMutex());
+    // The atomic ptr mPrimaryHardwareDev requires both the
+    // AudioFlinger and the Hardware mutex for modification.
+    // As we hold the AudioFlinger mutex, we access it
+    // safely without the Hardware mutex, to avoid mutex order
+    // inversion with Thread methods and the ThreadBase mutex.
     if (mPrimaryHardwareDev == nullptr) {
         return nullptr;
     }
@@ -4261,7 +4296,9 @@
             // before creating the AudioEffect or the io handle must be specified.
             //
             // Detect if the effect is created after an AudioRecord is destroyed.
-            if (getOrphanEffectChain_l(sessionId).get() != nullptr) {
+            if (sessionId != AUDIO_SESSION_OUTPUT_MIX
+                  && ((descOut.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC)
+                  && getOrphanEffectChain_l(sessionId).get() != nullptr) {
                 ALOGE("%s: effect %s with no specified io handle is denied because the AudioRecord"
                       " for session %d no longer exists",
                       __func__, descOut.name, sessionId);
@@ -4272,11 +4309,27 @@
             // Legacy handling of creating an effect on an expired or made-up
             // session id.  We think that it is a Playback effect.
             //
-            // If no output thread contains the requested session ID, default to
-            // first output. The effect chain will be moved to the correct output
-            // thread when a track with the same session ID is created
-            if (io == AUDIO_IO_HANDLE_NONE && mPlaybackThreads.size() > 0) {
-                io = mPlaybackThreads.keyAt(0);
+            // If no output thread contains the requested session ID, park the effect in
+            // the orphan chains. The effect chain will be moved to the correct output
+            // thread when a track with the same session ID is created.
+            if (io == AUDIO_IO_HANDLE_NONE) {
+                if (probe) {
+                    // In probe mode, as no compatible thread found, exit with error.
+                    lStatus = BAD_VALUE;
+                    goto Exit;
+                }
+                ALOGV("%s() got io %d for effect %s", __func__, io, descOut.name);
+                sp<Client> client = registerPid(currentPid);
+                bool pinned = !audio_is_global_session(sessionId) && isSessionAcquired_l(sessionId);
+                handle = createOrphanEffect_l(client, effectClient, priority, sessionId,
+                                              &descOut, &enabledOut, &lStatus, pinned,
+                                              request.notifyFramesProcessed);
+                if (lStatus != NO_ERROR && lStatus != ALREADY_EXISTS) {
+                    // remove local strong reference to Client with clientMutex() held
+                    audio_utils::lock_guard _cl(clientMutex());
+                    client.clear();
+                }
+                goto Register;
             }
             ALOGV("createEffect() got io %d for effect %s", io, descOut.name);
         } else if (checkPlaybackThread_l(io) != nullptr
@@ -4317,7 +4370,8 @@
                     goto Exit;
                 }
             }
-        } else {
+        }
+        if (thread->type() == IAfThreadBase::RECORD || sessionId == AUDIO_SESSION_OUTPUT_MIX) {
             // Check if one effect chain was awaiting for an effect to be created on this
             // session and used it instead of creating a new one.
             sp<IAfEffectChain> chain = getOrphanEffectChain_l(sessionId);
@@ -4393,6 +4447,85 @@
     return lStatus;
 }
 
+sp<IAfEffectHandle> AudioFlinger::createOrphanEffect_l(
+        const sp<Client>& client,
+        const sp<IEffectClient>& effectClient,
+        int32_t priority,
+        audio_session_t sessionId,
+        effect_descriptor_t *desc,
+        int *enabled,
+        status_t *status,
+        bool pinned,
+        bool notifyFramesProcessed)
+{
+    ALOGV("%s effectClient %p, priority %d, sessionId %d, factory %p",
+          __func__, effectClient.get(), priority, sessionId, mEffectsFactoryHal.get());
+
+    // Check if an orphan effect chain exists for this session, or create a new chain for it.
+    sp<IAfEffectModule> effect;
+    sp<IAfEffectChain> chain = getOrphanEffectChain_l(sessionId);
+    bool chainCreated = false;
+    if (chain == nullptr) {
+        chain = IAfEffectChain::create(/* ThreadBase= */ nullptr, sessionId, this);
+        chainCreated = true;
+    } else {
+        effect = chain->getEffectFromDesc(desc);
+    }
+    bool effectCreated = false;
+    if (effect == nullptr) {
+        audio_unique_id_t effectId = nextUniqueId(AUDIO_UNIQUE_ID_USE_EFFECT);
+        // create a new effect module if none present in the chain
+        status_t llStatus =
+                chain->createEffect(effect, desc, effectId, sessionId, pinned);
+        if (llStatus != NO_ERROR) {
+            *status = llStatus;
+            // if the effect chain was not created here, put it back
+            if (!chainCreated) {
+                putOrphanEffectChain_l(chain);
+            }
+            return nullptr;
+        }
+        effect->setMode(getMode());
+
+        if (effect->isHapticGenerator()) {
+            // TODO(b/184194057): Use the vibrator information from the vibrator that will be used
+            // for the HapticGenerator.
+            const std::optional<media::AudioVibratorInfo> defaultVibratorInfo =
+                    std::move(getDefaultVibratorInfo_l());
+            if (defaultVibratorInfo) {
+                // Only set the vibrator info when it is a valid one.
+                audio_utils::lock_guard _cl(chain->mutex());
+                effect->setVibratorInfo_l(*defaultVibratorInfo);
+            }
+        }
+        effectCreated = true;
+    }
+    // create effect handle and connect it to effect module
+    sp<IAfEffectHandle> handle =
+            IAfEffectHandle::create(effect, client, effectClient, priority, notifyFramesProcessed);
+    status_t lStatus = handle->initCheck();
+    if (lStatus == OK) {
+        lStatus = effect->addHandle(handle.get());
+    }
+    // In case of an lStatus error the EffectHandle is still returned; the caller must clean it up.
+    if (lStatus != NO_ERROR && lStatus != ALREADY_EXISTS) {
+        if (effectCreated) {
+            chain->removeEffect(effect);
+        }
+        // if the effect chain was not created here, put it back
+        if (!chainCreated) {
+            putOrphanEffectChain_l(chain);
+        }
+    } else {
+        if (enabled != NULL) {
+            *enabled = (int)effect->isEnabled();
+        }
+        putOrphanEffectChain_l(chain);
+    }
+    *status = lStatus;
+    return handle;
+}
+
 status_t AudioFlinger::moveEffects(audio_session_t sessionId, audio_io_handle_t srcIo,
         audio_io_handle_t dstIo)
 NO_THREAD_SAFETY_ANALYSIS
@@ -4421,17 +4554,39 @@
         }
         return ret;
     }
-    IAfPlaybackThread* const srcThread = checkPlaybackThread_l(srcIo);
-    if (srcThread == nullptr) {
-        ALOGW("%s() bad srcIo %d", __func__, srcIo);
-        return BAD_VALUE;
-    }
-    IAfPlaybackThread* const dstThread = checkPlaybackThread_l(dstIo);
+
+    IAfPlaybackThread* dstThread = checkPlaybackThread_l(dstIo);
     if (dstThread == nullptr) {
         ALOGW("%s() bad dstIo %d", __func__, dstIo);
         return BAD_VALUE;
     }
 
+    IAfPlaybackThread* srcThread = checkPlaybackThread_l(srcIo);
+    sp<IAfEffectChain> orphanChain = getOrphanEffectChain_l(sessionId);
+    if (srcThread == nullptr && orphanChain == nullptr && sessionId == AUDIO_SESSION_OUTPUT_MIX) {
+        ALOGW("%s() AUDIO_SESSION_OUTPUT_MIX not found in orphans, checking other mix", __func__);
+        for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
+            const sp<IAfPlaybackThread> pt = mPlaybackThreads.valueAt(i);
+            const uint32_t sessionType = pt->hasAudioSession(AUDIO_SESSION_OUTPUT_MIX);
+            if ((pt->type() == IAfThreadBase::MIXER || pt->type() == IAfThreadBase::OFFLOAD) &&
+                    ((sessionType & IAfThreadBase::EFFECT_SESSION) != 0)) {
+                srcThread = pt.get();
+                ALOGW("%s() found srcOutput %d hosting AUDIO_SESSION_OUTPUT_MIX", __func__,
+                      pt->id());
+                break;
+            }
+        }
+    }
+    if (srcThread == nullptr && orphanChain == nullptr) {
+        ALOGW("moveEffects() bad srcIo %d", srcIo);
+        return BAD_VALUE;
+    }
+    // dstThread pointer validity has already been checked
+    if (orphanChain != nullptr) {
+        audio_utils::scoped_lock _ll(dstThread->mutex());
+        return moveEffectChain_ll(sessionId, nullptr, dstThread, orphanChain.get());
+    }
+    // srcThread pointer validity has already been checked
     audio_utils::scoped_lock _ll(dstThread->mutex(), srcThread->mutex());
     return moveEffectChain_ll(sessionId, srcThread, dstThread);
 }
@@ -4457,12 +4612,17 @@
 // moveEffectChain_ll must be called with the AudioFlinger::mutex()
 // and both srcThread and dstThread mutex()s held
 status_t AudioFlinger::moveEffectChain_ll(audio_session_t sessionId,
-        IAfPlaybackThread* srcThread, IAfPlaybackThread* dstThread)
+        IAfPlaybackThread* srcThread, IAfPlaybackThread* dstThread,
+        IAfEffectChain* srcChain)
 {
-    ALOGV("%s: session %d from thread %p to thread %p",
-            __func__, sessionId, srcThread, dstThread);
+    ALOGV("%s: session %d from thread %p to thread %p %s",
+            __func__, sessionId, srcThread, dstThread,
+            (srcChain != nullptr ? "from specific chain" : ""));
+    ALOG_ASSERT((srcThread != nullptr) != (srcChain != nullptr),
+                "no source provided for source chain");
 
-    sp<IAfEffectChain> chain = srcThread->getEffectChain_l(sessionId);
+    sp<IAfEffectChain> chain =
+          srcChain != nullptr ? srcChain : srcThread->getEffectChain_l(sessionId);
     if (chain == 0) {
         ALOGW("%s: effect chain for session %d not on source thread %p",
                 __func__, sessionId, srcThread);
@@ -4482,8 +4642,9 @@
     // otherwise unnecessary as removeEffect_l() will remove the chain when last effect is
     // removed.
     // TODO(b/216875016): consider holding the effect chain locks for the duration of the move.
-    srcThread->removeEffectChain_l(chain);
-
+    if (srcThread != nullptr) {
+        srcThread->removeEffectChain_l(chain);
+    }
     // transfer all effects one by one so that new effect chain is created on new thread with
     // correct buffer sizes and audio parameters and effect engines reconfigured accordingly
     sp<IAfEffectChain> dstChain;
@@ -4493,7 +4654,11 @@
     // process effects one by one.
     for (sp<IAfEffectModule> effect = chain->getEffectFromId_l(0); effect != nullptr;
             effect = chain->getEffectFromId_l(0)) {
-        srcThread->removeEffect_l(effect);
+        if (srcThread != nullptr) {
+            srcThread->removeEffect_l(effect);
+        } else {
+            chain->removeEffect(effect);
+        }
         removed.add(effect);
         status = dstThread->addEffect_ll(effect);
         if (status != NO_ERROR) {
@@ -4521,7 +4686,7 @@
         for (const auto& effect : removed) {
             dstThread->removeEffect_l(effect); // Note: Depending on error location, the last
                                                // effect may not have been placed on dstThread.
-            if (srcThread->addEffect_ll(effect) == NO_ERROR) {
+            if (srcThread != nullptr && srcThread->addEffect_ll(effect) == NO_ERROR) {
                 ++restored;
                 if (dstChain == nullptr) {
                     dstChain = effect->getCallback()->chain().promote();
@@ -4552,15 +4717,19 @@
         if (errorString.empty()) {
             errorString = StringPrintf("%s: failed status %d", __func__, status);
         }
-        ALOGW("%s: %s unsuccessful move of session %d from srcThread %p to dstThread %p "
+        ALOGW("%s: %s unsuccessful move of session %d from %s %p to dstThread %p "
                 "(%zu effects removed from srcThread, %zu effects restored to srcThread, "
                 "%zu effects started)",
-                __func__, errorString.c_str(), sessionId, srcThread, dstThread,
+                __func__, errorString.c_str(), sessionId,
+                (srcThread != nullptr ? "srcThread" : "srcChain"),
+                (srcThread != nullptr ? (void*) srcThread : (void*) srcChain), dstThread,
                 removed.size(), restored, started);
     } else {
-        ALOGD("%s: successful move of session %d from srcThread %p to dstThread %p "
+        ALOGD("%s: successful move of session %d from %s %p to dstThread %p "
                 "(%zu effects moved, %zu effects started)",
-                __func__, sessionId, srcThread, dstThread, removed.size(), started);
+                __func__, sessionId, (srcThread != nullptr ? "srcThread" : "srcChain"),
+                (srcThread != nullptr ? (void*) srcThread : (void*) srcChain), dstThread,
+                removed.size(), started);
     }
     return status;
 }
@@ -4731,7 +4900,7 @@
     ALOGV("updateOrphanEffectChains session %d index %zd", session, index);
     if (index >= 0) {
         sp<IAfEffectChain> chain = mOrphanEffectChains.valueAt(index);
-        if (chain->removeEffect_l(effect, true) == 0) {
+        if (chain->removeEffect(effect, true) == 0) {
             ALOGV("updateOrphanEffectChains removing effect chain at index %zd", index);
             mOrphanEffectChains.removeItemsAt(index);
         }
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 3885465..f08e78e 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -375,7 +375,8 @@
             EXCLUDES_AudioFlinger_Mutex;
 
     status_t moveEffectChain_ll(audio_session_t sessionId,
-            IAfPlaybackThread* srcThread, IAfPlaybackThread* dstThread) final
+            IAfPlaybackThread* srcThread, IAfPlaybackThread* dstThread,
+            IAfEffectChain* srcChain = nullptr) final
             REQUIRES(mutex(), audio_utils::ThreadBase_Mutex);
 
     // This is a helper that is called during incoming binder calls.
@@ -710,6 +711,16 @@
 
     sp<Client> registerPid(pid_t pid) EXCLUDES_AudioFlinger_ClientMutex; // always returns non-0
 
+    sp<IAfEffectHandle> createOrphanEffect_l(const sp<Client>& client,
+                                          const sp<media::IEffectClient>& effectClient,
+                                          int32_t priority,
+                                          audio_session_t sessionId,
+                                          effect_descriptor_t *desc,
+                                          int *enabled,
+                                          status_t *status /*non-NULL*/,
+                                          bool pinned,
+                                          bool notifyFramesProcessed) REQUIRES(mutex());
+
     // for use from destructor
     status_t closeOutput_nonvirtual(audio_io_handle_t output) EXCLUDES_AudioFlinger_Mutex;
     status_t closeInput_nonvirtual(audio_io_handle_t input) EXCLUDES_AudioFlinger_Mutex;
diff --git a/services/audioflinger/DeviceEffectManager.cpp b/services/audioflinger/DeviceEffectManager.cpp
index feae97e..7cb9329 100644
--- a/services/audioflinger/DeviceEffectManager.cpp
+++ b/services/audioflinger/DeviceEffectManager.cpp
@@ -71,10 +71,15 @@
 
 void DeviceEffectManager::onReleaseAudioPatch(audio_patch_handle_t handle) {
     ALOGV("%s", __func__);
+    // Keep references on disconnected handles so destruction happens after the lock is released.
+    std::vector<sp<IAfEffectHandle>> disconnectedHandles{};
     audio_utils::lock_guard _l(mutex());
     for (auto& effectProxies : mDeviceEffects) {
         for (auto& effect : effectProxies.second) {
-            effect->onReleasePatch(handle);
+            sp<IAfEffectHandle> disconnectedHandle = effect->onReleasePatch(handle);
+            if (disconnectedHandle != nullptr) {
+                disconnectedHandles.push_back(std::move(disconnectedHandle));
+            }
         }
     }
 }
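For context, the change above defers destruction of the disconnected handles until the vector leaves scope, i.e. after mutex() has been released. A minimal sketch of that pattern, with hypothetical types rather than the AudioFlinger classes:

    #include <memory>
    #include <mutex>
    #include <vector>

    struct Handle {
        ~Handle() { /* may acquire other locks; must not run while mMutex is held */ }
    };

    class Manager {
    public:
        void releaseAll() {
            std::vector<std::shared_ptr<Handle>> deferred;  // outlives the critical section
            {
                std::lock_guard<std::mutex> guard(mMutex);
                deferred.swap(mHandles);  // detach under the lock, destroy later
            }
            // 'deferred' is destroyed here, after mMutex has been released.
        }

    private:
        std::mutex mMutex;
        std::vector<std::shared_ptr<Handle>> mHandles;
    };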
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 3e8b2af..a6a2cdf 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -562,12 +562,9 @@
 #undef LOG_TAG
 #define LOG_TAG "EffectModule"
 
-EffectModule::EffectModule(const sp<EffectCallbackInterface>& callback,
-                                         effect_descriptor_t *desc,
-                                         int id,
-                                         audio_session_t sessionId,
-                                         bool pinned,
-                                         audio_port_handle_t deviceId)
+EffectModule::EffectModule(const sp<EffectCallbackInterface>& callback, effect_descriptor_t* desc,
+                           int id, audio_session_t sessionId, bool pinned,
+                           audio_port_handle_t deviceId)
     : EffectBase(callback, desc, id, sessionId, pinned),
       // clear mConfig to ensure consistent initial value of buffer framecount
       // in case buffers are associated by setInBuffer() or setOutBuffer()
@@ -577,9 +574,9 @@
       mMaxDisableWaitCnt(1), // set by configure_l(), should be >= 1
       mDisableWaitCnt(0),    // set by process() and updateState()
       mOffloaded(false),
-      mIsOutput(false)
-      , mSupportsFloat(false)
-{
+      mIsOutput(false),
+      mSupportsFloat(false),
+      mEffectInterfaceDebug(desc->name) {
     ALOGV("Constructor %p pinned %d", this, pinned);
     int lStatus;
 
@@ -587,6 +584,7 @@
     mStatus = callback->createEffectHal(
             &desc->uuid, sessionId, deviceId, &mEffectInterface);
     if (mStatus != NO_ERROR) {
+        ALOGE("%s createEffectHal failed: %d", __func__, mStatus);
         return;
     }
     lStatus = init_l();
@@ -596,12 +594,14 @@
     }
 
     setOffloaded_l(callback->isOffload(), callback->io());
-    ALOGV("Constructor success name %s, Interface %p", mDescriptor.name, mEffectInterface.get());
+    ALOGV("%s Constructor success name %s, Interface %p", __func__, mDescriptor.name,
+          mEffectInterface.get());
 
     return;
 Error:
     mEffectInterface.clear();
-    ALOGV("Constructor Error %d", mStatus);
+    mEffectInterfaceDebug += " init failed:" + std::to_string(lStatus);
+    ALOGE("%s Constructor Error %d", __func__, mStatus);
 }
 
 EffectModule::~EffectModule()
@@ -612,15 +612,16 @@
         AudioEffect::guidToString(&mDescriptor.uuid, uuidStr, sizeof(uuidStr));
         ALOGW("EffectModule %p destructor called with unreleased interface, effect %s",
                 this, uuidStr);
-        release_l();
+        release_l("~EffectModule");
     }
 
 }
 
+// Returns true if any effect started or stopped.
 bool EffectModule::updateState_l() {
     audio_utils::lock_guard _l(mutex());
 
-    bool started = false;
+    bool startedOrStopped = false;
     switch (mState) {
     case RESTART:
         reset_l();
@@ -635,7 +636,7 @@
         }
         if (start_ll() == NO_ERROR) {
             mState = ACTIVE;
-            started = true;
+            startedOrStopped = true;
         } else {
             mState = IDLE;
         }
@@ -655,6 +656,7 @@
         // turn off sequence.
         if (--mDisableWaitCnt == 0) {
             reset_l();
+            startedOrStopped = true;
             mState = IDLE;
         }
         break;
@@ -669,7 +671,7 @@
         break;
     }
 
-    return started;
+    return startedOrStopped;
 }
 
 void EffectModule::process()
@@ -1044,8 +1046,21 @@
             return;
         }
 
-        (void)getCallback()->addEffectToHal(mEffectInterface);
-        mCurrentHalStream = getCallback()->io();
+        status_t status = getCallback()->addEffectToHal(mEffectInterface);
+        if (status == NO_ERROR) {
+            mCurrentHalStream = getCallback()->io();
+        }
+    }
+}
+
+void HwAccDeviceEffectModule::addEffectToHal_l()
+{
+    if (mAddedToHal) {
+        return;
+    }
+    status_t status = getCallback()->addEffectToHal(mEffectInterface);
+    if (status == NO_ERROR) {
+        mAddedToHal = true;
     }
 }
 
@@ -1127,13 +1142,14 @@
 }
 
 // must be called with EffectChain::mutex() held
-void EffectModule::release_l()
+void EffectModule::release_l(const std::string& from)
 {
     if (mEffectInterface != 0) {
         removeEffectFromHal_l();
         // release effect engine
         mEffectInterface->close();
         mEffectInterface.clear();
+        mEffectInterfaceDebug += " released by: " + from;
     }
 }
 
@@ -1150,6 +1166,16 @@
     return NO_ERROR;
 }
 
+status_t HwAccDeviceEffectModule::removeEffectFromHal_l()
+{
+    if (!mAddedToHal) {
+        return NO_ERROR;
+    }
+    getCallback()->removeEffectFromHal(mEffectInterface);
+    mAddedToHal = false;
+    return NO_ERROR;
+}
+
 // round up delta valid if value and divisor are positive.
 template <typename T>
 static T roundUpDelta(const T &value, const T &divisor) {
@@ -1357,36 +1383,49 @@
     }
 }
 
-status_t EffectModule::setVolume(uint32_t* left, uint32_t* right, bool controller) {
+status_t EffectModule::setVolume_l(uint32_t* left, uint32_t* right, bool controller, bool force) {
     AutoLockReentrant _l(mutex(), mSetVolumeReentrantTid);
     if (mStatus != NO_ERROR) {
         return mStatus;
     }
     status_t status = NO_ERROR;
     // Send volume indication if EFFECT_FLAG_VOLUME_IND is set and read back altered volume
-    // if controller flag is set (Note that controller == TRUE => EFFECT_FLAG_VOLUME_CTRL set)
-    if (isProcessEnabled() &&
+    // if controller flag is set (note that controller == TRUE means this is the volume controller
+    // effect in the effect chain)
+    if (((isOffloadedOrDirect_l() ? isEnabled() : isProcessEnabled()) || force) &&
             ((mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK) == EFFECT_FLAG_VOLUME_CTRL ||
              (mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK) == EFFECT_FLAG_VOLUME_IND ||
              (mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK) == EFFECT_FLAG_VOLUME_MONITOR)) {
-        status = setVolumeInternal(left, right, controller);
+        status = setVolumeInternal_ll(left, right, controller);
     }
     return status;
 }
 
-status_t EffectModule::setVolumeInternal(
+status_t EffectModule::setVolumeInternal_ll(
         uint32_t *left, uint32_t *right, bool controller) {
+    if (mVolume.has_value() && *left == mVolume.value()[0] && *right == mVolume.value()[1] &&
+            !controller) {
+        LOG_ALWAYS_FATAL_IF(
+                !mReturnedVolume.has_value(),
+                "The cached returned volume must not be null when the cached volume has value");
+        *left = mReturnedVolume.value()[0];
+        *right = mReturnedVolume.value()[1];
+        return NO_ERROR;
+    }
+    LOG_ALWAYS_FATAL_IF(mEffectInterface == nullptr, "%s", mEffectInterfaceDebug.c_str());
     uint32_t volume[2] = {*left, *right};
-    uint32_t *pVolume = controller ? volume : nullptr;
+    uint32_t* pVolume = isVolumeControl() ? volume : nullptr;
     uint32_t size = sizeof(volume);
     status_t status = mEffectInterface->command(EFFECT_CMD_SET_VOLUME,
                                                 size,
                                                 volume,
                                                 &size,
                                                 pVolume);
-    if (controller && status == NO_ERROR && size == sizeof(volume)) {
+    if (pVolume && status == NO_ERROR && size == sizeof(volume)) {
+        mVolume = {*left, *right}; // Cache the value that has been set
         *left = volume[0];
         *right = volume[1];
+        mReturnedVolume = {*left, *right};
     }
     return status;
 }
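setVolumeInternal_ll() above short-circuits when the same volume is re-applied to a non-controller effect, replaying the previously returned volume instead of issuing another EFFECT_CMD_SET_VOLUME. A reduced sketch of that caching idea, assuming a stereo volume pair and a hypothetical sendToHal() stand-in:

    #include <array>
    #include <cstdint>
    #include <optional>

    class VolumeCache {
    public:
        // Applies a stereo volume; only calls into the "HAL" when the request changed
        // or when the effect is the chain's volume controller.
        std::array<uint32_t, 2> apply(std::array<uint32_t, 2> requested, bool controller) {
            if (!controller && mSet == requested && mReturned.has_value()) {
                return *mReturned;                       // unchanged request: reuse cached reply
            }
            const std::array<uint32_t, 2> result = sendToHal(requested);  // stand-in for the HAL call
            mSet = requested;                            // remember what was requested
            mReturned = result;                          // and what the effect reported back
            return result;
        }

    private:
        static std::array<uint32_t, 2> sendToHal(std::array<uint32_t, 2> v) { return v; }
        std::optional<std::array<uint32_t, 2>> mSet;
        std::optional<std::array<uint32_t, 2>> mReturned;
    };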
@@ -1718,6 +1757,9 @@
         const sp<media::IEffectClient>& effectClient,
         int32_t priority, bool notifyFramesProcessed)
 {
+    if (client == nullptr && effectClient == nullptr) {
+        return sp<InternalEffectHandle>::make(effect, notifyFramesProcessed);
+    }
     return sp<EffectHandle>::make(
             effect, client, effectClient, priority, notifyFramesProcessed);
 }
@@ -1725,12 +1767,14 @@
 EffectHandle::EffectHandle(const sp<IAfEffectBase>& effect,
                                          const sp<Client>& client,
                                          const sp<media::IEffectClient>& effectClient,
-                                         int32_t priority, bool notifyFramesProcessed)
-    : BnEffect(),
+                                         int32_t priority, bool notifyFramesProcessed,
+                                         bool isInternal,
+                                         audio_utils::MutexOrder mutexOrder)
+    : BnEffect(), mMutex(audio_utils::mutex{mutexOrder}),
     mEffect(effect), mEffectClient(media::EffectClientAsyncProxy::makeIfNeeded(effectClient)),
     mClient(client), mCblk(nullptr),
     mPriority(priority), mHasControl(false), mEnabled(false), mDisconnected(false),
-    mNotifyFramesProcessed(notifyFramesProcessed)
+    mNotifyFramesProcessed(notifyFramesProcessed), mIsInternal(isInternal)
 {
     ALOGV("constructor %p client %p", this, client.get());
     setMinSchedulerPolicy(SCHED_NORMAL, ANDROID_PRIORITY_AUDIO);
@@ -1897,7 +1941,7 @@
 
 void EffectHandle::disconnect(bool unpinIfLast)
 {
-    audio_utils::lock_guard _l(mutex());
+    audio_utils::unique_lock _l(mutex());
     ALOGV("disconnect(%s) %p", unpinIfLast ? "true" : "false", this);
     if (mDisconnected) {
         if (unpinIfLast) {
@@ -1909,11 +1953,19 @@
     {
         sp<IAfEffectBase> effect = mEffect.promote();
         if (effect != 0) {
+            // Unlock, e.g. for a device effect: the call may need to acquire the AudioFlinger lock.
+            // An internal effect handle would also require the proxy lock (and vice versa).
+            if (isInternal()) {
+                _l.unlock();
+            }
             if (effect->disconnectHandle(this, unpinIfLast) > 0) {
                 ALOGW("%s Effect handle %p disconnected after thread destruction",
                     __func__, this);
             }
             effect->updatePolicyState();
+            if (isInternal()) {
+                _l.lock();
+            }
         }
     }
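For internal handles, disconnect() above temporarily drops its own lock around the call into the effect, since that path may need the AudioFlinger or proxy lock. The unlock/relock shape in isolation, with hypothetical names:

    #include <mutex>

    class Disconnector {
    public:
        void disconnect(bool internal) {
            std::unique_lock<std::mutex> lock(mMutex);
            // ... state checks done while holding the lock ...
            if (internal) lock.unlock();  // callee may take locks that must be acquired before mMutex
            notifyEffect();
            if (internal) lock.lock();    // re-acquire before touching protected state again
            // ... remaining cleanup under the lock ...
        }

    private:
        void notifyEffect() {}            // stand-in for disconnectHandle()/updatePolicyState()
        std::mutex mMutex;
    };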
 
@@ -2155,27 +2207,31 @@
 /* static */
 sp<IAfEffectChain> IAfEffectChain::create(
         const sp<IAfThreadBase>& thread,
-        audio_session_t sessionId)
+        audio_session_t sessionId,
+        const sp<IAfThreadCallback>& afThreadCallback)
 {
-    return sp<EffectChain>::make(thread, sessionId);
+    return sp<EffectChain>::make(thread, sessionId, afThreadCallback);
 }
 
-EffectChain::EffectChain(const sp<IAfThreadBase>& thread,
-                                       audio_session_t sessionId)
+EffectChain::EffectChain(const sp<IAfThreadBase>& thread, audio_session_t sessionId,
+                         const sp<IAfThreadCallback>& afThreadCallback)
     : mSessionId(sessionId), mActiveTrackCnt(0), mTrackCnt(0), mTailBufferCount(0),
       mLeftVolume(UINT_MAX), mRightVolume(UINT_MAX),
       mNewLeftVolume(UINT_MAX), mNewRightVolume(UINT_MAX),
-      mEffectCallback(new EffectCallback(wp<EffectChain>(this), thread))
+      mEffectCallback(new EffectCallback(wp<EffectChain>(this), thread, afThreadCallback))
 {
-    mStrategy = thread->getStrategyForStream(AUDIO_STREAM_MUSIC);
-    mMaxTailBuffers = ((kProcessTailDurationMs * thread->sampleRate()) / 1000) /
-                                    thread->frameCount();
+    if (thread != nullptr) {
+        mStrategy = thread->getStrategyForStream(AUDIO_STREAM_MUSIC);
+        mMaxTailBuffers =
+            ((kProcessTailDurationMs * thread->sampleRate()) / 1000) /
+                thread->frameCount();
+    }
 }
 
-// getEffectFromDesc_l() must be called with IAfThreadBase::mutex() held
-sp<IAfEffectModule> EffectChain::getEffectFromDesc_l(
+sp<IAfEffectModule> EffectChain::getEffectFromDesc(
         effect_descriptor_t *descriptor) const
 {
+    audio_utils::lock_guard _l(mutex());
     size_t size = mEffects.size();
 
     for (size_t i = 0; i < size; i++) {
@@ -2189,6 +2245,7 @@
 // getEffectFromId_l() must be called with IAfThreadBase::mutex() held
 sp<IAfEffectModule> EffectChain::getEffectFromId_l(int id) const
 {
+    audio_utils::lock_guard _l(mutex());
     size_t size = mEffects.size();
 
     for (size_t i = 0; i < size; i++) {
@@ -2204,6 +2261,7 @@
 sp<IAfEffectModule> EffectChain::getEffectFromType_l(
         const effect_uuid_t *type) const
 {
+    audio_utils::lock_guard _l(mutex());
     size_t size = mEffects.size();
 
     for (size_t i = 0; i < size; i++) {
@@ -2287,6 +2345,9 @@
     }
     bool doResetVolume = false;
     for (size_t i = 0; i < size; i++) {
+        // Reset the volume when any effect has just started or stopped.
+        // resetVolume_l will check whether the volume controller effect in the chain needs an
+        // update and apply the correct volume.
         doResetVolume = mEffects[i]->updateState_l() || doResetVolume;
     }
     if (doResetVolume) {
@@ -2294,8 +2355,7 @@
     }
 }
 
-// createEffect_l() must be called with IAfThreadBase::mutex() held
-status_t EffectChain::createEffect_l(sp<IAfEffectModule>& effect,
+status_t EffectChain::createEffect(sp<IAfEffectModule>& effect,
                                                    effect_descriptor_t *desc,
                                                    int id,
                                                    audio_session_t sessionId,
@@ -2305,7 +2365,7 @@
     effect = new EffectModule(mEffectCallback, desc, id, sessionId, pinned, AUDIO_PORT_HANDLE_NONE);
     status_t lStatus = effect->status();
     if (lStatus == NO_ERROR) {
-        lStatus = addEffect_ll(effect);
+        lStatus = addEffect_l(effect);
     }
     if (lStatus != NO_ERROR) {
         effect.clear();
@@ -2313,22 +2373,22 @@
     return lStatus;
 }
 
-// addEffect_l() must be called with IAfThreadBase::mutex() held
-status_t EffectChain::addEffect_l(const sp<IAfEffectModule>& effect)
+status_t EffectChain::addEffect(const sp<IAfEffectModule>& effect)
 {
     audio_utils::lock_guard _l(mutex());
-    return addEffect_ll(effect);
+    return addEffect_l(effect);
 }
-// addEffect_l() must be called with IAfThreadBase::mutex() and EffectChain::mutex() held
-status_t EffectChain::addEffect_ll(const sp<IAfEffectModule>& effect)
+// addEffect_l() must be called with EffectChain::mutex() held
+status_t EffectChain::addEffect_l(const sp<IAfEffectModule>& effect)
 {
     effect->setCallback(mEffectCallback);
 
     effect_descriptor_t desc = effect->desc();
+    ssize_t idx_insert = 0;
     if ((desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
         // Auxiliary effects are inserted at the beginning of mEffects vector as
         // they are processed first and accumulated in chain input buffer
-        mEffects.insertAt(effect, 0);
+        mEffects.insertAt(effect, idx_insert);
 
         // the input buffer for auxiliary effect contains mono samples in
         // 32 bit format. This is to avoid saturation in AudioMixer
@@ -2348,7 +2408,7 @@
         // by insert effects
         effect->setOutBuffer(mInBuffer);
     } else {
-        ssize_t idx_insert = getInsertIndex_ll(desc);
+        idx_insert = getInsertIndex_l(desc);
         if (idx_insert < 0) {
             return INVALID_OPERATION;
         }
@@ -2397,6 +2457,18 @@
     }
     effect->configure_l();
 
+    if (effect->isVolumeControl()) {
+        const auto volumeControlIndex = findVolumeControl_l(0, mEffects.size());
+        if (!volumeControlIndex.has_value() || (ssize_t)volumeControlIndex.value() < idx_insert) {
+            // If this effect will become the new volume control effect once enabled, force its
+            // volume to 0 at initialization for safer ramping. The actual volume will be set from
+            // setVolume_l.
+            uint32_t left = 0;
+            uint32_t right = 0;
+            effect->setVolume_l(&left, &right, true /*controller*/, true /*force*/);
+        }
+    }
+
     return NO_ERROR;
 }
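The addEffect_l() change above pre-sets a newly inserted volume control effect to zero when it is expected to take over volume control, so the first real volume update ramps up instead of jumping. A rough, hypothetical sketch of that idea:

    #include <cstdint>
    #include <optional>
    #include <vector>

    struct Fx {
        bool volumeControl = false;
        uint32_t left = 1u << 24;   // unity gain in 8.24 fixed point
        uint32_t right = 1u << 24;
    };

    // If the effect just inserted at 'idx' would become the chain's volume controller
    // (no controller yet, or the current controller sits before 'idx'), start it at zero
    // so the next real volume update ramps up instead of jumping.
    void primeNewController(std::vector<Fx>& chain, size_t idx) {
        if (!chain[idx].volumeControl) return;
        std::optional<size_t> current;
        for (size_t i = 0; i < chain.size(); ++i) {
            if (i != idx && chain[i].volumeControl) { current = i; break; }
        }
        if (!current.has_value() || *current < idx) {
            chain[idx].left = 0;
            chain[idx].right = 0;
        }
    }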
 
@@ -2409,7 +2481,7 @@
     return std::nullopt;
 }
 
-ssize_t EffectChain::getInsertIndex_ll(const effect_descriptor_t& desc) {
+ssize_t EffectChain::getInsertIndex_l(const effect_descriptor_t& desc) {
     // Insert effects are inserted at the end of mEffects vector as they are processed
     //  after track and auxiliary effects.
     // Insert effect order as a function of indicated preference:
@@ -2482,14 +2554,14 @@
     return idx_insert;
 }
 
-// removeEffect_l() must be called with IAfThreadBase::mutex() held
-size_t EffectChain::removeEffect_l(const sp<IAfEffectModule>& effect,
+size_t EffectChain::removeEffect(const sp<IAfEffectModule>& effect,
                                                  bool release)
 {
     audio_utils::lock_guard _l(mutex());
     size_t size = mEffects.size();
     uint32_t type = effect->desc().flags & EFFECT_FLAG_TYPE_MASK;
 
+    const bool hasThreadAttached = mEffectCallback->hasThreadAttached();
     for (size_t i = 0; i < size; i++) {
         if (effect == mEffects[i]) {
             // calling stop here will remove pre-processing effect from the audio HAL.
@@ -2500,10 +2572,10 @@
                 mEffects[i]->stop_l();
             }
             if (release) {
-                mEffects[i]->release_l();
+                mEffects[i]->release_l("EffectChain::removeEffect");
             }
-
-            if (type != EFFECT_FLAG_TYPE_AUXILIARY) {
+            // Skip this when no thread is attached (could otherwise lead to SIGFPE as the frame
+            // count is 0).
+            if (hasThreadAttached && type != EFFECT_FLAG_TYPE_AUXILIARY) {
                 if (i == size - 1 && i != 0) {
                     mEffects[i - 1]->configure_l();
                     mEffects[i - 1]->setOutBuffer(mOutBuffer);
@@ -2515,7 +2587,7 @@
             // make sure the input buffer configuration for the new first effect in the chain
             // is updated if needed (can switch from HAL channel mask to mixer channel mask)
             if (type != EFFECT_FLAG_TYPE_AUXILIARY // TODO(b/284522658) breaks for aux FX, why?
-                    && i == 0 && size > 1) {
+                    && hasThreadAttached && i == 0 && size > 1) {
                 mEffects[0]->configure_l();
                 mEffects[0]->setInBuffer(mInBuffer);
                 mEffects[0]->updateAccessMode_l();      // reconfig if needed.
@@ -2533,6 +2605,7 @@
 // setDevices_l() must be called with IAfThreadBase::mutex() held
 void EffectChain::setDevices_l(const AudioDeviceTypeAddrVector &devices)
 {
+    audio_utils::lock_guard _l(mutex());
     size_t size = mEffects.size();
     for (size_t i = 0; i < size; i++) {
         mEffects[i]->setDevices(devices);
@@ -2542,6 +2615,7 @@
 // setInputDevice_l() must be called with IAfThreadBase::mutex() held
 void EffectChain::setInputDevice_l(const AudioDeviceTypeAddr &device)
 {
+    audio_utils::lock_guard _l(mutex());
     size_t size = mEffects.size();
     for (size_t i = 0; i < size; i++) {
         mEffects[i]->setInputDevice(device);
@@ -2551,6 +2625,7 @@
 // setMode_l() must be called with IAfThreadBase::mutex() held
 void EffectChain::setMode_l(audio_mode_t mode)
 {
+    audio_utils::lock_guard _l(mutex());
     size_t size = mEffects.size();
     for (size_t i = 0; i < size; i++) {
         mEffects[i]->setMode(mode);
@@ -2560,6 +2635,7 @@
 // setAudioSource_l() must be called with IAfThreadBase::mutex() held
 void EffectChain::setAudioSource_l(audio_source_t source)
 {
+    audio_utils::lock_guard _l(mutex());
     size_t size = mEffects.size();
     for (size_t i = 0; i < size; i++) {
         mEffects[i]->setAudioSource(source);
@@ -2587,6 +2663,7 @@
 
     // first update volume controller
     const auto volumeControlIndex = findVolumeControl_l(0, size);
+    // index of the effect chain volume controller
     const int ctrlIdx = volumeControlIndex.value_or(-1);
     const sp<IAfEffectModule> volumeControlEffect =
             volumeControlIndex.has_value() ? mEffects[ctrlIdx] : nullptr;
@@ -2600,30 +2677,33 @@
         }
         return volumeControlIndex.has_value();
     }
+    mVolumeControlEffect = volumeControlEffect;
 
-    if (volumeControlEffect != cachedVolumeControlEffect) {
-        // The volume control effect is a new one. Set the old one as full volume. Set the new onw
-        // as zero for safe ramping.
-        if (cachedVolumeControlEffect != nullptr) {
+    for (int i = 0; i < ctrlIdx; ++i) {
+        // Effects placed before the volume controller do not control the effect chain volume.
+        // If any of them has volume control capability, set its volume to maximum to avoid
+        // double attenuation.
+        if (mEffects[i]->isVolumeControl()) {
             uint32_t leftMax = 1 << 24;
             uint32_t rightMax = 1 << 24;
-            cachedVolumeControlEffect->setVolume(&leftMax, &rightMax, true /*controller*/);
+            mEffects[i]->setVolume_l(&leftMax, &rightMax,
+                                     false /* not an effect chain volume controller */,
+                                     true /* force */);
         }
-        if (volumeControlEffect != nullptr) {
-            uint32_t leftZero = 0;
-            uint32_t rightZero = 0;
-            volumeControlEffect->setVolume(&leftZero, &rightZero, true /*controller*/);
-        }
-        mVolumeControlEffect = volumeControlEffect;
     }
+
     mLeftVolume = newLeft;
     mRightVolume = newRight;
 
     // second get volume update from volume controller
     if (ctrlIdx >= 0) {
-        mEffects[ctrlIdx]->setVolume(&newLeft, &newRight, true);
+        mEffects[ctrlIdx]->setVolume_l(&newLeft, &newRight,
+                                       true /* effect chain volume controller */);
         mNewLeftVolume = newLeft;
         mNewRightVolume = newRight;
+        ALOGD("%s sessionId %d volume controller effect %s set (%d, %d), ret (%d, %d)", __func__,
+              mSessionId, mEffects[ctrlIdx]->desc().name, mLeftVolume, mRightVolume, newLeft,
+              newRight);
     }
     // then indicate volume to all other effects in chain.
     // Pass altered volume to effects before volume controller
@@ -2642,9 +2722,11 @@
         }
         // Pass requested volume directly if this is volume monitor module
         if (mEffects[i]->isVolumeMonitor()) {
-            mEffects[i]->setVolume(left, right, false);
+            mEffects[i]->setVolume_l(left, right,
+                                     false /* not an effect chain volume controller */);
         } else {
-            mEffects[i]->setVolume(&lVol, &rVol, false);
+            mEffects[i]->setVolume_l(&lVol, &rVol,
+                                     false /* not an effect chain volume controller */);
         }
     }
     *left = newLeft;
@@ -2665,8 +2747,12 @@
     }
 }
 
-// containsHapticGeneratingEffect_l must be called with
-// IAfThreadBase::mutex() or EffectChain::mutex() held
+bool EffectChain::containsHapticGeneratingEffect()
+{
+    audio_utils::lock_guard _l(mutex());
+    return containsHapticGeneratingEffect_l();
+}
+// containsHapticGeneratingEffect_l must be called with EffectChain::mutex() held
 bool EffectChain::containsHapticGeneratingEffect_l()
 {
     for (size_t i = 0; i < mEffects.size(); ++i) {
@@ -2807,7 +2893,7 @@
         }
         if (desc->mRefCount++ == 0) {
             Vector< sp<IAfEffectModule> > effects;
-            getSuspendEligibleEffects_l(effects);
+            getSuspendEligibleEffects(effects);
             for (size_t i = 0; i < effects.size(); i++) {
                 setEffectSuspended_l(&effects[i]->desc().type, true);
             }
@@ -2857,7 +2943,7 @@
     return false;
 }
 
-bool EffectChain::isEffectEligibleForSuspend_l(const effect_descriptor_t& desc)
+bool EffectChain::isEffectEligibleForSuspend(const effect_descriptor_t& desc)
 {
     // auxiliary effects and visualizer are never suspended on output mix
     if ((mSessionId == AUDIO_SESSION_OUTPUT_MIX) &&
@@ -2870,12 +2956,13 @@
     return true;
 }
 
-void EffectChain::getSuspendEligibleEffects_l(
+void EffectChain::getSuspendEligibleEffects(
         Vector< sp<IAfEffectModule> > &effects)
 {
     effects.clear();
+    audio_utils::lock_guard _l(mutex());
     for (size_t i = 0; i < mEffects.size(); i++) {
-        if (isEffectEligibleForSuspend_l(mEffects[i]->desc())) {
+        if (isEffectEligibleForSuspend(mEffects[i]->desc())) {
             effects.add(mEffects[i]);
         }
     }
@@ -2896,7 +2983,7 @@
             if (index < 0) {
                 return;
             }
-            if (!isEffectEligibleForSuspend_l(effect->desc())) {
+            if (!isEffectEligibleForSuspend(effect->desc())) {
                 return;
             }
             setEffectSuspended_l(&effect->desc().type, enabled);
@@ -2944,6 +3031,12 @@
 
 void EffectChain::setThread(const sp<IAfThreadBase>& thread)
 {
+    if (thread != nullptr) {
+        mStrategy = thread->getStrategyForStream(AUDIO_STREAM_MUSIC);
+        mMaxTailBuffers =
+            ((kProcessTailDurationMs * thread->sampleRate()) / 1000) /
+                thread->frameCount();
+    }
     audio_utils::lock_guard _l(mutex());
     mEffectCallback->setThread(thread);
 }
@@ -3133,7 +3226,7 @@
 uint32_t EffectChain::EffectCallback::sampleRate() const {
     const sp<IAfThreadBase> t = thread().promote();
     if (t == nullptr) {
-        return 0;
+        return DEFAULT_OUTPUT_SAMPLE_RATE;
     }
     return t->sampleRate();
 }
@@ -3141,19 +3234,20 @@
 audio_channel_mask_t EffectChain::EffectCallback::inChannelMask(int id) const
 NO_THREAD_SAFETY_ANALYSIS
 // calling function 'hasAudioSession_l' requires holding mutex 'ThreadBase_Mutex' exclusively
+// calling function 'isFirstEffect_l' requires holding mutex 'EffectChain_Mutex' exclusively
 {
     const sp<IAfThreadBase> t = thread().promote();
     if (t == nullptr) {
-        return AUDIO_CHANNEL_NONE;
+        return AUDIO_CHANNEL_OUT_STEREO;
     }
     sp<IAfEffectChain> c = chain().promote();
     if (c == nullptr) {
-        return AUDIO_CHANNEL_NONE;
+        return AUDIO_CHANNEL_OUT_STEREO;
     }
 
     if (mThreadType == IAfThreadBase::SPATIALIZER) {
         if (c->sessionId() == AUDIO_SESSION_OUTPUT_STAGE) {
-            if (c->isFirstEffect(id)) {
+            if (c->isFirstEffect_l(id)) {
                 return t->mixerChannelMask();
             } else {
                 return t->channelMask();
@@ -3183,11 +3277,11 @@
 {
     const sp<IAfThreadBase> t = thread().promote();
     if (t == nullptr) {
-        return AUDIO_CHANNEL_NONE;
+        return AUDIO_CHANNEL_OUT_STEREO;
     }
     sp<IAfEffectChain> c = chain().promote();
     if (c == nullptr) {
-        return AUDIO_CHANNEL_NONE;
+        return AUDIO_CHANNEL_OUT_STEREO;
     }
 
     if (mThreadType == IAfThreadBase::SPATIALIZER) {
@@ -3221,7 +3315,8 @@
 size_t EffectChain::EffectCallback::frameCount() const {
     const sp<IAfThreadBase> t = thread().promote();
     if (t == nullptr) {
-        return 0;
+        // frameCount cannot be zero.
+        return 1;
     }
     return t->frameCount();
 }
@@ -3449,19 +3544,17 @@
             ALOGV("%s reusing HAL effect", __func__);
         } else {
             mDevicePort = *port;
-            mHalEffect = new EffectModule(mMyCallback,
-                                      const_cast<effect_descriptor_t *>(&mDescriptor),
-                                      mMyCallback->newEffectId(), AUDIO_SESSION_DEVICE,
-                                      false /* pinned */, port->id);
+            mHalEffect = sp<HwAccDeviceEffectModule>::make(mMyCallback,
+                    const_cast<effect_descriptor_t *>(&mDescriptor), mMyCallback->newEffectId(),
+                    port->id);
+            mHalEffect->configure_l();
             if (audio_is_input_device(mDevice.mType)) {
                 mHalEffect->setInputDevice(mDevice);
             } else {
                 mHalEffect->setDevices({mDevice});
             }
-            mHalEffect->configure_l();
         }
-        *handle = new EffectHandle(mHalEffect, nullptr, nullptr, 0 /*priority*/,
-                                   mNotifyFramesProcessed);
+        *handle = new InternalEffectHandle(mHalEffect, mNotifyFramesProcessed);
         status = (*handle)->initCheck();
         if (status == OK) {
             status = mHalEffect->addHandle((*handle).get());
@@ -3508,15 +3601,16 @@
     return status;
 }
 
-void DeviceEffectProxy::onReleasePatch(audio_patch_handle_t patchHandle) {
-    sp<IAfEffectHandle> effect;
+sp<IAfEffectHandle> DeviceEffectProxy::onReleasePatch(audio_patch_handle_t patchHandle) {
+    sp<IAfEffectHandle> disconnectedHandle;
     {
         audio_utils::lock_guard _l(proxyMutex());
         if (mEffectHandles.find(patchHandle) != mEffectHandles.end()) {
-            effect = mEffectHandles.at(patchHandle);
+            disconnectedHandle = std::move(mEffectHandles.at(patchHandle));
             mEffectHandles.erase(patchHandle);
         }
     }
+    return disconnectedHandle;
 }
 
 
@@ -3524,7 +3618,7 @@
 {
     audio_utils::lock_guard _l(proxyMutex());
     if (effect == mHalEffect) {
-        mHalEffect->release_l();
+        mHalEffect->release_l("DeviceEffectProxy::removeEffect");
         mHalEffect.clear();
         mDevicePort.id = AUDIO_PORT_HANDLE_NONE;
     }
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index 46c44a6..9ecf89e 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -25,6 +25,8 @@
 #include <private/media/AudioEffectShared.h>
 
 #include <map>  // avoid transitive dependency
+#include <optional>
+#include <vector>
 
 namespace android {
 
@@ -177,7 +179,7 @@
 // the attached track(s) to accumulate their auxiliary channel.
 class EffectModule : public IAfEffectModule, public EffectBase {
 public:
-    EffectModule(const sp<EffectCallbackInterface>& callabck,
+    EffectModule(const sp<EffectCallbackInterface>& callback,
                     effect_descriptor_t *desc,
                     int id,
                     audio_session_t sessionId,
@@ -215,7 +217,8 @@
     }
     status_t setDevices(const AudioDeviceTypeAddrVector& devices) final EXCLUDES_EffectBase_Mutex;
     status_t setInputDevice(const AudioDeviceTypeAddr& device) final EXCLUDES_EffectBase_Mutex;
-    status_t setVolume(uint32_t *left, uint32_t *right, bool controller) final;
+    status_t setVolume_l(uint32_t* left, uint32_t* right, bool controller, bool force) final
+            REQUIRES(audio_utils::EffectChain_Mutex);
     status_t setMode(audio_mode_t mode) final EXCLUDES_EffectBase_Mutex;
     status_t setAudioSource(audio_source_t source) final EXCLUDES_EffectBase_Mutex;
     status_t start_l() final REQUIRES(audio_utils::EffectChain_Mutex) EXCLUDES_EffectBase_Mutex;
@@ -225,8 +228,8 @@
             REQUIRES(audio_utils::EffectChain_Mutex) EXCLUDES_EffectBase_Mutex;
     bool isOffloaded_l() const final
             REQUIRES(audio_utils::EffectChain_Mutex) EXCLUDES_EffectBase_Mutex;
-    void addEffectToHal_l() final REQUIRES(audio_utils::EffectChain_Mutex);
-    void release_l() final REQUIRES(audio_utils::EffectChain_Mutex);
+    void addEffectToHal_l() override REQUIRES(audio_utils::EffectChain_Mutex);
+    void release_l(const std::string& from = "") final REQUIRES(audio_utils::EffectChain_Mutex);
 
     sp<IAfEffectModule> asEffectModule() final { return this; }
 
@@ -234,9 +237,9 @@
     bool isSpatializer() const final;
 
     status_t setHapticScale_l(int id, os::HapticScale hapticScale) final
-            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectBase_Mutex;
+            REQUIRES(audio_utils::EffectChain_Mutex) EXCLUDES_EffectBase_Mutex;
     status_t setVibratorInfo_l(const media::AudioVibratorInfo& vibratorInfo) final
-            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectBase_Mutex;
+            REQUIRES(audio_utils::EffectChain_Mutex) EXCLUDES_EffectBase_Mutex;
     status_t sendMetadata_ll(const std::vector<playback_track_metadata_v7_t>& metadata) final
             REQUIRES(audio_utils::ThreadBase_Mutex,
                      audio_utils::EffectChain_Mutex) EXCLUDES_EffectBase_Mutex;
@@ -247,6 +250,9 @@
 
     void dump(int fd, const Vector<String16>& args) const final;
 
+protected:
+    sp<EffectHalInterface> mEffectInterface; // Effect module HAL
+
 private:
 
     // Maximum time allocated to effect engines to complete the turn off sequence
@@ -256,18 +262,18 @@
 
     status_t start_ll() REQUIRES(audio_utils::EffectChain_Mutex, audio_utils::EffectBase_Mutex);
     status_t stop_ll() REQUIRES(audio_utils::EffectChain_Mutex, audio_utils::EffectBase_Mutex);
-    status_t removeEffectFromHal_l() REQUIRES(audio_utils::EffectChain_Mutex);
+    status_t removeEffectFromHal_l() override REQUIRES(audio_utils::EffectChain_Mutex);
     status_t sendSetAudioDevicesCommand(const AudioDeviceTypeAddrVector &devices, uint32_t cmdCode);
     effect_buffer_access_e requiredEffectBufferAccessMode() const {
         return mConfig.inputCfg.buffer.raw == mConfig.outputCfg.buffer.raw
                 ? EFFECT_BUFFER_ACCESS_WRITE : EFFECT_BUFFER_ACCESS_ACCUMULATE;
     }
 
-    status_t setVolumeInternal(uint32_t *left, uint32_t *right, bool controller);
-
+    status_t setVolumeInternal_ll(uint32_t* left, uint32_t* right,
+                                  bool controller /* the volume controller effect of the chain */)
+            REQUIRES(audio_utils::EffectChain_Mutex, audio_utils::EffectBase_Mutex);
 
     effect_config_t     mConfig;    // input and output audio configuration
-    sp<EffectHalInterface> mEffectInterface; // Effect module HAL
     sp<EffectBufferHalInterface> mInBuffer;  // Buffers for interacting with HAL
     sp<EffectBufferHalInterface> mOutBuffer;
     status_t            mStatus;    // initialization status
@@ -289,12 +295,12 @@
     template <typename MUTEX>
     class AutoLockReentrant {
     public:
-        AutoLockReentrant(MUTEX& mutex, pid_t allowedTid)
+        AutoLockReentrant(MUTEX& mutex, pid_t allowedTid) ACQUIRE(audio_utils::EffectBase_Mutex)
             : mMutex(gettid() == allowedTid ? nullptr : &mutex)
         {
             if (mMutex != nullptr) mMutex->lock();
         }
-        ~AutoLockReentrant() {
+        ~AutoLockReentrant() RELEASE(audio_utils::EffectBase_Mutex) {
             if (mMutex != nullptr) mMutex->unlock();
         }
     private:
@@ -304,6 +310,26 @@
     static constexpr pid_t INVALID_PID = (pid_t)-1;
     // this tid is allowed to call setVolume() without acquiring the mutex.
     pid_t mSetVolumeReentrantTid = INVALID_PID;
+
+    // Cache the volume that has been set successfully.
+    std::optional<std::vector<uint32_t>> mVolume;
+    // Cache the volume returned by the effect when the volume was set successfully. This value
+    // indicates the volume to apply before this effect.
+    std::optional<std::vector<uint32_t>> mReturnedVolume;
+    // TODO: b/315995877 - remove this debugging string once the root cause is found.
+    std::string mEffectInterfaceDebug GUARDED_BY(audio_utils::EffectChain_Mutex);
+};
+
+class HwAccDeviceEffectModule : public EffectModule {
+public:
+    HwAccDeviceEffectModule(const sp<EffectCallbackInterface>& callback, effect_descriptor_t *desc,
+            int id, audio_port_handle_t deviceId) :
+        EffectModule(callback, desc, id, AUDIO_SESSION_DEVICE, /* pinned */ false, deviceId) {}
+    void addEffectToHal_l() final REQUIRES(audio_utils::EffectChain_Mutex);
+
+private:
+    status_t removeEffectFromHal_l() final REQUIRES(audio_utils::EffectChain_Mutex);
+    bool mAddedToHal = false;
 };
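HwAccDeviceEffectModule tracks whether the effect instance is currently attached to the HAL so that addEffectToHal_l() and removeEffectFromHal_l() stay idempotent. The bookkeeping reduces to a sketch like this (hypothetical callback wrappers, not the real interfaces):

    #include <functional>
    #include <utility>

    class HalAttachment {
    public:
        HalAttachment(std::function<bool()> addToHal, std::function<void()> removeFromHal)
            : mAdd(std::move(addToHal)), mRemove(std::move(removeFromHal)) {}

        void add() {
            if (mAdded) return;           // already attached: nothing to do
            if (mAdd()) mAdded = true;    // mark attached only when the HAL call succeeded
        }

        void remove() {
            if (!mAdded) return;          // never attached, or already detached
            mRemove();
            mAdded = false;
        }

    private:
        std::function<bool()> mAdd;
        std::function<void()> mRemove;
        bool mAdded = false;              // mirrors mAddedToHal above
    };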
 
 // The EffectHandle class implements the IEffect interface. It provides resources
@@ -318,7 +344,8 @@
     EffectHandle(const sp<IAfEffectBase>& effect,
             const sp<Client>& client,
             const sp<media::IEffectClient>& effectClient,
-            int32_t priority, bool notifyFramesProcessed);
+            int32_t priority, bool notifyFramesProcessed, bool isInternal = false,
+            audio_utils::MutexOrder mutexOrder = audio_utils::MutexOrder::kEffectHandle_Mutex);
     ~EffectHandle() override;
     status_t onTransact(
             uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) final;
@@ -338,6 +365,11 @@
                                       int32_t* _aidl_return) final;
 
     const sp<Client>& client() const final { return mClient; }
+    /**
+     * Checks whether the handle is internal, i.e. created by AudioFlinger for its own needs (e.g.
+     * a device effect HAL handle or a device effect thread handle).
+     */
+    virtual bool isInternal() const { return mIsInternal; }
 
     sp<android::media::IEffect> asIEffect() final {
         return sp<android::media::IEffect>::fromExisting(this);
@@ -375,15 +407,18 @@
 
     void dumpToBuffer(char* buffer, size_t size) const final;
 
+protected:
+    // protects IEffect method calls
+    mutable audio_utils::mutex mMutex;
 
 private:
     DISALLOW_COPY_AND_ASSIGN(EffectHandle);
 
-    audio_utils::mutex& mutex() const RETURN_CAPABILITY(android::audio_utils::EffectHandle_Mutex) {
+    virtual audio_utils::mutex& mutex() const
+            RETURN_CAPABILITY(android::audio_utils::EffectHandle_Mutex) {
         return mMutex;
     }
-    // protects IEffect method calls
-    mutable audio_utils::mutex mMutex{audio_utils::MutexOrder::kEffectHandle_Mutex};
+
     const wp<IAfEffectBase> mEffect;               // pointer to controlled EffectModule
     const sp<media::IEffectClient> mEffectClient;  // callback interface for client notifications
     /*const*/ sp<Client> mClient;            // client for shared memory allocation, see
@@ -399,6 +434,28 @@
     bool mDisconnected;                      // Set to true by disconnect()
     const bool mNotifyFramesProcessed;       // true if the client callback event
                                              // EVENT_FRAMES_PROCESSED must be generated
+    const bool mIsInternal;
+};
+
+/**
+ * There are 2 types of effects:
+ * -Session Effect: the handle is called directly from the client, without the AudioFlinger lock.
+ * -Device Effect: a device effect proxy aggregates a collection of internal effect handles that
+ * control the same effect added on all audio patches involving the port selected for the device
+ * effect, as requested either by a client or by AudioPolicyEffects. These internal effect handles
+ * have no client. The sequence flow implies a different locking order, hence the lock is
+ * specialized.
+ */
+class InternalEffectHandle : public EffectHandle {
+public:
+    InternalEffectHandle(const sp<IAfEffectBase>& effect, bool notifyFramesProcessed) :
+            EffectHandle(effect, /* client= */ nullptr, /* effectClient= */ nullptr,
+                         /* priority= */ 0, notifyFramesProcessed, /* isInternal */ true,
+                         audio_utils::MutexOrder::kDeviceEffectHandle_Mutex) {}
+
+    virtual audio_utils::mutex& mutex() const
+            RETURN_CAPABILITY(android::audio_utils::DeviceEffectHandle_Mutex) {
+        return mMutex;
+    }
 };
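InternalEffectHandle reuses the EffectHandle implementation but constructs its mutex with a different lock-order rank so it can be taken while the device effect proxy lock is held. Conceptually, with generic stand-in types rather than the audio_utils API:

    #include <mutex>

    enum class LockRank { kEffectHandle, kDeviceEffectHandle };

    class HandleBase {
    public:
        explicit HandleBase(bool internal, LockRank rank = LockRank::kEffectHandle)
            : mInternal(internal), mRank(rank) {}
        bool isInternal() const { return mInternal; }
        LockRank lockRank() const { return mRank; }

    protected:
        mutable std::mutex mMutex;  // protects the handle's method calls

    private:
        const bool mInternal;
        const LockRank mRank;
    };

    // An internal handle reuses the base implementation but registers its mutex at a
    // different rank, so taking it while a proxy lock is held does not violate ordering.
    class InternalHandle : public HandleBase {
    public:
        InternalHandle() : HandleBase(/* internal= */ true, LockRank::kDeviceEffectHandle) {}
    };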
 
 // the EffectChain class represents a group of effects associated to one audio session.
@@ -412,7 +469,9 @@
 // it also provide it's own input buffer used by the track as accumulation buffer.
 class EffectChain : public IAfEffectChain {
 public:
-    EffectChain(const sp<IAfThreadBase>& thread, audio_session_t sessionId);
+    EffectChain(const sp<IAfThreadBase>& thread,
+                audio_session_t sessionId,
+                const sp<IAfThreadCallback>& afThreadCallback);
 
     void process_l() final REQUIRES(audio_utils::EffectChain_Mutex);
 
@@ -420,25 +479,25 @@
         return mMutex;
     }
 
-    status_t createEffect_l(sp<IAfEffectModule>& effect, effect_descriptor_t* desc, int id,
+    status_t createEffect(sp<IAfEffectModule>& effect, effect_descriptor_t* desc, int id,
                             audio_session_t sessionId, bool pinned) final
-            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex;
+            EXCLUDES_EffectChain_Mutex;
+    status_t addEffect(const sp<IAfEffectModule>& handle) final
+            EXCLUDES_EffectChain_Mutex;
     status_t addEffect_l(const sp<IAfEffectModule>& handle) final
-            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex;
-    status_t addEffect_ll(const sp<IAfEffectModule>& handle) final
-            REQUIRES(audio_utils::ThreadBase_Mutex, audio_utils::EffectChain_Mutex);
-    size_t removeEffect_l(const sp<IAfEffectModule>& handle, bool release = false) final
-            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex;
+            REQUIRES(audio_utils::EffectChain_Mutex);
+    size_t removeEffect(const sp<IAfEffectModule>& handle, bool release = false) final
+            EXCLUDES_EffectChain_Mutex;
 
     audio_session_t sessionId() const final { return mSessionId; }
     void setSessionId(audio_session_t sessionId) final { mSessionId = sessionId; }
 
-    sp<IAfEffectModule> getEffectFromDesc_l(effect_descriptor_t* descriptor) const final
-            REQUIRES(audio_utils::ThreadBase_Mutex);
+    sp<IAfEffectModule> getEffectFromDesc(effect_descriptor_t* descriptor) const final
+            EXCLUDES_EffectChain_Mutex;
     sp<IAfEffectModule> getEffectFromId_l(int id) const final
-            REQUIRES(audio_utils::ThreadBase_Mutex);
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex;
     sp<IAfEffectModule> getEffectFromType_l(const effect_uuid_t* type) const final
-            REQUIRES(audio_utils::ThreadBase_Mutex);
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex;
     std::vector<int> getEffectIds_l() const final REQUIRES(audio_utils::ThreadBase_Mutex);
     // FIXME use float to improve the dynamic range
 
@@ -446,11 +505,13 @@
                    bool force = false) final EXCLUDES_EffectChain_Mutex;
     void resetVolume_l() final REQUIRES(audio_utils::EffectChain_Mutex);
     void setDevices_l(const AudioDeviceTypeAddrVector& devices) final
-            REQUIRES(audio_utils::ThreadBase_Mutex);
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex;
     void setInputDevice_l(const AudioDeviceTypeAddr& device) final
-            REQUIRES(audio_utils::ThreadBase_Mutex);
-    void setMode_l(audio_mode_t mode) final REQUIRES(audio_utils::ThreadBase_Mutex);
-    void setAudioSource_l(audio_source_t source) final REQUIRES(audio_utils::ThreadBase_Mutex);
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex;
+    void setMode_l(audio_mode_t mode) final
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex;
+    void setAudioSource_l(audio_source_t source) final
+            REQUIRES(audio_utils::ThreadBase_Mutex)  EXCLUDES_EffectChain_Mutex;
 
     void setInBuffer(const sp<EffectBufferHalInterface>& buffer) final {
         mInBuffer = buffer;
@@ -517,8 +578,11 @@
     bool isCompatibleWithThread_l(const sp<IAfThreadBase>& thread) const final
             REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex;
 
-    // Requires either IAfThreadBase::mutex() or EffectChain::mutex() held
-    bool containsHapticGeneratingEffect_l() final;
+    bool containsHapticGeneratingEffect() final
+            EXCLUDES_EffectChain_Mutex;
+
+    bool containsHapticGeneratingEffect_l() final
+            REQUIRES(audio_utils::EffectChain_Mutex);
 
     void setHapticScale_l(int id, os::HapticScale hapticScale) final
             REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex;
@@ -527,15 +591,19 @@
 
     wp<IAfThreadBase> thread() const final { return mEffectCallback->thread(); }
 
-    bool isFirstEffect(int id) const final {
+    bool isFirstEffect_l(int id) const final REQUIRES(audio_utils::EffectChain_Mutex) {
         return !mEffects.isEmpty() && id == mEffects[0]->id();
     }
 
     void dump(int fd, const Vector<String16>& args) const final;
 
-    size_t numberOfEffects() const final { return mEffects.size(); }
+    size_t numberOfEffects() const final {
+      audio_utils::lock_guard _l(mutex());
+      return mEffects.size();
+    }
 
     sp<IAfEffectModule> getEffectModule(size_t index) const final {
+        audio_utils::lock_guard _l(mutex());
         return mEffects[index];
     }
 
@@ -560,11 +628,13 @@
         // Note: ctors taking a weak pointer to their owner must not promote it
         // during construction (but may keep a reference for later promotion).
         EffectCallback(const wp<EffectChain>& owner,
-                const sp<IAfThreadBase>& thread)  // we take a sp<> but store a wp<>.
+                const sp<IAfThreadBase>& thread,
+                const sp<IAfThreadCallback>& afThreadCallback)  // we take a sp<> but store a wp<>.
             : mChain(owner)
-            , mThread(thread) {
-            mThreadType = thread->type();
-            mAfThreadCallback = thread->afThreadCallback();
+            , mThread(thread), mAfThreadCallback(afThreadCallback) {
+            if (thread != nullptr) {
+                mThreadType = thread->type();
+            }
         }
 
         status_t createEffectHal(const effect_uuid_t *pEffectUuid,
@@ -606,6 +676,9 @@
         wp<IAfEffectChain> chain() const final { return mChain; }
 
         bool isAudioPolicyReady() const final {
+            if (mAfThreadCallback == nullptr) {
+                return false;
+            }
             return mAfThreadCallback->isAudioPolicyReady();
         }
 
@@ -613,15 +686,19 @@
 
         void setThread(const sp<IAfThreadBase>& thread) {
             mThread = thread;
-            mThreadType = thread->type();
-            mAfThreadCallback = thread->afThreadCallback();
+            if (thread != nullptr) {
+                mThreadType = thread->type();
+                mAfThreadCallback = thread->afThreadCallback();
+            }
         }
-
+        bool hasThreadAttached() const {
+            return thread().promote() != nullptr;
+        }
     private:
         const wp<IAfEffectChain> mChain;
         mediautils::atomic_wp<IAfThreadBase> mThread;
         sp<IAfThreadCallback> mAfThreadCallback;
-        IAfThreadBase::type_t mThreadType;
+        IAfThreadBase::type_t mThreadType = IAfThreadBase::MIXER;
     };
 
     DISALLOW_COPY_AND_ASSIGN(EffectChain);
@@ -637,8 +714,8 @@
 
     // get a list of effect modules to suspend when an effect of the type
     // passed is enabled.
-    void getSuspendEligibleEffects_l(Vector<sp<IAfEffectModule>>& effects)
-            REQUIRES(audio_utils::ThreadBase_Mutex);
+    void getSuspendEligibleEffects(Vector<sp<IAfEffectModule>>& effects)
+            EXCLUDES_EffectChain_Mutex;
 
     // get an effect module if it is currently enable
     sp<IAfEffectModule> getEffectIfEnabled_l(const effect_uuid_t* type)
@@ -646,8 +723,7 @@
     // true if the effect whose descriptor is passed can be suspended
     // OEMs can modify the rules implemented in this method to exclude specific effect
     // types or implementations from the suspend/restore mechanism.
-    bool isEffectEligibleForSuspend_l(const effect_descriptor_t& desc)
-            REQUIRES(audio_utils::ThreadBase_Mutex);
+    bool isEffectEligibleForSuspend(const effect_descriptor_t& desc);
 
     static bool isEffectEligibleForBtNrecSuspend_l(const effect_uuid_t* type)
             REQUIRES(audio_utils::ThreadBase_Mutex);
@@ -660,15 +736,15 @@
     void setVolumeForOutput_l(uint32_t left, uint32_t right)
             REQUIRES(audio_utils::EffectChain_Mutex);
 
-    ssize_t getInsertIndex_ll(const effect_descriptor_t& desc)
-            REQUIRES(audio_utils::ThreadBase_Mutex, audio_utils::EffectChain_Mutex);
+    ssize_t getInsertIndex_l(const effect_descriptor_t& desc)
+            REQUIRES(audio_utils::EffectChain_Mutex);
 
     std::optional<size_t> findVolumeControl_l(size_t from, size_t to) const
             REQUIRES(audio_utils::EffectChain_Mutex);
 
     // mutex protecting effect list
     mutable audio_utils::mutex mMutex{audio_utils::MutexOrder::kEffectChain_Mutex};
-             Vector<sp<IAfEffectModule>> mEffects; // list of effect modules
+             Vector<sp<IAfEffectModule>> mEffects  GUARDED_BY(mutex()); // list of effect modules
              audio_session_t mSessionId; // audio session ID
              sp<EffectBufferHalInterface> mInBuffer;  // chain input buffer
              sp<EffectBufferHalInterface> mOutBuffer; // chain output buffer
@@ -683,7 +759,7 @@
              uint32_t mRightVolume;      // previous volume on right channel
              uint32_t mNewLeftVolume;       // new volume on left channel
              uint32_t mNewRightVolume;      // new volume on right channel
-             product_strategy_t mStrategy; // strategy for this effect chain
+             product_strategy_t mStrategy = PRODUCT_STRATEGY_NONE; // strategy for this effect chain
              // mSuspendedEffects lists all effects currently suspended in the chain.
              // Use effect type UUID timelow field as key. There is no real risk of identical
              // timeLow fields among effect type UUIDs.
@@ -717,7 +793,7 @@
     status_t onUpdatePatch(audio_patch_handle_t oldPatchHandle, audio_patch_handle_t newPatchHandle,
            const IAfPatchPanel::Patch& patch) final;
 
-    void onReleasePatch(audio_patch_handle_t patchHandle) final;
+    sp<IAfEffectHandle> onReleasePatch(audio_patch_handle_t patchHandle) final;
 
     size_t removeEffect(const sp<IAfEffectModule>& effect) final;
 
@@ -766,7 +842,10 @@
         audio_channel_mask_t outChannelMask() const override;
         uint32_t outChannelCount() const override;
         audio_channel_mask_t hapticChannelMask() const override { return AUDIO_CHANNEL_NONE; }
-        size_t frameCount() const override  { return 0; }
+        /**
+         * frameCount cannot be zero.
+         */
+        size_t frameCount() const override  { return 1; }
         uint32_t latency() const override  { return 0; }
 
         status_t addEffectToHal(const sp<EffectHalInterface>& effect) override;
@@ -778,7 +857,7 @@
         void checkSuspendOnEffectEnabled(const sp<IAfEffectBase>& effect __unused,
                               bool enabled __unused, bool threadLocked __unused) override {}
         void resetVolume_l() override REQUIRES(audio_utils::EffectChain_Mutex) {}
-        product_strategy_t strategy() const override  { return static_cast<product_strategy_t>(0); }
+        product_strategy_t strategy() const override  { return PRODUCT_STRATEGY_NONE; }
         int32_t activeTrackCnt() const override { return 0; }
         void onEffectEnable(const sp<IAfEffectBase>& effect __unused) override;
         void onEffectDisable(const sp<IAfEffectBase>& effect __unused) override;
diff --git a/services/audioflinger/IAfEffect.h b/services/audioflinger/IAfEffect.h
index fd4dd62..3452e94 100644
--- a/services/audioflinger/IAfEffect.h
+++ b/services/audioflinger/IAfEffect.h
@@ -153,7 +153,7 @@
 
 public:
     static sp<IAfEffectModule> create(
-            const sp<EffectCallbackInterface>& callabck,
+            const sp<EffectCallbackInterface>& callback,
             effect_descriptor_t *desc,
             int id,
             audio_session_t sessionId,
@@ -163,7 +163,9 @@
     virtual int16_t *inBuffer() const = 0;
     virtual status_t setDevices(const AudioDeviceTypeAddrVector &devices) = 0;
     virtual status_t setInputDevice(const AudioDeviceTypeAddr &device) = 0;
-    virtual status_t setVolume(uint32_t *left, uint32_t *right, bool controller) = 0;
+    virtual status_t setVolume_l(uint32_t* left, uint32_t* right,
+                                 bool controller /* effect controlling chain volume */,
+                                 bool force = false) REQUIRES(audio_utils::EffectChain_Mutex) = 0;
     virtual status_t setOffloaded_l(bool offloaded, audio_io_handle_t io) = 0;
     virtual bool isOffloaded_l() const = 0;
 
@@ -181,17 +183,19 @@
     virtual bool isSpatializer() const = 0;
 
     virtual status_t setHapticScale_l(int id, os::HapticScale hapticScale)
-            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectBase_Mutex = 0;
+            REQUIRES(audio_utils::EffectChain_Mutex) EXCLUDES_EffectBase_Mutex = 0;
     virtual status_t setVibratorInfo_l(const media::AudioVibratorInfo& vibratorInfo)
-            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectBase_Mutex = 0;
+            REQUIRES(audio_utils::EffectChain_Mutex) EXCLUDES_EffectBase_Mutex = 0;
     virtual status_t sendMetadata_ll(const std::vector<playback_track_metadata_v7_t>& metadata)
             REQUIRES(audio_utils::ThreadBase_Mutex,
                      audio_utils::EffectChain_Mutex) EXCLUDES_EffectBase_Mutex = 0;
+    // Returns true if there was a state change from STARTING to ACTIVE, or from STOPPED to IDLE;
+    // the effect chain will do a volume reset in these two cases.
+    virtual bool updateState_l()
+            REQUIRES(audio_utils::EffectChain_Mutex) EXCLUDES_EffectBase_Mutex = 0;
 
 private:
     virtual void process() = 0;
-    virtual bool updateState_l()
-            REQUIRES(audio_utils::EffectChain_Mutex) EXCLUDES_EffectBase_Mutex = 0;
     virtual void reset_l() REQUIRES(audio_utils::EffectChain_Mutex) = 0;
     virtual status_t configure_l() REQUIRES(audio_utils::EffectChain_Mutex) = 0;
     virtual status_t init_l()
@@ -210,7 +214,8 @@
 
     virtual status_t stop_l() = 0;
     virtual void addEffectToHal_l() = 0;
-    virtual void release_l() = 0;
+    virtual status_t removeEffectFromHal_l() = 0;
+    virtual void release_l(const std::string& from) = 0;
 };
 
 class IAfEffectChain : public RefBase {
@@ -218,7 +223,8 @@
 public:
     static sp<IAfEffectChain> create(
             const sp<IAfThreadBase>& thread,
-            audio_session_t sessionId);
+            audio_session_t sessionId,
+            const sp<IAfThreadCallback>& afThreadCallback);
 
     // special key used for an entry in mSuspendedEffects keyed vector
     // corresponding to a suspend all request.
@@ -232,35 +238,36 @@
 
     virtual audio_utils::mutex& mutex() const RETURN_CAPABILITY(audio_utils::EffectChain_Mutex) = 0;
 
-    virtual status_t createEffect_l(sp<IAfEffectModule>& effect, effect_descriptor_t* desc, int id,
+    virtual status_t createEffect(sp<IAfEffectModule>& effect, effect_descriptor_t* desc, int id,
                                     audio_session_t sessionId, bool pinned)
-            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex = 0;
+            EXCLUDES_EffectChain_Mutex = 0;
 
+    virtual status_t addEffect(const sp<IAfEffectModule>& handle)
+            EXCLUDES_EffectChain_Mutex = 0;
     virtual status_t addEffect_l(const sp<IAfEffectModule>& handle)
-            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex = 0;
-    virtual status_t addEffect_ll(const sp<IAfEffectModule>& handle)
-            REQUIRES(audio_utils::ThreadBase_Mutex, audio_utils::EffectChain_Mutex) = 0;
-    virtual size_t removeEffect_l(const sp<IAfEffectModule>& handle,
+            REQUIRES(audio_utils::EffectChain_Mutex) = 0;
+    virtual size_t removeEffect(const sp<IAfEffectModule>& handle,
                                   bool release = false) EXCLUDES_EffectChain_Mutex = 0;
 
     virtual audio_session_t sessionId() const = 0;
     virtual void setSessionId(audio_session_t sessionId) = 0;
 
-    virtual sp<IAfEffectModule> getEffectFromDesc_l(effect_descriptor_t* descriptor) const
-            REQUIRES(audio_utils::ThreadBase_Mutex) = 0;
+    virtual sp<IAfEffectModule> getEffectFromDesc(effect_descriptor_t* descriptor) const
+            EXCLUDES_EffectChain_Mutex = 0;
     virtual sp<IAfEffectModule> getEffectFromId_l(int id) const
-            REQUIRES(audio_utils::ThreadBase_Mutex) = 0;
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex = 0;
     virtual sp<IAfEffectModule> getEffectFromType_l(const effect_uuid_t* type) const
-            REQUIRES(audio_utils::ThreadBase_Mutex) = 0;
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex = 0;
     virtual std::vector<int> getEffectIds_l() const = 0;
     virtual bool setVolume(uint32_t* left, uint32_t* right,
                            bool force = false) EXCLUDES_EffectChain_Mutex = 0;
     virtual void resetVolume_l() REQUIRES(audio_utils::EffectChain_Mutex) = 0;
     virtual void setDevices_l(const AudioDeviceTypeAddrVector& devices)
-            REQUIRES(audio_utils::ThreadBase_Mutex) = 0;
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex = 0;
     virtual void setInputDevice_l(const AudioDeviceTypeAddr& device)
-            REQUIRES(audio_utils::ThreadBase_Mutex) = 0;
-    virtual void setMode_l(audio_mode_t mode) REQUIRES(audio_utils::ThreadBase_Mutex) = 0;
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex = 0;
+    virtual void setMode_l(audio_mode_t mode)
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex = 0;
     virtual void setAudioSource_l(audio_source_t source)
             REQUIRES(audio_utils::ThreadBase_Mutex) = 0;
 
@@ -317,7 +324,11 @@
     virtual bool isCompatibleWithThread_l(const sp<IAfThreadBase>& thread) const
             REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex = 0;
 
-    virtual bool containsHapticGeneratingEffect_l() = 0;
+    virtual bool containsHapticGeneratingEffect()
+            EXCLUDES_EffectChain_Mutex = 0;
+
+    virtual bool containsHapticGeneratingEffect_l()
+            REQUIRES(audio_utils::EffectChain_Mutex) = 0;
 
     virtual void setHapticScale_l(int id, os::HapticScale hapticScale)
             REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex = 0;
@@ -327,7 +338,7 @@
     virtual wp<IAfThreadBase> thread() const = 0;
     virtual void setThread(const sp<IAfThreadBase>& thread) EXCLUDES_EffectChain_Mutex = 0;
 
-    virtual bool isFirstEffect(int id) const = 0;
+    virtual bool isFirstEffect_l(int id) const REQUIRES(audio_utils::EffectChain_Mutex) = 0;
 
     virtual size_t numberOfEffects() const = 0;
     virtual sp<IAfEffectModule> getEffectModule(size_t index) const = 0;
@@ -389,7 +400,14 @@
     virtual status_t onUpdatePatch(audio_patch_handle_t oldPatchHandle,
             audio_patch_handle_t newPatchHandle,
             const IAfPatchPanel::Patch& patch) = 0;
-    virtual void onReleasePatch(audio_patch_handle_t patchHandle) = 0;
+    /**
+     * Checks whether the effect handle is linked with the given released patch handle
+     * and, if so, releases it.
+     * @param patchHandle handle of the released patch
+     * @return a reference to the released effect handle if any, nullptr otherwise.
+     * This allows the caller to delay the destruction of the handle.
+     */
+    virtual sp<IAfEffectHandle> onReleasePatch(audio_patch_handle_t patchHandle) = 0;
 
     virtual void dump2(int fd, int spaces) const = 0; // TODO(b/291319101) naming?
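
The onReleasePatch() comment above describes returning the released handle so that its destruction can be deferred until the caller has dropped its lock. Below is a minimal, self-contained sketch of that pattern, using std::shared_ptr and std::mutex as stand-ins for the framework's sp<> and audio_utils::mutex (illustration only, not the actual AudioFlinger code):

    #include <map>
    #include <memory>
    #include <mutex>

    struct Handle {
        ~Handle() { /* may take other locks; must not run while the registry lock is held */ }
    };

    struct PatchRegistry {
        // Returns the released handle (if any) so the caller decides when it is destroyed.
        std::shared_ptr<Handle> onReleasePatch(int patchHandle) {
            auto it = mHandles.find(patchHandle);
            if (it == mHandles.end()) return nullptr;
            auto released = std::move(it->second);
            mHandles.erase(it);
            return released;
        }
        std::map<int, std::shared_ptr<Handle>> mHandles;
    };

    void releasePatch(PatchRegistry& registry, std::mutex& lock, int patchHandle) {
        std::shared_ptr<Handle> deferred;
        {
            std::lock_guard<std::mutex> guard(lock);
            deferred = registry.onReleasePatch(patchHandle);
            // ... other teardown while the lock is held ...
        }
        // 'deferred' is destroyed here, after the lock has been released.
    }
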
 
diff --git a/services/audioflinger/IAfThread.h b/services/audioflinger/IAfThread.h
index 8d51bcd..3d5638e 100644
--- a/services/audioflinger/IAfThread.h
+++ b/services/audioflinger/IAfThread.h
@@ -96,7 +96,8 @@
     virtual bool updateOrphanEffectChains(const sp<IAfEffectModule>& effect)
             EXCLUDES_AudioFlinger_Mutex = 0;
     virtual status_t moveEffectChain_ll(audio_session_t sessionId,
-            IAfPlaybackThread* srcThread, IAfPlaybackThread* dstThread)
+            IAfPlaybackThread* srcThread, IAfPlaybackThread* dstThread,
+            IAfEffectChain* srcChain = nullptr)
             REQUIRES(mutex(), audio_utils::ThreadBase_Mutex) = 0;
 
     virtual void requestLogMerge() = 0;
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index ce9dd99..d24f90e 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -1486,8 +1486,8 @@
     }
 
     if (IAfEffectModule::isHapticGenerator(&desc->type) && mHapticChannelCount == 0) {
-        ALOGW("%s: thread doesn't support haptic playback while the effect is HapticGenerator",
-                __func__);
+        ALOGW("%s: thread (%s) doesn't support haptic playback while the effect is HapticGenerator",
+              __func__, threadTypeToString(mType));
         return BAD_VALUE;
     }
 
@@ -1666,12 +1666,12 @@
         if (chain == 0) {
             // create a new chain for this session
             ALOGV("createEffect_l() new effect chain for session %d", sessionId);
-            chain = IAfEffectChain::create(this, sessionId);
+            chain = IAfEffectChain::create(this, sessionId, mAfThreadCallback);
             addEffectChain_l(chain);
             chain->setStrategy(getStrategyForSession_l(sessionId));
             chainCreated = true;
         } else {
-            effect = chain->getEffectFromDesc_l(desc);
+            effect = chain->getEffectFromDesc(desc);
         }
 
         ALOGV("createEffect_l() got effect %p on chain %p", effect.get(), chain.get());
@@ -1679,7 +1679,7 @@
         if (effect == 0) {
             effectId = mAfThreadCallback->nextUniqueId(AUDIO_UNIQUE_ID_USE_EFFECT);
             // create a new effect module if none present in the chain
-            lStatus = chain->createEffect_l(effect, desc, effectId, sessionId, pinned);
+            lStatus = chain->createEffect(effect, desc, effectId, sessionId, pinned);
             if (lStatus != NO_ERROR) {
                 goto Exit;
             }
@@ -1697,6 +1697,7 @@
             const std::optional<media::AudioVibratorInfo> defaultVibratorInfo =
                     std::move(mAfThreadCallback->getDefaultVibratorInfo_l());
             if (defaultVibratorInfo) {
+                audio_utils::lock_guard _cl(chain->mutex());
                 // Only set the vibrator info when it is a valid one.
                 effect->setVibratorInfo_l(*defaultVibratorInfo);
             }
@@ -1718,7 +1719,7 @@
     if (!probe && lStatus != NO_ERROR && lStatus != ALREADY_EXISTS) {
         audio_utils::lock_guard _l(mutex());
         if (effectCreated) {
-            chain->removeEffect_l(effect);
+            chain->removeEffect(effect);
         }
         if (chainCreated) {
             removeEffectChain_l(chain);
@@ -1819,7 +1820,7 @@
     if (chain == 0) {
         // create a new chain for this session
         ALOGV("%s: new effect chain for session %d", __func__, sessionId);
-        chain = IAfEffectChain::create(this, sessionId);
+        chain = IAfEffectChain::create(this, sessionId, mAfThreadCallback);
         addEffectChain_l(chain);
         chain->setStrategy(getStrategyForSession_l(sessionId));
         chainCreated = true;
@@ -1834,7 +1835,7 @@
 
     effect->setOffloaded_l(mType == OFFLOAD, mId);
 
-    status_t status = chain->addEffect_l(effect);
+    status_t status = chain->addEffect(effect);
     if (status != NO_ERROR) {
         if (chainCreated) {
             removeEffectChain_l(chain);
@@ -1861,7 +1862,7 @@
     sp<IAfEffectChain> chain = effect->getCallback()->chain().promote();
     if (chain != 0) {
         // remove effect chain if removing last effect
-        if (chain->removeEffect_l(effect, release) == 0) {
+        if (chain->removeEffect(effect, release) == 0) {
             removeEffectChain_l(chain);
         }
     } else {
@@ -2904,7 +2905,7 @@
         sp<IAfEffectChain> chain = getEffectChain_l(track->sessionId());
         if (mHapticChannelMask != AUDIO_CHANNEL_NONE
                 && ((track->channelMask() & AUDIO_CHANNEL_HAPTIC_ALL) != AUDIO_CHANNEL_NONE
-                        || (chain != nullptr && chain->containsHapticGeneratingEffect_l()))) {
+                        || (chain != nullptr && chain->containsHapticGeneratingEffect()))) {
             // Unlock due to VibratorService will lock for this call and will
             // call Tracks.mute/unmute which also require thread's lock.
             mutex().unlock();
@@ -4829,7 +4830,7 @@
         }
         if (mHapticChannelCount > 0 &&
                 ((track->channelMask() & AUDIO_CHANNEL_HAPTIC_ALL) != AUDIO_CHANNEL_NONE
-                        || (chain != nullptr && chain->containsHapticGeneratingEffect_l()))) {
+                        || (chain != nullptr && chain->containsHapticGeneratingEffect()))) {
             mutex().unlock();
             // Unlock due to VibratorService will lock for this call and will
             // call Tracks.mute/unmute which also require thread's lock.
diff --git a/services/audiopolicy/common/include/policy.h b/services/audiopolicy/common/include/policy.h
index 4643bd1..cf594c6 100644
--- a/services/audiopolicy/common/include/policy.h
+++ b/services/audiopolicy/common/include/policy.h
@@ -33,10 +33,6 @@
 
 static const uint32_t SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY = 5000;
 
-// For mixed output and inputs, the policy will use max mixer sampling rates.
-// Do not limit sampling rate otherwise
-#define SAMPLE_RATE_HZ_MAX 192000
-
 // Used when a client opens a capture stream, without specifying a desired sample rate.
 #define SAMPLE_RATE_HZ_DEFAULT 48000
 
diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
index c502fc2..7002e63 100644
--- a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -282,6 +282,11 @@
 
     const AudioProfileVector& getSupportedProfiles() { return mSupportedProfiles; }
 
+    /**
+     * @brief Checks whether all devices in the device vector are attached to their HwModule.
+     * @return true if all devices in the device vector are attached, false otherwise.
+     */
+    bool areAllDevicesAttached() const;
     // Return a string to describe the DeviceVector. The sensitive information will only be
     // added to the string if `includeSensitiveInfo` is true.
     std::string toString(bool includeSensitiveInfo = false) const;
diff --git a/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
index c2e4b11..f7b9b33 100644
--- a/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
@@ -32,11 +32,15 @@
 class EffectDescriptor : public RefBase
 {
 public:
-    EffectDescriptor(const effect_descriptor_t *desc, bool isMusicEffect,
-                     int id, audio_io_handle_t io, audio_session_t session) :
-        mId(id), mIo(io), mSession(session), mEnabled(false), mSuspended(false),
-        mIsMusicEffect(isMusicEffect)
-    {
+  EffectDescriptor(const effect_descriptor_t* desc, bool isMusicEffect, int id,
+                   audio_io_handle_t io, audio_session_t session)
+      : mId(id),
+        mIo(io),
+        mIsOrphan(io == AUDIO_IO_HANDLE_NONE),
+        mSession(session),
+        mEnabled(false),
+        mSuspended(false),
+        mIsMusicEffect(isMusicEffect) {
         memcpy (&mDesc, desc, sizeof(effect_descriptor_t));
     }
 
@@ -95,8 +99,18 @@
      * @return ioHandle if found, AUDIO_IO_HANDLE_NONE otherwise.
      */
     audio_io_handle_t getIoForSession(audio_session_t sessionId,
-                                      const effect_uuid_t *effectType = nullptr);
-    bool hasOrphansForSession(audio_session_t sessionId);
+                                      const effect_uuid_t* effectType = nullptr) const;
+
+    /**
+     * @brief Checks if there is at least one orphan effect with the given sessionId and
+     * optional effect type UUID.
+     * @param sessionId Session ID.
+     * @param effectType Optional pointer to an effect type UUID (effect_uuid_t), nullptr by default.
+     * @return True if there is an orphan effect for the given sessionId and type UUID, false otherwise.
+     */
+    bool hasOrphansForSession(audio_session_t sessionId,
+                              const effect_uuid_t* effectType = nullptr) const;
+
     EffectDescriptorCollection getOrphanEffectsForSession(audio_session_t sessionId) const;
     void dump(String8 *dst, int spaces = 0, bool verbose = true) const;
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index 9f7b8fc..46a04de 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -541,4 +541,14 @@
     return filteredDevices;
 }
 
+bool DeviceVector::areAllDevicesAttached() const
+{
+    for (const auto &device : *this) {
+        if (!device->isAttached()) {
+            return false;
+        }
+    }
+    return true;
+}
+
 } // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
index c85df0f..090da6c 100644
--- a/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
@@ -210,11 +210,13 @@
     }
 }
 
-bool EffectDescriptorCollection::hasOrphansForSession(audio_session_t sessionId)
-{
+bool EffectDescriptorCollection::hasOrphansForSession(audio_session_t sessionId,
+                                                      const effect_uuid_t* effectType) const {
     for (size_t i = 0; i < size(); ++i) {
         sp<EffectDescriptor> effect = valueAt(i);
-        if (effect->mSession == sessionId && effect->mIsOrphan) {
+        if (effect->mSession == sessionId && effect->mIsOrphan &&
+            (effectType == nullptr ||
+             memcmp(&effect->mDesc.type, effectType, sizeof(effect_uuid_t)) == 0)) {
             return true;
         }
     }
@@ -235,7 +237,7 @@
 }
 
 audio_io_handle_t EffectDescriptorCollection::getIoForSession(audio_session_t sessionId,
-                                                              const effect_uuid_t *effectType)
+                                                              const effect_uuid_t *effectType) const
 {
     for (size_t i = 0; i < size(); ++i) {
         sp<EffectDescriptor> effect = valueAt(i);
diff --git a/services/audiopolicy/common/managerdefinitions/src/PolicyAudioPort.cpp b/services/audiopolicy/common/managerdefinitions/src/PolicyAudioPort.cpp
index ce8178f..0ee84c1 100644
--- a/services/audiopolicy/common/managerdefinitions/src/PolicyAudioPort.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/PolicyAudioPort.cpp
@@ -20,6 +20,7 @@
 #include "PolicyAudioPort.h"
 #include "HwModule.h"
 #include <policy.h>
+#include <system/audio.h>
 
 #ifndef ARRAY_SIZE
 #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index 13cc165..2c2f90d 100644
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -142,7 +142,8 @@
         }
         break;
     case AUDIO_POLICY_FORCE_FOR_VIBRATE_RINGING:
-        if (config != AUDIO_POLICY_FORCE_BT_SCO && config != AUDIO_POLICY_FORCE_NONE) {
+        if (config != AUDIO_POLICY_FORCE_BT_SCO && config != AUDIO_POLICY_FORCE_BT_BLE
+                && config != AUDIO_POLICY_FORCE_NONE) {
             ALOGW("setForceUse() invalid config %d for VIBRATE_RINGING", config);
             return BAD_VALUE;
         }
@@ -355,6 +356,40 @@
                 }
             }
         }
+
+        // if LEA headset is connected and we are told to use it, play ringtone over
+        // speaker and BT LEA
+        if (!availableOutputDevices.getDevicesFromTypes(getAudioDeviceOutAllBleSet()).isEmpty()) {
+            DeviceVector devices2;
+            devices2 = availableOutputDevices.getFirstDevicesFromTypes({
+                    AUDIO_DEVICE_OUT_BLE_HEADSET, AUDIO_DEVICE_OUT_BLE_SPEAKER});
+            // Use ONLY Bluetooth LEA output when ringing in vibration mode
+            if (!((getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)
+                    && (strategy == STRATEGY_ENFORCED_AUDIBLE))) {
+                if (getForceUse(AUDIO_POLICY_FORCE_FOR_VIBRATE_RINGING)
+                        == AUDIO_POLICY_FORCE_BT_BLE) {
+                    if (!devices2.isEmpty()) {
+                        devices = devices2;
+                        break;
+                    }
+                }
+            }
+            // Use both Bluetooth LEA and phone default output when ringing in normal mode
+            if (audio_is_ble_out_device(getPreferredDeviceTypeForLegacyStrategy(
+                    availableOutputDevices, STRATEGY_PHONE))) {
+                if (strategy == STRATEGY_SONIFICATION) {
+                    devices.replaceDevicesByType(
+                            AUDIO_DEVICE_OUT_SPEAKER,
+                            availableOutputDevices.getDevicesFromType(
+                                    AUDIO_DEVICE_OUT_SPEAKER_SAFE));
+                }
+                if (!devices2.isEmpty()) {
+                    devices.add(devices2);
+                    break;
+                }
+            }
+        }
+
         // The second device used for sonification is the same as the device used by media strategy
         FALLTHROUGH_INTENDED;
 
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 8561a9d..811ca12 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -374,6 +374,7 @@
         checkLeBroadcastRoutes(wasLeUnicastActive, nullptr, 0);
 
         mpClientInterface->onAudioPortListUpdate();
+        ALOGV("%s() completed for device: %s", __func__, device->toString().c_str());
         return NO_ERROR;
     }  // end if is output device
 
@@ -389,6 +390,8 @@
                 return INVALID_OPERATION;
             }
 
+            ALOGV("%s() connecting device %s", __func__, device->toString().c_str());
+
             if (mAvailableInputDevices.add(device) < 0) {
                 return NO_MEMORY;
             }
@@ -461,6 +464,7 @@
         }
 
         mpClientInterface->onAudioPortListUpdate();
+        ALOGV("%s() completed for device: %s", __func__, device->toString().c_str());
         return NO_ERROR;
     } // end if is input device
 
@@ -1516,7 +1520,7 @@
                 (config->channel_mask == desc->getChannelMask()) &&
                 (session == desc->mDirectClientSession)) {
                 desc->mDirectOpenCount++;
-                ALOGV("%s reusing direct output %d for session %d", __func__,
+                ALOGI("%s reusing direct output %d for session %d", __func__,
                     mOutputs.keyAt(i), session);
                 *output = mOutputs.keyAt(i);
                 return NO_ERROR;
@@ -1526,17 +1530,23 @@
 
     if (!profile->canOpenNewIo()) {
         if (!com::android::media::audioserver::direct_track_reprioritization()) {
+            ALOGW("%s profile %s can't open new output maxOpenCount reached", __func__,
+                  profile->getName().c_str());
             return NAME_NOT_FOUND;
         } else if ((profile->getFlags() & AUDIO_OUTPUT_FLAG_MMAP_NOIRQ) != 0) {
             // MMAP gracefully handles lack of an exclusive track resource by mixing
             // above the audio framework. For AAudio to know that the limit is reached,
             // return an error.
+            ALOGW("%s profile %s can't open new mmap output maxOpenCount reached", __func__,
+                  profile->getName().c_str());
             return NAME_NOT_FOUND;
         } else {
             // Close outputs on this profile, if available, to free resources for this request
             for (int i = 0; i < mOutputs.size() && !profile->canOpenNewIo(); i++) {
                 const auto desc = mOutputs.valueAt(i);
                 if (desc->mProfile == profile) {
+                    ALOGV("%s closeOutput %d to prioritize session %d on profile %s", __func__,
+                          desc->mIoHandle, session, profile->getName().c_str());
                     closeOutput(desc->mIoHandle);
                 }
             }
@@ -1545,6 +1555,8 @@
 
     // Unable to close streams to find free resources for this request
     if (!profile->canOpenNewIo()) {
+        ALOGW("%s profile %s can't open new output maxOpenCount reached", __func__,
+              profile->getName().c_str());
         return NAME_NOT_FOUND;
     }
 
@@ -1642,12 +1654,14 @@
     }
 
     // Use the spatializer output if the content can be spatialized, no preferred mixer
-    // was specified and offload or direct playback is not explicitly requested.
+    // was specified and offload or direct playback is not explicitly requested, and there is no
+    // haptic channel included in playback
     *isSpatialized = false;
-    if (mSpatializerOutput != nullptr
-            && canBeSpatializedInt(attr, config, devices.toTypeAddrVector())
-            && prefMixerConfigInfo == nullptr
-            && ((*flags & (AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD | AUDIO_OUTPUT_FLAG_DIRECT)) == 0)) {
+    if (mSpatializerOutput != nullptr &&
+        canBeSpatializedInt(attr, config, devices.toTypeAddrVector()) &&
+        prefMixerConfigInfo == nullptr &&
+        ((*flags & (AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD | AUDIO_OUTPUT_FLAG_DIRECT)) == 0) &&
+        checkHapticCompatibilityOnSpatializerOutput(config, session)) {
         *isSpatialized = true;
         return mSpatializerOutput->mIoHandle;
     }
@@ -2073,6 +2087,7 @@
     // matching criteria values in priority order for best matching output so far
     std::vector<uint32_t> bestMatchCriteria(8, 0);
 
+    const bool hasOrphanHaptic = mEffects.hasOrphansForSession(sessionId, FX_IID_HAPTICGENERATOR);
     const uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
     const uint32_t hapticChannelCount = audio_channel_count_from_out_mask(
         channelMask & AUDIO_CHANNEL_HAPTIC_ALL);
@@ -2093,13 +2108,20 @@
         // When using haptic output, same audio format and sample rate are required.
         const uint32_t outputHapticChannelCount = audio_channel_count_from_out_mask(
             outputDesc->getChannelMask() & AUDIO_CHANNEL_HAPTIC_ALL);
-        if ((hapticChannelCount == 0) != (outputHapticChannelCount == 0)) {
+        // Skip if a haptic channel is specified but the output does not support it, or if the
+        // output supports haptic but no haptic channel is requested AND no orphan haptic effect exists.
+        if ((hapticChannelCount != 0 && outputHapticChannelCount == 0) ||
+            (hapticChannelCount == 0 && outputHapticChannelCount != 0 && !hasOrphanHaptic)) {
             continue;
         }
-        if (outputHapticChannelCount >= hapticChannelCount
-            && format == outputDesc->getFormat()
-            && samplingRate == outputDesc->getSamplingRate()) {
-                currentMatchCriteria[0] = outputHapticChannelCount;
+        // In the case of audio-coupled-haptic playback, there is no format conversion or
+        // resampling in the framework, so the client and the output thread must use the same
+        // format/channel/sampleRate. In the case of the HapticGenerator effect, format matching
+        // is not required.
+        if ((outputHapticChannelCount >= hapticChannelCount && format == outputDesc->getFormat() &&
+             samplingRate == outputDesc->getSamplingRate()) ||
+            (outputHapticChannelCount != 0 && hasOrphanHaptic)) {
+            currentMatchCriteria[0] = outputHapticChannelCount;
         }
 
         // functional flags match
@@ -3276,8 +3298,8 @@
         ALOGW("%s: no group for stream %s, bailing out", __func__, toString(stream).c_str());
         return NO_ERROR;
     }
-    ALOGV("%s: stream %s attributes=%s", __func__,
-          toString(stream).c_str(), toString(attributes).c_str());
+    ALOGV("%s: stream %s attributes=%s, index %d, device 0x%X", __func__,
+          toString(stream).c_str(), toString(attributes).c_str(), index, device);
     return setVolumeIndexForAttributes(attributes, index, device);
 }
 
@@ -3455,8 +3477,8 @@
     bool hasVoice = hasVoiceStream(volumeCurves.getStreamTypes());
     if (((index < volumeCurves.getVolumeIndexMin()) && !(hasVoice && index == 0)) ||
             (index > volumeCurves.getVolumeIndexMax())) {
-        ALOGD("%s: wrong index %d min=%d max=%d", __FUNCTION__, index,
-              volumeCurves.getVolumeIndexMin(), volumeCurves.getVolumeIndexMax());
+        ALOGE("%s: wrong index %d min=%d max=%d, device 0x%X", __FUNCTION__, index,
+              volumeCurves.getVolumeIndexMin(), volumeCurves.getVolumeIndexMax(), device);
         return BAD_VALUE;
     }
     if (!audio_is_output_device(device)) {
@@ -3588,7 +3610,7 @@
                                 int session,
                                 int id)
 {
-    if (session != AUDIO_SESSION_DEVICE) {
+    if (session != AUDIO_SESSION_DEVICE && io != AUDIO_IO_HANDLE_NONE) {
         ssize_t index = mOutputs.indexOfKey(io);
         if (index < 0) {
             index = mInputs.indexOfKey(io);
@@ -6098,6 +6120,34 @@
     return true;
 }
 
+// The Spatializer output is compatible with haptic use cases if:
+// 1. the Spatializer output thread supports haptic, and the format/sampleRate match the
+// client's when the client haptic channel bits are set, or
+// 2. the Spatializer output thread does not support haptic, and the client did not request
+// haptic by including the haptic bits or by creating a HapticGenerator effect for the same session.
+bool AudioPolicyManager::checkHapticCompatibilityOnSpatializerOutput(
+        const audio_config_t* config, audio_session_t sessionId) const {
+    const auto clientHapticChannel =
+            audio_channel_count_from_out_mask(config->channel_mask & AUDIO_CHANNEL_HAPTIC_ALL);
+    const auto threadOutputHapticChannel = audio_channel_count_from_out_mask(
+            mSpatializerOutput->getChannelMask() & AUDIO_CHANNEL_HAPTIC_ALL);
+
+    if (threadOutputHapticChannel) {
+        // check that format and sampleRate match if the client haptic channel mask exists
+        if (clientHapticChannel) {
+            return mSpatializerOutput->getFormat() == config->format &&
+                   mSpatializerOutput->getSamplingRate() == config->sample_rate;
+        }
+        return true;
+    } else {
+        // If the Spatializer output channel mask does not have haptic channel bits, haptic use
+        // cases (either the client channel mask includes haptic bits, or a HapticGenerator effect
+        // was created for this session) are not supported.
+        return clientHapticChannel == 0 &&
+               !mEffects.hasOrphansForSession(sessionId, FX_IID_HAPTICGENERATOR);
+    }
+}
+
 void AudioPolicyManager::checkVirtualizerClientRoutes() {
     std::set<audio_stream_type_t> streamsToInvalidate;
     for (size_t i = 0; i < mOutputs.size(); i++) {
@@ -6386,6 +6436,14 @@
             if (!mConfig->getOutputDevices().contains(supportedDevice)) {
                 continue;
             }
+
+            if (outProfile->isMmap() && !outProfile->hasDynamicAudioProfile()
+                && availProfileDevices.areAllDevicesAttached()) {
+                ALOGV("%s skip opening output for mmap profile %s", __func__,
+                        outProfile->getTagName().c_str());
+                continue;
+            }
+
             sp<SwAudioOutputDescriptor> outputDesc = new SwAudioOutputDescriptor(outProfile,
                                                                                  mpClientInterface);
             audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
@@ -6445,6 +6503,14 @@
                     __func__, inProfile->getTagName().c_str());
                 continue;
             }
+
+            if (inProfile->isMmap() && !inProfile->hasDynamicAudioProfile()
+                && availProfileDevices.areAllDevicesAttached()) {
+                ALOGV("%s skip opening input for mmap profile %s", __func__,
+                        inProfile->getTagName().c_str());
+                continue;
+            }
+
             sp<AudioInputDescriptor> inputDesc =
                     new AudioInputDescriptor(inProfile, mpClientInterface);
 
@@ -6452,12 +6518,12 @@
             status_t status = inputDesc->open(nullptr,
                                               availProfileDevices.itemAt(0),
                                               AUDIO_SOURCE_MIC,
-                                              AUDIO_INPUT_FLAG_NONE,
+                                              (audio_input_flags_t) inProfile->getFlags(),
                                               &input);
             if (status != NO_ERROR) {
-                ALOGW("Cannot open input stream for device %s on hw module %s",
-                      availProfileDevices.toString().c_str(),
-                      hwModule->getName());
+                ALOGW("%s: Cannot open input stream for device %s for profile %s on hw module %s",
+                        __func__, availProfileDevices.toString().c_str(),
+                        inProfile->getTagName().c_str(), hwModule->getName());
                 continue;
             }
             for (const auto &device : availProfileDevices) {
@@ -6565,8 +6631,8 @@
                 sp<IOProfile> profile = hwModule->getOutputProfiles()[j];
                 if (profile->supportsDevice(device)) {
                     profiles.add(profile);
-                    ALOGV("checkOutputsForDevice(): adding profile %zu from module %s",
-                          j, hwModule->getName());
+                    ALOGV("%s(): adding profile %s from module %s",
+                            __func__, profile->getTagName().c_str(), hwModule->getName());
                 }
             }
         }
@@ -6599,7 +6665,11 @@
             if (j != outputs.size()) {
                 continue;
             }
-
+            if (profile->isMmap() && !profile->hasDynamicAudioProfile()) {
+                ALOGV("%s skip opening output for mmap profile %s",
+                      __func__, profile->getTagName().c_str());
+                continue;
+            }
             if (!profile->canOpenNewIo()) {
                 ALOGW("Max Output number %u already opened for this profile %s",
                       profile->maxOpenCount, profile->getTagName().c_str());
@@ -6660,9 +6730,8 @@
                 if (!profile->supportsDevice(device)) {
                     continue;
                 }
-                ALOGV("checkOutputsForDevice(): "
-                        "clearing direct output profile %zu on module %s",
-                        j, hwModule->getName());
+                ALOGV("%s(): clearing direct output profile %s on module %s",
+                        __func__, profile->getTagName().c_str(), hwModule->getName());
                 profile->clearAudioProfiles();
                 if (!profile->hasDynamicAudioProfile()) {
                     continue;
@@ -6717,8 +6786,8 @@
 
                 if (profile->supportsDevice(device)) {
                     profiles.add(profile);
-                    ALOGV("checkInputsForDevice(): adding profile %zu from module %s",
-                          profile_index, hwModule->getName());
+                    ALOGV("%s : adding profile %s from module %s", __func__,
+                          profile->getTagName().c_str(), hwModule->getName());
                 }
             }
         }
@@ -6750,15 +6819,22 @@
                 continue;
             }
 
+            if (profile->isMmap() && !profile->hasDynamicAudioProfile()) {
+                ALOGV("%s skip opening input for mmap profile %s",
+                      __func__, profile->getTagName().c_str());
+                continue;
+            }
             if (!profile->canOpenNewIo()) {
-                ALOGW("Max Input number %u already opened for this profile %s",
-                      profile->maxOpenCount, profile->getTagName().c_str());
+                ALOGW("%s Max Input number %u already opened for this profile %s",
+                      __func__, profile->maxOpenCount, profile->getTagName().c_str());
                 continue;
             }
 
             desc = new AudioInputDescriptor(profile, mpClientInterface);
             audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
-            status = desc->open(nullptr, device, AUDIO_SOURCE_MIC, AUDIO_INPUT_FLAG_NONE, &input);
+            ALOGV("%s opening input for profile %s", __func__, profile->getTagName().c_str());
+            status = desc->open(nullptr, device, AUDIO_SOURCE_MIC,
+                                (audio_input_flags_t) profile->getFlags(), &input);
 
             if (status == NO_ERROR) {
                 const String8& address = String8(device->address().c_str());
@@ -6769,7 +6845,8 @@
                 }
                 updateAudioProfiles(device, input, profile);
                 if (!profile->hasValidAudioProfile()) {
-                    ALOGW("checkInputsForDevice() direct input missing param");
+                    ALOGW("%s direct input missing param for profile %s", __func__,
+                            profile->getTagName().c_str());
                     desc->close();
                     input = AUDIO_IO_HANDLE_NONE;
                 }
@@ -6780,18 +6857,20 @@
             } // endif input != 0
 
             if (input == AUDIO_IO_HANDLE_NONE) {
-                ALOGW("%s could not open input for device %s", __func__,
-                       device->toString().c_str());
+                ALOGW("%s could not open input for device %s on profile %s", __func__,
+                       device->toString().c_str(), profile->getTagName().c_str());
                 profiles.removeAt(profile_index);
                 profile_index--;
             } else {
                 if (audio_device_is_digital(device->type())) {
                     device->importAudioPortAndPickAudioProfile(profile);
                 }
-                ALOGV("checkInputsForDevice(): adding input %d", input);
+                ALOGV("%s: adding input %d for profile %s", __func__,
+                        input, profile->getTagName().c_str());
 
                 if (checkCloseInput(desc)) {
-                    ALOGV("%s closing input %d", __func__, input);
+                    ALOGV("%s: closing input %d for profile %s", __func__,
+                            input, profile->getTagName().c_str());
                     closeInput(input);
                 }
             }
@@ -6810,8 +6889,8 @@
                  profile_index++) {
                 sp<IOProfile> profile = hwModule->getInputProfiles()[profile_index];
                 if (profile->supportsDevice(device)) {
-                    ALOGV("checkInputsForDevice(): clearing direct input profile %zu on module %s",
-                            profile_index, hwModule->getName());
+                    ALOGV("%s: clearing direct input profile %s on module %s", __func__,
+                            profile->getTagName().c_str(), hwModule->getName());
                     profile->clearAudioProfiles();
                 }
             }
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 2db0445..c8cced6 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -1359,6 +1359,9 @@
 
         PortHandleVector getClientsForStream(audio_stream_type_t streamType) const;
         void invalidateStreams(StreamTypeVector streams) const;
+
+        bool checkHapticCompatibilityOnSpatializerOutput(const audio_config_t* config,
+                                                         audio_session_t sessionId) const;
 };
 
 };
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 508d487..8a2c07f 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -292,7 +292,7 @@
     }
 
     List<const CameraDeviceBase::PhysicalCameraSettingsList> metadataRequestList;
-    std::list<const SurfaceMap> surfaceMapList;
+    std::list<SurfaceMap> surfaceMapList;
     submitInfo->mRequestId = mRequestIdCounter;
     uint32_t loopCounter = 0;
 
diff --git a/services/camera/libcameraservice/api2/HeicCompositeStream.cpp b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
index 68e9ad4..fe99241 100644
--- a/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
+++ b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
@@ -1583,7 +1583,7 @@
         // The chrome plane could be either Cb first, or Cr first. Take the
         // smaller address.
         uint8_t *src = std::min(yuvBuffer.dataCb, yuvBuffer.dataCr);
-        MediaImage2::PlaneIndex dstPlane = codecUvOffsetDiff > 0 ? MediaImage2::U : MediaImage2::V;
+        MediaImage2::PlaneIndex dstPlane = codecUPlaneFirst ? MediaImage2::U : MediaImage2::V;
         for (auto row = top/2; row < (top+height)/2; row++) {
             uint8_t *dst = codecBuffer->data() + imageInfo->mPlane[dstPlane].mOffset +
                     imageInfo->mPlane[dstPlane].mRowInc * (row - top/2);
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index cfc41c3..aceb5c0 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -136,7 +136,7 @@
      * Output lastFrameNumber is the expected last frame number of the list of requests.
      */
     virtual status_t captureList(const List<const PhysicalCameraSettingsList> &requests,
-                                 const std::list<const SurfaceMap> &surfaceMaps,
+                                 const std::list<SurfaceMap> &surfaceMaps,
                                  int64_t *lastFrameNumber = NULL) = 0;
 
     /**
@@ -152,7 +152,7 @@
      * Output lastFrameNumber is the last frame number of the previous streaming request.
      */
     virtual status_t setStreamingRequestList(const List<const PhysicalCameraSettingsList> &requests,
-                                             const std::list<const SurfaceMap> &surfaceMaps,
+                                             const std::list<SurfaceMap> &surfaceMaps,
                                              int64_t *lastFrameNumber = NULL) = 0;
 
     /**
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index e9d15dc..6e56dda 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -718,7 +718,7 @@
 
 status_t Camera3Device::convertMetadataListToRequestListLocked(
         const List<const PhysicalCameraSettingsList> &metadataList,
-        const std::list<const SurfaceMap> &surfaceMaps,
+        const std::list<SurfaceMap> &surfaceMaps,
         bool repeating, nsecs_t requestTimeNs,
         RequestList *requestList) {
     if (requestList == NULL) {
@@ -728,7 +728,7 @@
 
     int32_t burstId = 0;
     List<const PhysicalCameraSettingsList>::const_iterator metadataIt = metadataList.begin();
-    std::list<const SurfaceMap>::const_iterator surfaceMapIt = surfaceMaps.begin();
+    std::list<SurfaceMap>::const_iterator surfaceMapIt = surfaceMaps.begin();
     for (; metadataIt != metadataList.end() && surfaceMapIt != surfaceMaps.end();
             ++metadataIt, ++surfaceMapIt) {
         sp<CaptureRequest> newRequest = setUpRequestLocked(*metadataIt, *surfaceMapIt);
@@ -776,14 +776,14 @@
     ATRACE_CALL();
 
     List<const PhysicalCameraSettingsList> requestsList;
-    std::list<const SurfaceMap> surfaceMaps;
+    std::list<SurfaceMap> surfaceMaps;
     convertToRequestList(requestsList, surfaceMaps, request);
 
     return captureList(requestsList, surfaceMaps, lastFrameNumber);
 }
 
 void Camera3Device::convertToRequestList(List<const PhysicalCameraSettingsList>& requestsList,
-        std::list<const SurfaceMap>& surfaceMaps,
+        std::list<SurfaceMap>& surfaceMaps,
         const CameraMetadata& request) {
     PhysicalCameraSettingsList requestList;
     requestList.push_back({getId(), request});
@@ -801,7 +801,7 @@
 
 status_t Camera3Device::submitRequestsHelper(
         const List<const PhysicalCameraSettingsList> &requests,
-        const std::list<const SurfaceMap> &surfaceMaps,
+        const std::list<SurfaceMap> &surfaceMaps,
         bool repeating,
         /*out*/
         int64_t *lastFrameNumber) {
@@ -849,7 +849,7 @@
 }
 
 status_t Camera3Device::captureList(const List<const PhysicalCameraSettingsList> &requestsList,
-                                    const std::list<const SurfaceMap> &surfaceMaps,
+                                    const std::list<SurfaceMap> &surfaceMaps,
                                     int64_t *lastFrameNumber) {
     ATRACE_CALL();
 
@@ -861,7 +861,7 @@
     ATRACE_CALL();
 
     List<const PhysicalCameraSettingsList> requestsList;
-    std::list<const SurfaceMap> surfaceMaps;
+    std::list<SurfaceMap> surfaceMaps;
     convertToRequestList(requestsList, surfaceMaps, request);
 
     return setStreamingRequestList(requestsList, /*surfaceMap*/surfaceMaps,
@@ -870,7 +870,7 @@
 
 status_t Camera3Device::setStreamingRequestList(
         const List<const PhysicalCameraSettingsList> &requestsList,
-        const std::list<const SurfaceMap> &surfaceMaps, int64_t *lastFrameNumber) {
+        const std::list<SurfaceMap> &surfaceMaps, int64_t *lastFrameNumber) {
     ATRACE_CALL();
 
     return submitRequestsHelper(requestsList, surfaceMaps, /*repeating*/true, lastFrameNumber);
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index fe9f292..a8a840c 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -129,12 +129,12 @@
     // idle state
     status_t capture(CameraMetadata &request, int64_t *lastFrameNumber = NULL) override;
     status_t captureList(const List<const PhysicalCameraSettingsList> &requestsList,
-            const std::list<const SurfaceMap> &surfaceMaps,
+            const std::list<SurfaceMap> &surfaceMaps,
             int64_t *lastFrameNumber = NULL) override;
     status_t setStreamingRequest(const CameraMetadata &request,
             int64_t *lastFrameNumber = NULL) override;
     status_t setStreamingRequestList(const List<const PhysicalCameraSettingsList> &requestsList,
-            const std::list<const SurfaceMap> &surfaceMaps,
+            const std::list<SurfaceMap> &surfaceMaps,
             int64_t *lastFrameNumber = NULL) override;
     status_t clearStreamingRequest(int64_t *lastFrameNumber = NULL) override;
 
@@ -702,17 +702,17 @@
 
     status_t convertMetadataListToRequestListLocked(
             const List<const PhysicalCameraSettingsList> &metadataList,
-            const std::list<const SurfaceMap> &surfaceMaps,
+            const std::list<SurfaceMap> &surfaceMaps,
             bool repeating, nsecs_t requestTimeNs,
             /*out*/
             RequestList *requestList);
 
     void convertToRequestList(List<const PhysicalCameraSettingsList>& requestsList,
-            std::list<const SurfaceMap>& surfaceMaps,
+            std::list<SurfaceMap>& surfaceMaps,
             const CameraMetadata& request);
 
     status_t submitRequestsHelper(const List<const PhysicalCameraSettingsList> &requestsList,
-                                  const std::list<const SurfaceMap> &surfaceMaps,
+                                  const std::list<SurfaceMap> &surfaceMaps,
                                   bool repeating,
                                   int64_t *lastFrameNumber = NULL);
 
diff --git a/services/camera/libcameraservice/fuzzer/Android.bp b/services/camera/libcameraservice/fuzzer/Android.bp
index 7760f6a..667ba02 100644
--- a/services/camera/libcameraservice/fuzzer/Android.bp
+++ b/services/camera/libcameraservice/fuzzer/Android.bp
@@ -26,7 +26,18 @@
 cc_defaults {
     name: "libcameraservice_fuzz_defaults",
     fuzz_config: {
-        componentid: 41727
+        cc: [
+            "android-camera-fwk-eng@google.com",
+        ],
+        componentid: 41727,
+        hotlists: [
+            "4593311",
+        ],
+        description: "The fuzzer targets the APIs of libcameraservice",
+        vector: "local_no_privileges_required",
+        service_privilege: "privileged",
+        users: "multi_user",
+        fuzzed_code_usage: "shipped",
     },
 }
 
@@ -37,9 +48,9 @@
         "DistortionMapperFuzzer.cpp",
     ],
     shared_libs: [
-        "libcameraservice",
+        "camera_platform_flags_c_lib",
         "libcamera_client",
-        "camera_platform_flags_c_lib"
+        "libcameraservice",
     ],
 }
 
@@ -50,8 +61,8 @@
         "DepthProcessorFuzzer.cpp",
     ],
     shared_libs: [
+        "camera_platform_flags_c_lib",
         "libcameraservice",
-        "camera_platform_flags_c_lib"
     ],
     corpus: ["corpus/*.jpg"],
 }
diff --git a/services/camera/libcameraservice/fuzzer/DepthProcessorFuzzer.cpp b/services/camera/libcameraservice/fuzzer/DepthProcessorFuzzer.cpp
index 650ca91..5c5e177 100644
--- a/services/camera/libcameraservice/fuzzer/DepthProcessorFuzzer.cpp
+++ b/services/camera/libcameraservice/fuzzer/DepthProcessorFuzzer.cpp
@@ -14,49 +14,92 @@
  * limitations under the License.
  */
 
-#include <array>
-#include <vector>
+#include "common/DepthPhotoProcessor.h"
+
+#include <random>
 
 #include <fuzzer/FuzzedDataProvider.h>
 
-#include "common/DepthPhotoProcessor.h"
-
 using namespace android;
 using namespace android::camera3;
 
-static const size_t kTestBufferWidth = 640;
-static const size_t kTestBufferHeight = 480;
-static const size_t kTestBufferDepthSize (kTestBufferWidth * kTestBufferHeight);
+static const float kMinRatio = 0.1f;
+static const float kMaxRatio = 0.9f;
 
-void generateDepth16Buffer(const uint8_t* data, size_t size, std::array<uint16_t, kTestBufferDepthSize> *depth16Buffer /*out*/) {
-    FuzzedDataProvider dataProvider(data, size);
-    for (size_t i = 0; i < depth16Buffer->size(); i++) {
-        (*depth16Buffer)[i] = dataProvider.ConsumeIntegral<uint16_t>();
+static const uint8_t kTotalDepthJpegBufferCount = 3;
+static const uint8_t kIntrinsicCalibrationSize = 5;
+static const uint8_t kLensDistortionSize = 5;
+
+static const DepthPhotoOrientation kDepthPhotoOrientations[] = {
+        DepthPhotoOrientation::DEPTH_ORIENTATION_0_DEGREES,
+        DepthPhotoOrientation::DEPTH_ORIENTATION_90_DEGREES,
+        DepthPhotoOrientation::DEPTH_ORIENTATION_180_DEGREES,
+        DepthPhotoOrientation::DEPTH_ORIENTATION_270_DEGREES};
+
+void generateDepth16Buffer(std::vector<uint16_t>* depth16Buffer /*out*/, size_t length,
+                           FuzzedDataProvider& fdp) {
+    std::default_random_engine gen(fdp.ConsumeIntegral<uint8_t>());
+    std::uniform_int_distribution uniDist(0, UINT16_MAX - 1);
+    for (size_t i = 0; i < length; ++i) {
+        (*depth16Buffer)[i] = uniDist(gen);
     }
 }
 
 extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+    FuzzedDataProvider fdp(data, size);
+
     DepthPhotoInputFrame inputFrame;
+
+    /**
+     * Consume 80% of the data to set mMainJpegBuffer. This ensures that we don't
+     * completely exhaust the data and keeps the remaining 20% for fuzzing the other APIs.
+     */
+    std::vector<uint8_t> buffer = fdp.ConsumeBytes<uint8_t>((size * 80) / 100);
+    inputFrame.mMainJpegBuffer = reinterpret_cast<const char*>(buffer.data());
+
+    /**
+     * Calculate height and width based on buffer size and a ratio within [0.1, 0.9].
+     * The ratio skews the dimensions while keeping height * width approximately equal to the buffer size.
+     */
+    const float ratio = fdp.ConsumeFloatingPointInRange<float>(kMinRatio, kMaxRatio);
+    const size_t height = std::sqrt(buffer.size()) * ratio;
+    const size_t width = std::sqrt(buffer.size()) / ratio;
+
+    inputFrame.mMainJpegHeight = height;
+    inputFrame.mMainJpegWidth = width;
+    inputFrame.mMainJpegSize = buffer.size();
     // Worst case both depth and confidence maps have the same size as the main color image.
-    inputFrame.mMaxJpegSize = inputFrame.mMainJpegSize * 3;
+    inputFrame.mMaxJpegSize = inputFrame.mMainJpegSize * kTotalDepthJpegBufferCount;
+
+    std::vector<uint16_t> depth16Buffer(height * width);
+    generateDepth16Buffer(&depth16Buffer, height * width, fdp);
+    inputFrame.mDepthMapBuffer = depth16Buffer.data();
+    inputFrame.mDepthMapHeight = height;
+    inputFrame.mDepthMapWidth = inputFrame.mDepthMapStride = width;
+
+    inputFrame.mIsLogical = fdp.ConsumeBool();
+
+    inputFrame.mOrientation = fdp.PickValueInArray<DepthPhotoOrientation>(kDepthPhotoOrientations);
+
+    if (fdp.ConsumeBool()) {
+        for (uint8_t i = 0; i < kIntrinsicCalibrationSize; ++i) {
+            inputFrame.mIntrinsicCalibration[i] = fdp.ConsumeFloatingPoint<float>();
+        }
+        inputFrame.mIsIntrinsicCalibrationValid = 1;
+    }
+
+    if (fdp.ConsumeBool()) {
+        for (uint8_t i = 0; i < kLensDistortionSize; ++i) {
+            inputFrame.mLensDistortion[i] = fdp.ConsumeFloatingPoint<float>();
+        }
+        inputFrame.mIsLensDistortionValid = 1;
+    }
 
     std::vector<uint8_t> depthPhotoBuffer(inputFrame.mMaxJpegSize);
     size_t actualDepthPhotoSize = 0;
 
-    std::array<uint16_t, kTestBufferDepthSize> depth16Buffer;
-    generateDepth16Buffer(data, size, &depth16Buffer);
+    processDepthPhotoFrame(inputFrame, depthPhotoBuffer.size(), depthPhotoBuffer.data(),
+                           &actualDepthPhotoSize);
 
-    inputFrame.mMainJpegBuffer = reinterpret_cast<const char*> (data);
-    inputFrame.mMainJpegSize = size;
-    inputFrame.mDepthMapBuffer = depth16Buffer.data();
-    inputFrame.mDepthMapStride = kTestBufferWidth;
-    inputFrame.mDepthMapWidth = kTestBufferWidth;
-    inputFrame.mDepthMapHeight = kTestBufferHeight;
-    processDepthPhotoFrame(
-        inputFrame,
-        depthPhotoBuffer.size(),
-        depthPhotoBuffer.data(),
-        &actualDepthPhotoSize);
-
-  return 0;
+    return 0;
 }
diff --git a/services/camera/virtualcamera/util/JpegUtil.h b/services/camera/virtualcamera/util/JpegUtil.h
index 83ed74b..5d53269 100644
--- a/services/camera/virtualcamera/util/JpegUtil.h
+++ b/services/camera/virtualcamera/util/JpegUtil.h
@@ -18,6 +18,7 @@
 #define ANDROID_COMPANION_VIRTUALCAMERA_JPEGUTIL_H
 
 #include <optional>
+#include <vector>
 
 #include "system/graphics.h"
 
diff --git a/services/mediacodec/seccomp_policy/mediaswcodec-arm.policy b/services/mediacodec/seccomp_policy/mediaswcodec-arm.policy
index 41efce0..92f0745 100644
--- a/services/mediacodec/seccomp_policy/mediaswcodec-arm.policy
+++ b/services/mediacodec/seccomp_policy/mediaswcodec-arm.policy
@@ -85,5 +85,6 @@
 getegid32: 1
 getgroups32: 1
 sysinfo: 1
+setsockopt: 1
 
 @include /apex/com.android.media.swcodec/etc/seccomp_policy/code_coverage.arm.policy
diff --git a/services/mediametrics/include/mediametricsservice/TimedAction.h b/services/mediametrics/include/mediametricsservice/TimedAction.h
index 8b53ded..8901ced 100644
--- a/services/mediametrics/include/mediametricsservice/TimedAction.h
+++ b/services/mediametrics/include/mediametricsservice/TimedAction.h
@@ -81,9 +81,8 @@
     void threadLoop() NO_THREAD_SAFETY_ANALYSIS { // thread safety doesn't cover unique_lock
         std::unique_lock l(mLock);
         while (!mQuit) {
-            auto sleepUntilTime = std::chrono::time_point<TimerClock>::max();
             if (!mMap.empty()) {
-                sleepUntilTime = mMap.begin()->first;
+                auto sleepUntilTime = mMap.begin()->first;
                 const auto now = TimerClock::now();
                 if (sleepUntilTime <= now) {
                     auto node = mMap.extract(mMap.begin()); // removes from mMap.
@@ -96,8 +95,17 @@
                 // of REALTIME specification, use kWakeupInterval to ensure minimum
                 // granularity if suspended.
                 sleepUntilTime = std::min(sleepUntilTime, now + kWakeupInterval);
+                mCondition.wait_until(l, sleepUntilTime);
+            } else {
+                // As TimerClock is system_clock (which is not monotonic), libcxx's
+                // implementation of condition_variable::wait_until(l, std::chrono::time_point)
+                // recalculates the 'until' time into the wait duration and then goes back to the
+                // absolute timestamp when calling pthread_cond_timedwait(); this back-and-forth
+                // calculation sometimes loses the 'max' value because enough time passes in
+                // between, and instead passes an incorrect timestamp into the syscall, causing a
+                // crash. Mitigate this by explicitly calling the non-timed wait here.
+                mCondition.wait(l);
             }
-            mCondition.wait_until(l, sleepUntilTime);
         }
     }
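
The change above replaces an unconditional wait_until() with a plain wait() when no deadline exists, avoiding a time_point::max() deadline on a non-monotonic clock. A minimal, self-contained sketch of that pattern using only standard types (names are illustrative, not the framework's):

    #include <chrono>
    #include <condition_variable>
    #include <map>
    #include <mutex>

    using TimerClock = std::chrono::system_clock;  // non-monotonic, as in the comment above

    std::mutex gLock;
    std::condition_variable gCondition;
    std::map<TimerClock::time_point, int> gQueue;  // deadline -> action id

    // Caller holds a std::unique_lock on gLock, as in a threadLoop-style consumer.
    void waitForNextDeadline(std::unique_lock<std::mutex>& l) {
        if (!gQueue.empty()) {
            // A real deadline exists, so a bounded wait_until() is appropriate.
            gCondition.wait_until(l, gQueue.begin()->first);
        } else {
            // No deadline: block indefinitely instead of passing time_point::max()
            // to wait_until(), which the comment above explains can be mishandled.
            gCondition.wait(l);
        }
    }
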