Fix build am: 6f85d83163 am: cea6041539 am: ac11d74c81 am: 94742171f1
am: b8d23091df

Change-Id: Ia03c876b68026f0a6d70b920893a6f485cc8ae32
diff --git a/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl b/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl
index 1e8744b..96ecfa0 100644
--- a/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl
+++ b/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl
@@ -121,4 +121,6 @@
     void tearDown(int streamId);
 
     void prepare2(int maxCount, int streamId);
+
+    void setDeferredConfiguration(int streamId, in OutputConfiguration outputConfiguration);
 }
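
For context, a hypothetical client-side flow for the new deferred-configuration call (the constructor shape below is illustrative, not part of this patch):

    // 1. Configure a stream from an OutputConfiguration that carries only the
    //    surface type and size; the Surface itself is still pending.
    // 2. Once the SurfaceView/SurfaceTexture is ready, hand over its producer:
    sp<IGraphicBufferProducer> gbp = surface->getIGraphicBufferProducer();
    OutputConfiguration config(gbp, rotation);  // illustrative constructor
    deviceUser->setDeferredConfiguration(streamId, config);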
diff --git a/camera/camera2/OutputConfiguration.cpp b/camera/camera2/OutputConfiguration.cpp
index 3247d0d..38e1c01 100644
--- a/camera/camera2/OutputConfiguration.cpp
+++ b/camera/camera2/OutputConfiguration.cpp
@@ -42,9 +42,24 @@
     return mSurfaceSetID;
 }
 
+int OutputConfiguration::getSurfaceType() const {
+    return mSurfaceType;
+}
+
+int OutputConfiguration::getWidth() const {
+    return mWidth;
+}
+
+int OutputConfiguration::getHeight() const {
+    return mHeight;
+}
+
 OutputConfiguration::OutputConfiguration() :
         mRotation(INVALID_ROTATION),
-        mSurfaceSetID(INVALID_SET_ID) {
+        mSurfaceSetID(INVALID_SET_ID),
+        mSurfaceType(SURFACE_TYPE_UNKNOWN),
+        mWidth(0),
+        mHeight(0) {
 }
 
 OutputConfiguration::OutputConfiguration(const Parcel& parcel) :
@@ -70,18 +85,48 @@
         return err;
     }
 
+    int surfaceType = SURFACE_TYPE_UNKNOWN;
+    if ((err = parcel->readInt32(&surfaceType)) != OK) {
+        ALOGE("%s: Failed to read surface type from parcel", __FUNCTION__);
+        return err;
+    }
+
+    int width = 0;
+    if ((err = parcel->readInt32(&width)) != OK) {
+        ALOGE("%s: Failed to read surface width from parcel", __FUNCTION__);
+        return err;
+    }
+
+    int height = 0;
+    if ((err = parcel->readInt32(&height)) != OK) {
+        ALOGE("%s: Failed to read surface height from parcel", __FUNCTION__);
+        return err;
+    }
+
     view::Surface surfaceShim;
     if ((err = surfaceShim.readFromParcel(parcel)) != OK) {
-        ALOGE("%s: Failed to read surface from parcel", __FUNCTION__);
-        return err;
+        // Failure to read the surface is expected for a deferred surface configuration.
+        if (surfaceType == SURFACE_TYPE_SURFACE_VIEW ||
+                surfaceType == SURFACE_TYPE_SURFACE_TEXTURE) {
+            ALOGV("%s: Get null surface from a deferred surface configuration (%dx%d)",
+                    __FUNCTION__, width, height);
+            err = OK;
+        } else {
+            ALOGE("%s: Failed to read surface from parcel", __FUNCTION__);
+            return err;
+        }
     }
 
     mGbp = surfaceShim.graphicBufferProducer;
     mRotation = rotation;
     mSurfaceSetID = setID;
+    mSurfaceType = surfaceType;
+    mWidth = width;
+    mHeight = height;
 
-    ALOGV("%s: OutputConfiguration: bp = %p, name = %s, rotation = %d, setId = %d", __FUNCTION__,
-            mGbp.get(), String8(surfaceShim.name).string(), mRotation, mSurfaceSetID);
+    ALOGV("%s: OutputConfiguration: bp = %p, name = %s, rotation = %d, setId = %d,"
+            "surfaceType = %d", __FUNCTION__, mGbp.get(), String8(surfaceShim.name).string(),
+            mRotation, mSurfaceSetID, mSurfaceType);
 
     return err;
 }
@@ -104,6 +149,15 @@
     err = parcel->writeInt32(mSurfaceSetID);
     if (err != OK) return err;
 
+    err = parcel->writeInt32(mSurfaceType);
+    if (err != OK) return err;
+
+    err = parcel->writeInt32(mWidth);
+    if (err != OK) return err;
+
+    err = parcel->writeInt32(mHeight);
+    if (err != OK) return err;
+
     view::Surface surfaceShim;
     surfaceShim.name = String16("unknown_name"); // name of surface
     surfaceShim.graphicBufferProducer = mGbp;
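
Note that the write order above mirrors readFromParcel(): the three new int32 fields sit between mSurfaceSetID and the surface shim, so the Java OutputConfiguration in frameworks/base must flatten in exactly the same order. A minimal round-trip sketch (assuming some way to build a deferred instance, which this class does not expose directly):

    Parcel p;
    cfg.writeToParcel(&p);          // rotation, setId, type, width, height, surface
    p.setDataPosition(0);
    OutputConfiguration back(p);    // tolerates a null surface for deferred types
    assert(back.getSurfaceType() == cfg.getSurfaceType());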
diff --git a/camera/cameraserver/cameraserver.rc b/camera/cameraserver/cameraserver.rc
index 16d9da8..c1dad2c 100644
--- a/camera/cameraserver/cameraserver.rc
+++ b/camera/cameraserver/cameraserver.rc
@@ -1,6 +1,7 @@
 service cameraserver /system/bin/cameraserver
     class main
     user cameraserver
-    group audio camera drmrpc inet media mediadrm net_bt net_bt_admin net_bw_acct
+    group audio camera input drmrpc
     ioprio rt 4
-    writepid /dev/cpuset/foreground/tasks
+    writepid /dev/cpuset/camera-daemon/tasks
+    writepid /dev/stune/top-app/tasks
diff --git a/include/camera/camera2/OutputConfiguration.h b/include/camera/camera2/OutputConfiguration.h
index 72a3753..cf8f3c6 100644
--- a/include/camera/camera2/OutputConfiguration.h
+++ b/include/camera/camera2/OutputConfiguration.h
@@ -33,10 +33,17 @@
 
     static const int INVALID_ROTATION;
     static const int INVALID_SET_ID;
+    enum SurfaceType {
+        SURFACE_TYPE_UNKNOWN = -1,
+        SURFACE_TYPE_SURFACE_VIEW = 0,
+        SURFACE_TYPE_SURFACE_TEXTURE = 1
+    };
     sp<IGraphicBufferProducer> getGraphicBufferProducer() const;
     int                        getRotation() const;
     int                        getSurfaceSetID() const;
-
+    int                        getSurfaceType() const;
+    int                        getWidth() const;
+    int                        getHeight() const;
     /**
      * Keep impl up-to-date with OutputConfiguration.java in frameworks/base
      */
@@ -60,7 +67,10 @@
     bool operator == (const OutputConfiguration& other) const {
         return (mGbp == other.mGbp &&
                 mRotation == other.mRotation &&
-                mSurfaceSetID == other.mSurfaceSetID);
+                mSurfaceSetID == other.mSurfaceSetID &&
+                mSurfaceType == other.mSurfaceType &&
+                mWidth == other.mWidth &&
+                mHeight == other.mHeight);
     }
     bool operator != (const OutputConfiguration& other) const {
         return !(*this == other);
@@ -71,6 +81,16 @@
         if (mSurfaceSetID != other.mSurfaceSetID) {
             return mSurfaceSetID < other.mSurfaceSetID;
         }
+        if (mSurfaceType != other.mSurfaceType) {
+            return mSurfaceType < other.mSurfaceType;
+        }
+        if (mWidth != other.mWidth) {
+            return mWidth < other.mWidth;
+        }
+        if (mHeight != other.mHeight) {
+            return mHeight < other.mHeight;
+        }
+
         return mRotation < other.mRotation;
     }
     bool operator > (const OutputConfiguration& other) const {
@@ -81,6 +101,9 @@
     sp<IGraphicBufferProducer> mGbp;
     int                        mRotation;
     int                        mSurfaceSetID;
+    int                        mSurfaceType;
+    int                        mWidth;
+    int                        mHeight;
     // helper function
     static String16 readMaybeEmptyString16(const Parcel* parcel);
 };
diff --git a/include/media/AudioRecord.h b/include/media/AudioRecord.h
index 2fa1a4e..63076e9 100644
--- a/include/media/AudioRecord.h
+++ b/include/media/AudioRecord.h
@@ -491,6 +491,9 @@
      */
             uint32_t    getInputFramesLost() const;
 
+    /* Get the input flags; once the record track is created, these reflect what the server granted */
+            audio_input_flags_t getFlags() const { AutoMutex _l(mLock); return mFlags; }
+
 private:
     /* copying audio record objects is not allowed */
                         AudioRecord(const AudioRecord& other);
diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h
index 984bc02..096f7ef 100644
--- a/include/media/IAudioFlinger.h
+++ b/include/media/IAudioFlinger.h
@@ -44,16 +44,6 @@
 public:
     DECLARE_META_INTERFACE(AudioFlinger);
 
-    // or-able bits shared by createTrack and openRecord, but not all combinations make sense
-    enum {
-        TRACK_DEFAULT = 0,  // client requests a default AudioTrack
-        // FIXME: obsolete
-        // TRACK_TIMED= 1,  // client requests a TimedAudioTrack
-        TRACK_FAST    = 2,  // client requests a fast AudioTrack or AudioRecord
-        TRACK_OFFLOAD = 4,  // client requests offload to hw codec
-        TRACK_DIRECT = 8,   // client requests a direct output
-    };
-    typedef uint32_t track_flags_t;
 
     // invariant on exit for all APIs that return an sp<>:
     //   (return value != 0) == (*status == NO_ERROR)
@@ -67,7 +57,7 @@
                                 audio_format_t format,
                                 audio_channel_mask_t channelMask,
                                 size_t *pFrameCount,
-                                track_flags_t *flags,
+                                audio_output_flags_t *flags,
                                 const sp<IMemory>& sharedBuffer,
                                 // On successful return, AudioFlinger takes over the handle
                                 // reference and will release it when the track is destroyed.
@@ -89,7 +79,7 @@
                                 audio_channel_mask_t channelMask,
                                 const String16& callingPackage,
                                 size_t *pFrameCount,
-                                track_flags_t *flags,
+                                audio_input_flags_t *flags,
                                 pid_t pid,
                                 pid_t tid,  // -1 means unused, otherwise must be valid non-0
                                 int clientUid,
diff --git a/include/media/IMediaSource.h b/include/media/IMediaSource.h
index 709f425..524e7aa 100644
--- a/include/media/IMediaSource.h
+++ b/include/media/IMediaSource.h
@@ -114,6 +114,9 @@
     virtual status_t readMultiple(
             Vector<MediaBuffer *> *buffers, uint32_t maxNumBuffers = 1) = 0;
 
+    // Returns true if |readMultiple| is supported, otherwise false.
+    virtual bool supportReadMultiple() = 0;
+
     // Causes this source to suspend pulling data from its upstream source
     // until a subsequent read-with-seek. Currently only supported by
     // OMXCodec.
@@ -148,6 +151,10 @@
             Vector<MediaBuffer *> * /* buffers */, uint32_t /* maxNumBuffers = 1 */) {
         return ERROR_UNSUPPORTED;
     }
+
+    virtual bool supportReadMultiple() {
+        return false;
+    }
 protected:
     virtual ~BnMediaSource();
 
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index 4f2517c..94d2896 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -371,6 +371,10 @@
             int32_t width, int32_t height,
             OMX_VIDEO_CODINGTYPE compressionFormat, float frameRate = -1.0);
 
+    // Sets the |portIndex| port buffer count to |bufferNum|. NOTE: the component may reject
+    // this request if |bufferNum| is less than the port's minimum buffer count.
+    status_t setPortBufferNum(OMX_U32 portIndex, int bufferNum);
+
     // gets index or sets it to 0 on error. Returns error from codec.
     status_t initDescribeColorAspectsIndex();
 
@@ -480,6 +484,12 @@
     status_t getIntraRefreshPeriod(uint32_t *intraRefreshPeriod);
     status_t setIntraRefreshPeriod(uint32_t intraRefreshPeriod, bool inConfigure);
 
+    // Configures temporal layering based on |msg|. |inConfigure| shall be true iff this is
+    // called during a configure() call. On success the configured layering is set in
+    // |outputFormat|. If |outputFormat| is mOutputFormat, it is copied to trigger an output
+    // format changed event.
+    status_t configureTemporalLayers(
+            const sp<AMessage> &msg, bool inConfigure, sp<AMessage> &outputFormat);
+
     status_t setMinBufferSize(OMX_U32 portIndex, size_t size);
 
     status_t setupMPEG4EncoderParameters(const sp<AMessage> &msg);
diff --git a/include/media/stagefright/ColorConverter.h b/include/media/stagefright/ColorConverter.h
index 85ba920..270c809 100644
--- a/include/media/stagefright/ColorConverter.h
+++ b/include/media/stagefright/ColorConverter.h
@@ -70,6 +70,9 @@
     status_t convertYUV420Planar(
             const BitmapParams &src, const BitmapParams &dst);
 
+    status_t convertYUV420PlanarUseLibYUV(
+            const BitmapParams &src, const BitmapParams &dst);
+
     status_t convertQCOMYUV420SemiPlanar(
             const BitmapParams &src, const BitmapParams &dst);
 
diff --git a/include/media/stagefright/MPEG4Writer.h b/include/media/stagefright/MPEG4Writer.h
index a6901a8..8f0eaa7 100644
--- a/include/media/stagefright/MPEG4Writer.h
+++ b/include/media/stagefright/MPEG4Writer.h
@@ -65,6 +65,7 @@
 
     status_t setGeoData(int latitudex10000, int longitudex10000);
     status_t setCaptureRate(float captureFps);
+    status_t setTemporalLayerCount(uint32_t layerCount);
     virtual void setStartTimeOffsetMs(int ms) { mStartTimeOffsetMs = ms; }
     virtual int32_t getStartTimeOffsetMs() const { return mStartTimeOffsetMs; }
 
@@ -187,6 +188,7 @@
     // Acquire lock before calling these methods
     off64_t addSample_l(MediaBuffer *buffer);
     off64_t addLengthPrefixedSample_l(MediaBuffer *buffer);
+    off64_t addMultipleLengthPrefixedSamples_l(MediaBuffer *buffer);
 
     bool exceedsFileSizeLimit();
     bool use32BitFileOffset() const;
diff --git a/include/media/stagefright/MetaData.h b/include/media/stagefright/MetaData.h
index be7e5c1..6ba7b32 100644
--- a/include/media/stagefright/MetaData.h
+++ b/include/media/stagefright/MetaData.h
@@ -204,6 +204,8 @@
                                    // transfer Function, value defined by ColorAspects.Transfer.
     kKeyColorMatrix      = 'cMtx', // int32_t,
                                    // color Matrix, value defined by ColorAspects.MatrixCoeffs.
+    kKeyTemporalLayerId  = 'iLyr', // int32_t, temporal layer-id. 0-based (0 => base layer)
+    kKeyTemporalLayerCount = 'cLyr', // int32_t, number of temporal layers encoded
 };
 
 enum {
diff --git a/media/common_time/Android.mk b/media/common_time/Android.mk
index 632acbc..aaa0db2 100644
--- a/media/common_time/Android.mk
+++ b/media/common_time/Android.mk
@@ -19,4 +19,6 @@
                           libutils \
                           liblog
 
+LOCAL_CFLAGS := -Wall -Werror
+
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/common_time/cc_helper.cpp b/media/common_time/cc_helper.cpp
index 8d8556c..222b7ce 100644
--- a/media/common_time/cc_helper.cpp
+++ b/media/common_time/cc_helper.cpp
@@ -80,7 +80,7 @@
     }
 }
 
-void CCHelper::CommonClockListener::onTimelineChanged(uint64_t timelineID) {
+void CCHelper::CommonClockListener::onTimelineChanged(uint64_t timelineID __unused) {
     // do nothing; listener is only really used as a token so the server can
     // find out when clients die.
 }
diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp
index d9bb856..9a87023 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libmedia/AudioRecord.cpp
@@ -593,11 +593,10 @@
     size_t notificationFrames = mNotificationFramesReq;
     size_t frameCount = mReqFrameCount;
 
-    IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
+    audio_input_flags_t flags = mFlags;
 
     pid_t tid = -1;
     if (mFlags & AUDIO_INPUT_FLAG_FAST) {
-        trackFlags |= IAudioFlinger::TRACK_FAST;
         if (mAudioRecordThread != 0) {
             tid = mAudioRecordThread->getTid();
         }
@@ -615,7 +614,7 @@
                                                        mChannelMask,
                                                        opPackageName,
                                                        &temp,
-                                                       &trackFlags,
+                                                       &flags,
                                                        mClientPid,
                                                        tid,
                                                        mClientUid,
@@ -638,7 +637,7 @@
 
     mAwaitBoost = false;
     if (mFlags & AUDIO_INPUT_FLAG_FAST) {
-        if (trackFlags & IAudioFlinger::TRACK_FAST) {
+        if (flags & AUDIO_INPUT_FLAG_FAST) {
             ALOGI("AUDIO_INPUT_FLAG_FAST successful; frameCount %zu", frameCount);
             mAwaitBoost = true;
         } else {
@@ -648,6 +647,7 @@
             continue;   // retry
         }
     }
+    mFlags = flags;
 
     if (iMem == 0) {
         ALOGE("Could not get control block");
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index e8da341..fad8350 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -1372,24 +1372,15 @@
         }
     }
 
-    IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
+    audio_output_flags_t flags = mFlags;
 
     pid_t tid = -1;
     if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
-        trackFlags |= IAudioFlinger::TRACK_FAST;
         if (mAudioTrackThread != 0 && !mThreadCanCallJava) {
             tid = mAudioTrackThread->getTid();
         }
     }
 
-    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
-        trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
-    }
-
-    if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
-        trackFlags |= IAudioFlinger::TRACK_DIRECT;
-    }
-
     size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
                                 // but we will still need the original value also
     audio_session_t originalSessionId = mSessionId;
@@ -1398,7 +1389,7 @@
                                                       mFormat,
                                                       mChannelMask,
                                                       &temp,
-                                                      &trackFlags,
+                                                      &flags,
                                                       mSharedBuffer,
                                                       output,
                                                       mClientPid,
@@ -1451,23 +1442,23 @@
 
     mAwaitBoost = false;
     if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
-        if (trackFlags & IAudioFlinger::TRACK_FAST) {
+        if (flags & AUDIO_OUTPUT_FLAG_FAST) {
             ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu", frameCount);
             if (!mThreadCanCallJava) {
                 mAwaitBoost = true;
             }
         } else {
             ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
-            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
         }
     }
+    mFlags = flags;
 
     // Make sure that application is notified with sufficient margin before underrun.
     // The client can divide the AudioTrack buffer into sub-buffers,
     // and expresses its desire to server as the notification frame count.
     if (mSharedBuffer == 0 && audio_is_linear_pcm(mFormat)) {
         size_t maxNotificationFrames;
-        if (trackFlags & IAudioFlinger::TRACK_FAST) {
+        if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
             // notify every HAL buffer, regardless of the size of the track buffer
             maxNotificationFrames = afFrameCountHAL;
         } else {
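
With track_flags_t gone, AudioTrack and AudioRecord share the same request/grant shape: the client sends its own audio_*_flags_t, the server clears in place any bits it denies, and the client adopts the result. Reduced to a sketch:

    audio_output_flags_t flags = mFlags;             // the client's request
    audioFlinger->createTrack(..., &flags, ...);     // server rewrites denied bits
    if ((mFlags & AUDIO_OUTPUT_FLAG_FAST) && !(flags & AUDIO_OUTPUT_FLAG_FAST)) {
        ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by server");
    }
    mFlags = flags;                                  // keep only what was granted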
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index 92e65e4..900d418 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -101,7 +101,7 @@
                                 audio_format_t format,
                                 audio_channel_mask_t channelMask,
                                 size_t *pFrameCount,
-                                track_flags_t *flags,
+                                audio_output_flags_t *flags,
                                 const sp<IMemory>& sharedBuffer,
                                 audio_io_handle_t output,
                                 pid_t pid,
@@ -119,7 +119,7 @@
         data.writeInt32(channelMask);
         size_t frameCount = pFrameCount != NULL ? *pFrameCount : 0;
         data.writeInt64(frameCount);
-        track_flags_t lFlags = flags != NULL ? *flags : (track_flags_t) TRACK_DEFAULT;
+        audio_output_flags_t lFlags = flags != NULL ? *flags : AUDIO_OUTPUT_FLAG_NONE;
         data.writeInt32(lFlags);
         // haveSharedBuffer
         if (sharedBuffer != 0) {
@@ -145,7 +145,7 @@
             if (pFrameCount != NULL) {
                 *pFrameCount = frameCount;
             }
-            lFlags = reply.readInt32();
+            lFlags = (audio_output_flags_t)reply.readInt32();
             if (flags != NULL) {
                 *flags = lFlags;
             }
@@ -180,7 +180,7 @@
                                 audio_channel_mask_t channelMask,
                                 const String16& opPackageName,
                                 size_t *pFrameCount,
-                                track_flags_t *flags,
+                                audio_input_flags_t *flags,
                                 pid_t pid,
                                 pid_t tid,
                                 int clientUid,
@@ -200,7 +200,7 @@
         data.writeString16(opPackageName);
         size_t frameCount = pFrameCount != NULL ? *pFrameCount : 0;
         data.writeInt64(frameCount);
-        track_flags_t lFlags = flags != NULL ? *flags : (track_flags_t) TRACK_DEFAULT;
+        audio_input_flags_t lFlags = flags != NULL ? *flags : AUDIO_INPUT_FLAG_NONE;
         data.writeInt32(lFlags);
         data.writeInt32((int32_t) pid);
         data.writeInt32((int32_t) tid);
@@ -221,7 +221,7 @@
             if (pFrameCount != NULL) {
                 *pFrameCount = frameCount;
             }
-            lFlags = reply.readInt32();
+            lFlags = (audio_input_flags_t)reply.readInt32();
             if (flags != NULL) {
                 *flags = lFlags;
             }
@@ -947,7 +947,7 @@
             audio_format_t format = (audio_format_t) data.readInt32();
             audio_channel_mask_t channelMask = data.readInt32();
             size_t frameCount = data.readInt64();
-            track_flags_t flags = (track_flags_t) data.readInt32();
+            audio_output_flags_t flags = (audio_output_flags_t) data.readInt32();
             bool haveSharedBuffer = data.readInt32() != 0;
             sp<IMemory> buffer;
             if (haveSharedBuffer) {
@@ -986,7 +986,7 @@
             audio_channel_mask_t channelMask = data.readInt32();
             const String16& opPackageName = data.readString16();
             size_t frameCount = data.readInt64();
-            track_flags_t flags = (track_flags_t) data.readInt32();
+            audio_input_flags_t flags = (audio_input_flags_t) data.readInt32();
             pid_t pid = (pid_t) data.readInt32();
             pid_t tid = (pid_t) data.readInt32();
             int clientUid = data.readInt32();
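
Dropping track_flags_t does not change the binder wire format (the flags still travel as a single int32 in both CREATE_TRACK and OPEN_RECORD), but it changes what the bits mean, so client and server must be updated together. The visible portion of the transaction layout, unchanged in size:

    // ... int32 channelMask, int64 frameCount,
    // int32 flags (now audio_output_flags_t / audio_input_flags_t,
    //              previously IAudioFlinger::track_flags_t), ...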
diff --git a/media/libmedia/IMediaSource.cpp b/media/libmedia/IMediaSource.cpp
index 7e40e4f..d2b4291 100644
--- a/media/libmedia/IMediaSource.cpp
+++ b/media/libmedia/IMediaSource.cpp
@@ -221,6 +221,10 @@
         return ret;
     }
 
+    virtual bool supportReadMultiple() {
+        return true;
+    }
+
     virtual status_t pause() {
         ALOGV("pause");
         Parcel data, reply;
diff --git a/media/libmedia/ToneGenerator.cpp b/media/libmedia/ToneGenerator.cpp
index 411519d..f352d5b 100644
--- a/media/libmedia/ToneGenerator.cpp
+++ b/media/libmedia/ToneGenerator.cpp
@@ -831,14 +831,15 @@
     mProcessSize = (mSamplingRate * 20) / 1000;
 
     char value[PROPERTY_VALUE_MAX];
-    property_get("gsm.operator.iso-country", value, "");
-    if (strcmp(value,"us") == 0 ||
-        strcmp(value,"ca") == 0) {
+    if (property_get("gsm.operator.iso-country", value, "") == 0) {
+        property_get("gsm.sim.operator.iso-country", value, "");
+    }
+    if (strstr(value, "us") != NULL ||
+        strstr(value, "ca") != NULL) {
         mRegion = ANSI;
-    } else if (strcmp(value,"jp") == 0) {
+    } else if (strstr(value, "jp") != NULL) {
         mRegion = JAPAN;
-    } else if (strcmp(value,"uk") == 0 ||
-               strcmp(value,"uk,uk") == 0) {
+    } else if (strstr(value, "uk") != NULL) {
         mRegion = UK;
     } else {
         mRegion = CEPT;
@@ -876,6 +877,7 @@
         ALOGV("Delete Track: %p", mpAudioTrack.get());
         mpAudioTrack.clear();
     }
+    clearWaveGens();
 }
 
 ////////////////////////////////////////////////////////////////////////////////
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index 8725dfe..fbe749c 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -395,7 +395,9 @@
         return BAD_VALUE;
     }
     Mutex::Autolock _l(mLock);
-    if (mPlayer == 0) return INVALID_OPERATION;
+    if (mPlayer == 0 || (mCurrentState & MEDIA_PLAYER_STOPPED)) {
+        return INVALID_OPERATION;
+    }
 
     if (rate.mSpeed != 0.f && !(mCurrentState & MEDIA_PLAYER_STARTED)
             && (mCurrentState & (MEDIA_PLAYER_PREPARED | MEDIA_PLAYER_PAUSED
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 97ba76b..b03ae3f 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -22,6 +22,8 @@
 #include "WebmWriter.h"
 #include "StagefrightRecorder.h"
 
+#include <algorithm>
+
 #include <android/hardware/ICamera.h>
 
 #include <binder/IPCThreadState.h>
@@ -820,6 +822,9 @@
             break;
     }
 
+    ALOGV("Recording frameRate: %d captureFps: %f",
+            mFrameRate, mCaptureFps);
+
     return status;
 }
 
@@ -1562,9 +1567,23 @@
         format->setInt32("level", mVideoEncoderLevel);
     }
 
+    uint32_t tsLayers = 0;
     format->setInt32("priority", 0 /* realtime */);
     if (mCaptureFpsEnable) {
         format->setFloat("operating-rate", mCaptureFps);
+
+        // enable layering for all time lapse and high frame rate recordings
+        if (mFrameRate / mCaptureFps >= 1.9 || mCaptureFps / mFrameRate >= 1.9) {
+            tsLayers = 2; // use at least two layers
+            for (float fps = mCaptureFps / 1.9; fps > mFrameRate; fps /= 2) {
+                ++tsLayers;
+            }
+
+            uint32_t bLayers = std::min(2u, tsLayers - 1); // use up-to 2 B-layers
+            uint32_t pLayers = tsLayers - bLayers;
+            format->setString(
+                    "ts-schema", AStringPrintf("android.generic.%u+%u", pLayers, bLayers));
+        }
     }
 
     if (mMetaDataStoredInVideoBuffers != kMetadataBufferTypeInvalid) {
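
A worked example of the layering heuristic above: a 120 fps high-frame-rate capture to be played at 30 fps gives mCaptureFps / mFrameRate = 4 >= 1.9, so layering is enabled:

    // tsLayers = 2                          (the two-layer floor)
    // fps = 120 / 1.9 = 63.2 > 30     ->    tsLayers = 3
    // fps = 31.6            > 30      ->    tsLayers = 4
    // fps = 15.8            <= 30     ->    loop ends
    // bLayers = min(2u, 4 - 1) = 2, pLayers = 2  =>  ts-schema = "android.generic.2+2"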
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index 56042d4..1a0539d 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -1298,6 +1298,13 @@
     }
 #endif
 
+    if (trackType == MEDIA_TRACK_TYPE_VIDEO) {
+        int32_t layerId;
+        if (mb->meta_data()->findInt32(kKeyTemporalLayerId, &layerId)) {
+            meta->setInt32("temporal-layer-id", layerId);
+        }
+    }
+
     if (trackType == MEDIA_TRACK_TYPE_TIMEDTEXT) {
         const char *mime;
         CHECK(mTimedTextTrack.mSource != NULL
@@ -1420,7 +1427,9 @@
         options.setNonBlocking();
     }
 
-    bool couldReadMultiple = (!mIsWidevine && trackType == MEDIA_TRACK_TYPE_AUDIO);
+    bool couldReadMultiple =
+        (!mIsWidevine && trackType == MEDIA_TRACK_TYPE_AUDIO
+                && track->mSource->supportReadMultiple());
     for (size_t numBuffers = 0; numBuffers < maxBuffers; ) {
         Vector<MediaBuffer *> mediaBuffers;
         status_t err = NO_ERROR;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 0b10ae4..134da14 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -784,12 +784,9 @@
             }
 
             if (mVideoDecoder != NULL) {
-                float rate = getFrameRate();
-                if (rate > 0) {
-                    sp<AMessage> params = new AMessage();
-                    params->setFloat("operating-rate", rate * mPlaybackSettings.mSpeed);
-                    mVideoDecoder->setParameters(params);
-                }
+                sp<AMessage> params = new AMessage();
+                params->setFloat("playback-speed", mPlaybackSettings.mSpeed);
+                mVideoDecoder->setParameters(params);
             }
 
             sp<AMessage> response = new AMessage;
@@ -1005,6 +1002,7 @@
                 sp<AMessage> inputFormat =
                         mSource->getFormat(false /* audio */);
 
+                setVideoScalingMode(mVideoScalingMode);
                 updateVideoSize(inputFormat, format);
             } else if (what == DecoderBase::kWhatShutdownCompleted) {
                 ALOGV("%s shutdown completed", audio ? "audio" : "video");
@@ -1679,6 +1677,27 @@
             return err;
         }
     }
+
+    if (!audio) {
+        sp<AMessage> params = new AMessage();
+        float rate = getFrameRate();
+        if (rate > 0) {
+            params->setFloat("frame-rate-total", rate);
+        }
+
+        sp<MetaData> fileMeta = getFileMeta();
+        if (fileMeta != NULL) {
+            int32_t videoTemporalLayerCount;
+            if (fileMeta->findInt32(kKeyTemporalLayerCount, &videoTemporalLayerCount)
+                    && videoTemporalLayerCount > 0) {
+                params->setInt32("temporal-layer-count", videoTemporalLayerCount);
+            }
+        }
+
+        if (params->countEntries() > 0) {
+            (*decoder)->setParameters(params);
+        }
+    }
     return OK;
 }
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index 4678956..fa19410 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -41,6 +41,12 @@
 
 namespace android {
 
+// Assumed display refresh rate; the decoder operating rate is capped so that the
+// selected layers' aggregate fps, scaled by playback speed, stays at or below it.
+static float kDisplayRefreshingRate = 60.f;
+
+// The default total video frame rate of a stream when that info is not available from
+// the source.
+static float kDefaultVideoFrameRateTotal = 30.f;
+
 static inline bool getAudioDeepBufferSetting() {
     return property_get_bool("media.stagefright.audio.deep", false /* default_value */);
 }
@@ -69,11 +75,17 @@
       mIsSecure(false),
       mFormatChangePending(false),
       mTimeChangePending(false),
+      mFrameRateTotal(kDefaultVideoFrameRateTotal),
+      mPlaybackSpeed(1.0f),
+      mNumVideoTemporalLayerTotal(1),
+      mNumVideoTemporalLayerAllowed(1),
+      mCurrentMaxVideoTemporalLayerId(0),
       mResumePending(false),
       mComponentName("decoder") {
     mCodecLooper = new ALooper;
     mCodecLooper->setName("NPDecoder-CL");
     mCodecLooper->start(false, false, ANDROID_PRIORITY_AUDIO);
+    mVideoTemporalLayerAggregateFps[0] = mFrameRateTotal;
 }
 
 NuPlayer::Decoder::~Decoder() {
@@ -329,11 +341,73 @@
 }
 
 void NuPlayer::Decoder::onSetParameters(const sp<AMessage> &params) {
-    if (mCodec == NULL) {
-        ALOGW("onSetParameters called before codec is created.");
-        return;
+    bool needAdjustLayers = false;
+    float frameRateTotal;
+    if (params->findFloat("frame-rate-total", &frameRateTotal)
+            && mFrameRateTotal != frameRateTotal) {
+        needAdjustLayers = true;
+        mFrameRateTotal = frameRateTotal;
     }
-    mCodec->setParameters(params);
+
+    int32_t numVideoTemporalLayerTotal;
+    if (params->findInt32("temporal-layer-count", &numVideoTemporalLayerTotal)
+            && numVideoTemporalLayerTotal > 0
+            && numVideoTemporalLayerTotal <= kMaxNumVideoTemporalLayers
+            && mNumVideoTemporalLayerTotal != numVideoTemporalLayerTotal) {
+        needAdjustLayers = true;
+        mNumVideoTemporalLayerTotal = numVideoTemporalLayerTotal;
+    }
+
+    if (needAdjustLayers) {
+        // TODO: For now, layer fps is calculated for some specific architectures.
+        // But it really should be extracted from the stream.
+        mVideoTemporalLayerAggregateFps[0] =
+            mFrameRateTotal / (float)(1ll << (mNumVideoTemporalLayerTotal - 1));
+        for (int32_t i = 1; i < mNumVideoTemporalLayerTotal; ++i) {
+            mVideoTemporalLayerAggregateFps[i] =
+                mFrameRateTotal / (float)(1ll << (mNumVideoTemporalLayerTotal - i))
+                + mVideoTemporalLayerAggregateFps[i - 1];
+        }
+    }
+
+    float playbackSpeed;
+    if (params->findFloat("playback-speed", &playbackSpeed)
+            && mPlaybackSpeed != playbackSpeed) {
+        needAdjustLayers = true;
+        mPlaybackSpeed = playbackSpeed;
+    }
+
+    if (needAdjustLayers) {
+        int32_t layerId;
+        for (layerId = 0; layerId < mNumVideoTemporalLayerTotal; ++layerId) {
+            if (mVideoTemporalLayerAggregateFps[layerId] * mPlaybackSpeed
+                    > kDisplayRefreshingRate) {
+                --layerId;
+                break;
+            }
+        }
+        if (layerId < 0) {
+            layerId = 0;
+        } else if (layerId >= mNumVideoTemporalLayerTotal) {
+            layerId = mNumVideoTemporalLayerTotal - 1;
+        }
+        mNumVideoTemporalLayerAllowed = layerId + 1;
+        if (mCurrentMaxVideoTemporalLayerId > layerId) {
+            mCurrentMaxVideoTemporalLayerId = layerId;
+        }
+        ALOGV("onSetParameters: allowed layers=%d, current max layerId=%d",
+                mNumVideoTemporalLayerAllowed, mCurrentMaxVideoTemporalLayerId);
+
+        if (mCodec == NULL) {
+            ALOGW("onSetParameters called before codec is created.");
+            return;
+        }
+
+        sp<AMessage> codecParams = new AMessage();
+        codecParams->setFloat("operating-rate",
+                mVideoTemporalLayerAggregateFps[layerId] * mPlaybackSpeed);
+        mCodec->setParameters(codecParams);
+    }
 }
 
 void NuPlayer::Decoder::onSetRenderer(const sp<Renderer> &renderer) {
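
To make the selection arithmetic concrete: a 30 fps stream with 3 temporal layers yields mVideoTemporalLayerAggregateFps = [7.5, 15, 30], since each additional layer doubles the frame rate. At 4x playback speed against kDisplayRefreshingRate = 60:

    // layer 0:  7.5 * 4 =  30   <= 60   keep scanning
    // layer 1: 15.0 * 4 =  60   <= 60   keep scanning
    // layer 2: 30.0 * 4 = 120   >  60   back off: layerId = 1
    // mNumVideoTemporalLayerAllowed = 2; the codec gets operating-rate = 15 * 4 = 60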
@@ -742,13 +816,27 @@
         }
 
         dropAccessUnit = false;
-        if (!mIsAudio
-                && !mIsSecure
-                && mRenderer->getVideoLateByUs() > 100000ll
-                && mIsVideoAVC
-                && !IsAVCReferenceFrame(accessUnit)) {
-            dropAccessUnit = true;
-            ++mNumInputFramesDropped;
+        if (!mIsAudio && !mIsSecure) {
+            int32_t layerId = 0;
+            if (mRenderer->getVideoLateByUs() > 100000ll
+                    && mIsVideoAVC
+                    && !IsAVCReferenceFrame(accessUnit)) {
+                dropAccessUnit = true;
+            } else if (accessUnit->meta()->findInt32("temporal-layer-id", &layerId)) {
+                // Add only one layer each time.
+                if (layerId > mCurrentMaxVideoTemporalLayerId + 1
+                        || layerId >= mNumVideoTemporalLayerAllowed) {
+                    dropAccessUnit = true;
+                    ALOGV("dropping layer(%d), speed=%g, allowed layer count=%d, max layerId=%d",
+                            layerId, mPlaybackSpeed, mNumVideoTemporalLayerAllowed,
+                            mCurrentMaxVideoTemporalLayerId);
+                } else if (layerId > mCurrentMaxVideoTemporalLayerId) {
+                    mCurrentMaxVideoTemporalLayerId = layerId;
+                }
+            }
+            if (dropAccessUnit) {
+                ++mNumInputFramesDropped;
+            }
         }
     } while (dropAccessUnit);
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
index ae08b4b..0c619ed 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
@@ -57,6 +57,10 @@
         kWhatSetVideoSurface     = 'sSur'
     };
 
+    enum {
+        kMaxNumVideoTemporalLayers = 32,
+    };
+
     sp<Surface> mSurface;
 
     sp<Source> mSource;
@@ -90,6 +94,12 @@
     bool mIsSecure;
     bool mFormatChangePending;
     bool mTimeChangePending;
+    float mFrameRateTotal;
+    float mPlaybackSpeed;
+    int32_t mNumVideoTemporalLayerTotal;
+    int32_t mNumVideoTemporalLayerAllowed;
+    int32_t mCurrentMaxVideoTemporalLayerId;
+    float mVideoTemporalLayerAggregateFps[kMaxNumVideoTemporalLayers];
 
     bool mResumePending;
     AString mComponentName;
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index d97d5b1..0d368e6 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -2462,6 +2462,109 @@
     return OK;
 }
 
+status_t ACodec::configureTemporalLayers(
+        const sp<AMessage> &msg, bool inConfigure, sp<AMessage> &outputFormat) {
+    if (!mIsVideo || !mIsEncoder) {
+        return INVALID_OPERATION;
+    }
+
+    AString tsSchema;
+    if (!msg->findString("ts-schema", &tsSchema)) {
+        return OK;
+    }
+
+    unsigned int numLayers = 0;
+    unsigned int numBLayers = 0;
+    int tags;
+    char dummy;
+    OMX_VIDEO_ANDROID_TEMPORALLAYERINGPATTERNTYPE pattern =
+        OMX_VIDEO_AndroidTemporalLayeringPatternNone;
+    if (sscanf(tsSchema.c_str(), "webrtc.vp8.%u-layer%c", &numLayers, &dummy) == 1
+            && numLayers > 0) {
+        pattern = OMX_VIDEO_AndroidTemporalLayeringPatternWebRTC;
+    } else if ((tags = sscanf(tsSchema.c_str(), "android.generic.%u%c%u%c",
+                    &numLayers, &dummy, &numBLayers, &dummy))
+            && (tags == 1 || (tags == 3 && dummy == '+'))
+            && numLayers > 0 && numLayers < UINT32_MAX - numBLayers) {
+        numLayers += numBLayers;
+        pattern = OMX_VIDEO_AndroidTemporalLayeringPatternAndroid;
+    } else {
+        ALOGI("Ignoring unsupported ts-schema [%s]", tsSchema.c_str());
+        return BAD_VALUE;
+    }
+
+    OMX_VIDEO_PARAM_ANDROID_TEMPORALLAYERINGTYPE layerParams;
+    InitOMXParams(&layerParams);
+    layerParams.nPortIndex = kPortIndexOutput;
+
+    status_t err = mOMX->getParameter(
+        mNode, (OMX_INDEXTYPE)OMX_IndexParamAndroidVideoTemporalLayering,
+        &layerParams, sizeof(layerParams));
+
+    if (err != OK) {
+        return err;
+    } else if (!(layerParams.eSupportedPatterns & pattern)) {
+        return BAD_VALUE;
+    }
+
+    numLayers = min(numLayers, layerParams.nLayerCountMax);
+    numBLayers = min(numBLayers, layerParams.nBLayerCountMax);
+
+    if (!inConfigure) {
+        OMX_VIDEO_CONFIG_ANDROID_TEMPORALLAYERINGTYPE layerConfig;
+        InitOMXParams(&layerConfig);
+        layerConfig.nPortIndex = kPortIndexOutput;
+        layerConfig.ePattern = pattern;
+        layerConfig.nPLayerCountActual = numLayers - numBLayers;
+        layerConfig.nBLayerCountActual = numBLayers;
+        layerConfig.bBitrateRatiosSpecified = OMX_FALSE;
+
+        err = mOMX->setConfig(
+                mNode, (OMX_INDEXTYPE)OMX_IndexConfigAndroidVideoTemporalLayering,
+                &layerConfig, sizeof(layerConfig));
+    } else {
+        layerParams.ePattern = pattern;
+        layerParams.nPLayerCountActual = numLayers - numBLayers;
+        layerParams.nBLayerCountActual = numBLayers;
+        layerParams.bBitrateRatiosSpecified = OMX_FALSE;
+
+        err = mOMX->setParameter(
+                mNode, (OMX_INDEXTYPE)OMX_IndexParamAndroidVideoTemporalLayering,
+                &layerParams, sizeof(layerParams));
+    }
+
+    AString configSchema;
+    if (pattern == OMX_VIDEO_AndroidTemporalLayeringPatternAndroid) {
+        configSchema = AStringPrintf("android.generic.%u+%u", numLayers - numBLayers, numBLayers);
+    } else if (pattern == OMX_VIDEO_AndroidTemporalLayeringPatternWebRTC) {
+        configSchema = AStringPrintf("webrtc.vp8.%u", numLayers);
+    }
+
+    if (err != OK) {
+        ALOGW("Failed to set temporal layers to %s (requested %s)",
+                configSchema.c_str(), tsSchema.c_str());
+        return err;
+    }
+
+    err = mOMX->getParameter(
+            mNode, (OMX_INDEXTYPE)OMX_IndexParamAndroidVideoTemporalLayering,
+            &layerParams, sizeof(layerParams));
+
+    if (err == OK) {
+        ALOGD("Temporal layers requested:%s configured:%s got:%s(%u: P=%u, B=%u)",
+                tsSchema.c_str(), configSchema.c_str(),
+                asString(layerParams.ePattern), layerParams.ePattern,
+                layerParams.nPLayerCountActual, layerParams.nBLayerCountActual);
+
+        if (outputFormat.get() == mOutputFormat.get()) {
+            mOutputFormat = mOutputFormat->dup(); // trigger an output format change event
+        }
+        // assume we got what we configured
+        outputFormat->setString("ts-schema", configSchema);
+    }
+    return err;
+}
+
 status_t ACodec::setMinBufferSize(OMX_U32 portIndex, size_t size) {
     OMX_PARAM_PORTDEFINITIONTYPE def;
     InitOMXParams(&def);
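
The ts-schema grammar accepted above (and reused for VP8 and in Utils.cpp below), with how the sscanf results work out on illustrative inputs:

    // "webrtc.vp8.2-layer"  -> %u matches 2, trailing %c finds nothing: sscanf
    //                          returns 1 -> WebRTC pattern, 2 layers
    // "android.generic.2+1" -> tags == 3, dummy == '+': 2 P-layers + 1 B-layer,
    //                          numLayers = 3
    // "android.generic.4"   -> tags == 1: 4 P-layers, no B-layers
    // anything else         -> BAD_VALUE ("Ignoring unsupported ts-schema")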
@@ -3146,6 +3249,29 @@
     return ERROR_UNSUPPORTED;
 }
 
+status_t ACodec::setPortBufferNum(OMX_U32 portIndex, int bufferNum) {
+    OMX_PARAM_PORTDEFINITIONTYPE def;
+    InitOMXParams(&def);
+    def.nPortIndex = portIndex;
+    status_t err;
+    ALOGD("Setting [%s] %s port buffer number: %d", mComponentName.c_str(),
+            portIndex == kPortIndexInput ? "input" : "output", bufferNum);
+    err = mOMX->getParameter(
+        mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+    if (err != OK) {
+        return err;
+    }
+    def.nBufferCountActual = bufferNum;
+    err = mOMX->setParameter(
+        mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+    if (err != OK) {
+        // Component could reject this request.
+        ALOGW("Fail to set [%s] %s port buffer number: %d", mComponentName.c_str(),
+            portIndex == kPortIndexInput ? "input" : "output", bufferNum);
+    }
+    return OK;
+}
+
 status_t ACodec::setupVideoDecoder(
         const char *mime, const sp<AMessage> &msg, bool haveNativeWindow,
         bool usingSwRenderer, sp<AMessage> &outputFormat) {
@@ -3202,6 +3328,24 @@
         return err;
     }
 
+    // Request |tmp| input buffers on the component. If the request succeeds, the
+    // input port buffer count becomes |tmp|; if it fails, the component keeps its
+    // previous buffer count.
+    if (msg->findInt32("android._num-input-buffers", &tmp)) {
+        err = setPortBufferNum(kPortIndexInput, tmp);
+        if (err != OK)
+            return err;
+    }
+
+    // Request |tmp| output buffers on the component. If the request succeeds, the
+    // output port buffer count becomes |tmp|; if it fails, the component keeps its
+    // previous buffer count.
+    if (msg->findInt32("android._num-output-buffers", &tmp)) {
+        err = setPortBufferNum(kPortIndexOutput, tmp);
+        if (err != OK)
+            return err;
+    }
+
     int32_t frameRateInt;
     float frameRateFloat;
     if (!msg->findFloat("frame-rate", &frameRateFloat)) {
@@ -3735,6 +3879,10 @@
             break;
     }
 
+    if (err != OK) {
+        return err;
+    }
+
     // Set up color aspects on input, but propagate them to the output format, as they will
     // not be read back from encoder.
     err = setColorAspectsForVideoEncoder(msg, outputFormat, inputFormat);
@@ -3753,6 +3901,29 @@
         err = OK;
     }
 
+    if (err != OK) {
+        return err;
+    }
+
+    switch (compressionFormat) {
+        case OMX_VIDEO_CodingAVC:
+        case OMX_VIDEO_CodingHEVC:
+            err = configureTemporalLayers(msg, true /* inConfigure */, outputFormat);
+            if (err != OK) {
+                err = OK; // ignore failure
+            }
+            break;
+
+        case OMX_VIDEO_CodingVP8:
+        case OMX_VIDEO_CodingVP9:
+            // TODO: do we need to support android.generic layering? webrtc layering is
+            // already set up in setupVPXEncoderParameters.
+            break;
+
+        default:
+            break;
+    }
+
     if (err == OK) {
         ALOGI("setupVideoEncoder succeeded");
     }
@@ -4236,18 +4407,25 @@
 
     AString tsSchema;
     if (msg->findString("ts-schema", &tsSchema)) {
-        if (tsSchema == "webrtc.vp8.1-layer") {
+        unsigned int numLayers = 0;
+        unsigned int numBLayers = 0;
+        int tags;
+        char dummy;
+        if (sscanf(tsSchema.c_str(), "webrtc.vp8.%u-layer%c", &numLayers, &dummy) == 1
+                && numLayers > 0) {
             pattern = OMX_VIDEO_VPXTemporalLayerPatternWebRTC;
-            tsLayers = 1;
-        } else if (tsSchema == "webrtc.vp8.2-layer") {
+            tsLayers = numLayers;
+        } else if ((tags = sscanf(tsSchema.c_str(), "android.generic.%u%c%u%c",
+                        &numLayers, &dummy, &numBLayers, &dummy))
+                && (tags == 1 || (tags == 3 && dummy == '+'))
+                && numLayers > 0 && numLayers < UINT32_MAX - numBLayers) {
             pattern = OMX_VIDEO_VPXTemporalLayerPatternWebRTC;
-            tsLayers = 2;
-        } else if (tsSchema == "webrtc.vp8.3-layer") {
-            pattern = OMX_VIDEO_VPXTemporalLayerPatternWebRTC;
-            tsLayers = 3;
+            // VPX does not have a concept of B-frames, so just count all layers
+            tsLayers = numLayers + numBLayers;
         } else {
-            ALOGW("Unsupported ts-schema [%s]", tsSchema.c_str());
+            ALOGW("Ignoring unsupported ts-schema [%s]", tsSchema.c_str());
         }
+        tsLayers = min(tsLayers, (size_t)OMX_VIDEO_ANDROID_MAXVP8TEMPORALLAYERS);
     }
 
     OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE vp8type;
@@ -7301,7 +7479,12 @@
         }
     }
 
-    return OK;
+    status_t err = configureTemporalLayers(params, false /* inConfigure */, mOutputFormat);
+    if (err != OK) {
+        err = OK; // ignore failure
+    }
+
+    return err;
 }
 
 void ACodec::onSignalEndOfInputStream() {
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index 2445842..3848502 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -104,6 +104,7 @@
 
 LOCAL_STATIC_LIBRARIES := \
         libstagefright_color_conversion \
+        libyuv_static \
         libstagefright_aacenc \
         libstagefright_matroska \
         libstagefright_mediafilter \
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index 6a67fcf..58448010 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -44,6 +44,7 @@
 
 #include <byteswap.h>
 #include "include/ID3.h"
+#include "include/avc_utils.h"
 
 #ifndef UINT32_MAX
 #define UINT32_MAX       (4294967295U)
@@ -2471,6 +2472,15 @@
         if (!strcasecmp(mMetaKeyMap[index].c_str(), "com.android.capture.fps")) {
             mFileMetaData->setFloat(kKeyCaptureFramerate, *(float *)&val);
         }
+    } else if (dataType == 67 && dataSize >= 4) {
+        // BE signed int32
+        uint32_t val;
+        if (!mDataSource->getUInt32(offset, &val)) {
+            return ERROR_MALFORMED;
+        }
+        if (!strcasecmp(mMetaKeyMap[index].c_str(), "com.android.video.temporal_layers_count")) {
+            mFileMetaData->setInt32(kKeyTemporalLayerCount, val);
+        }
     } else {
         // add more keys if needed
         ALOGV("ignoring key: type %d, size %d", dataType, dataSize);
@@ -4464,6 +4474,12 @@
                     kKeyTargetTime, targetSampleTimeUs);
         }
 
+        if (mIsAVC) {
+            uint32_t layerId = FindAVCLayerId(
+                    (const uint8_t *)mBuffer->data(), mBuffer->range_length());
+            mBuffer->meta_data()->setInt32(kKeyTemporalLayerId, layerId);
+        }
+
         if (isSyncSample) {
             mBuffer->meta_data()->setInt32(kKeyIsSyncFrame, 1);
         }
@@ -4627,6 +4643,12 @@
                         kKeyTargetTime, targetSampleTimeUs);
             }
 
+            if (mIsAVC) {
+                uint32_t layerId = FindAVCLayerId(
+                        (const uint8_t *)mBuffer->data(), mBuffer->range_length());
+                mBuffer->meta_data()->setInt32(kKeyTemporalLayerId, layerId);
+            }
+
             if (isSyncSample) {
                 mBuffer->meta_data()->setInt32(kKeyIsSyncFrame, 1);
             }
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 24fb987..427891d 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -43,6 +43,7 @@
 
 #include "include/ESDS.h"
 #include "include/HevcUtils.h"
+#include "include/avc_utils.h"
 
 #ifndef __predict_false
 #define __predict_false(exp) __builtin_expect((exp) != 0, 0)
@@ -70,6 +71,7 @@
 static const char kMetaKey_Build[]      = "com.android.build";
 #endif
 static const char kMetaKey_CaptureFps[] = "com.android.capture.fps";
+static const char kMetaKey_TemporalLayerCount[] = "com.android.video.temporal_layers_count";
 
 static const uint8_t kMandatoryHevcNalUnitTypes[3] = {
     kHevcNalUnitTypeVps,
@@ -1162,6 +1164,37 @@
     }
 }
 
+off64_t MPEG4Writer::addMultipleLengthPrefixedSamples_l(MediaBuffer *buffer) {
+    off64_t old_offset = mOffset;
+
+    const size_t kExtensionNALSearchRange = 64; // bytes to look for non-VCL NALUs
+
+    const uint8_t *dataStart = (const uint8_t *)buffer->data() + buffer->range_offset();
+    const uint8_t *currentNalStart = dataStart;
+    const uint8_t *nextNalStart;
+    const uint8_t *data = dataStart;
+    size_t nextNalSize;
+    size_t searchSize = buffer->range_length() > kExtensionNALSearchRange ?
+                   kExtensionNALSearchRange : buffer->range_length();
+
+    while (getNextNALUnit(&data, &searchSize, &nextNalStart,
+            &nextNalSize, true) == OK) {
+        size_t currentNalSize = nextNalStart - currentNalStart - 4 /* strip start-code */;
+        MediaBuffer *nalBuf = new MediaBuffer((void *)currentNalStart, currentNalSize);
+        addLengthPrefixedSample_l(nalBuf);
+        nalBuf->release();
+
+        currentNalStart = nextNalStart;
+    }
+
+    size_t currentNalOffset = currentNalStart - dataStart;
+    buffer->set_range(buffer->range_offset() + currentNalOffset,
+            buffer->range_length() - currentNalOffset);
+    addLengthPrefixedSample_l(buffer);
+
+    return old_offset;
+}
+
 off64_t MPEG4Writer::addLengthPrefixedSample_l(MediaBuffer *buffer) {
     off64_t old_offset = mOffset;
 
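
addMultipleLengthPrefixedSamples_l() exists because an encoder output buffer may now carry leading non-VCL NAL units (for AVC, the SVC prefix NAL holding the temporal layer id) ahead of the slice data, and each one must become its own length-prefixed MP4 sample. Schematically:

    // in  (Annex-B, 4-byte start codes; only the first 64 bytes are scanned):
    //     [00 00 00 01][prefix NAL][00 00 00 01][IDR slice ........]
    // out (two length-prefixed samples):
    //     [size][prefix NAL] [size][IDR slice ........]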
@@ -1381,6 +1414,19 @@
     return OK;
 }
 
+status_t MPEG4Writer::setTemporalLayerCount(uint32_t layerCount) {
+    if (layerCount > 9) {
+        return BAD_VALUE;
+    }
+
+    if (layerCount > 0) {
+        mMetaKeys->setInt32(kMetaKey_TemporalLayerCount, layerCount);
+        mMoovExtraSize += sizeof(kMetaKey_TemporalLayerCount) + 4 + 32;
+    }
+
+    return OK;
+}
+
 void MPEG4Writer::write(const void *data, size_t size) {
     write(data, 1, size);
 }
@@ -1496,6 +1542,14 @@
     mIsMPEG4 = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG4) ||
                !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC);
 
+    // store temporal layer count
+    if (!mIsAudio) {
+        int32_t count;
+        if (mMeta->findInt32(kKeyTemporalLayerCount, &count) && count > 1) {
+            mOwner->setTemporalLayerCount(count);
+        }
+    }
+
     setTimeScale();
 }
 
@@ -1684,7 +1738,7 @@
         List<MediaBuffer *>::iterator it = chunk->mSamples.begin();
 
         off64_t offset = (chunk->mTrack->isAvc() || chunk->mTrack->isHevc())
-                                ? addLengthPrefixedSample_l(*it)
+                                ? addMultipleLengthPrefixedSamples_l(*it)
                                 : addSample_l(*it);
 
         if (isFirstSample) {
@@ -2593,7 +2647,7 @@
             trackProgressStatus(timestampUs);
         }
         if (!hasMultipleTracks) {
-            off64_t offset = (mIsAvc || mIsHevc) ? mOwner->addLengthPrefixedSample_l(copy)
+            off64_t offset = (mIsAvc || mIsHevc) ? mOwner->addMultipleLengthPrefixedSamples_l(copy)
                                  : mOwner->addSample_l(copy);
 
             uint32_t count = (mOwner->use32BitFileOffset()
diff --git a/media/libstagefright/MediaCodecSource.cpp b/media/libstagefright/MediaCodecSource.cpp
index 0aafa6bd..5039922 100644
--- a/media/libstagefright/MediaCodecSource.cpp
+++ b/media/libstagefright/MediaCodecSource.cpp
@@ -648,6 +648,15 @@
             CHECK(mbuf->meta_data()->findInt64(kKeyTime, &timeUs));
             timeUs += mInputBufferTimeOffsetUs;
 
+            // Due to the extra delay adjustment at the beginning of start/resume,
+            // the adjusted timeUs may be negative if MediaCodecSource goes into pause
+            // state before feeding any buffers to the encoder. Drop the buffer in this
+            // case.
+            if (timeUs < 0) {
+                mbuf->release();
+                return OK;
+            }
+
             // push decoding time for video, or drift time for audio
             if (mIsVideo) {
                 mDecodingTimeQueue.push_back(timeUs);
@@ -832,9 +841,16 @@
                         // Time offset is not applied at
                         // feedEncoderInputBuffer() in surface input case.
                         timeUs += mInputBufferTimeOffsetUs;
-                        // GraphicBufferSource is supposed to discard samples
-                        // queued before start, and offset timeUs by start time
-                        CHECK_GE(timeUs, 0ll);
+
+                        // Due to the extra delay adjustment at the beginning of
+                        // start/resume, the adjusted timeUs may be negative if
+                        // MediaCodecSource goes into pause state before feeding
+                        // any buffers to the encoder. Drop the buffer in this case.
+                        if (timeUs < 0) {
+                            mEncoder->releaseOutputBuffer(index);
+                            break;
+                        }
+
                         // TODO:
                         // Decoding time for surface source is unavailable,
                         // use presentation time for now. May need to move
diff --git a/media/libstagefright/StagefrightMediaScanner.cpp b/media/libstagefright/StagefrightMediaScanner.cpp
index db33e83..8935f6a 100644
--- a/media/libstagefright/StagefrightMediaScanner.cpp
+++ b/media/libstagefright/StagefrightMediaScanner.cpp
@@ -122,6 +122,7 @@
         { "writer", METADATA_KEY_WRITER },
         { "compilation", METADATA_KEY_COMPILATION },
         { "isdrm", METADATA_KEY_IS_DRM },
+        { "date", METADATA_KEY_DATE },
         { "width", METADATA_KEY_VIDEO_WIDTH },
         { "height", METADATA_KEY_VIDEO_HEIGHT },
     };
diff --git a/media/libstagefright/StagefrightMetadataRetriever.cpp b/media/libstagefright/StagefrightMetadataRetriever.cpp
index 377f5fd..d2ba02e 100644
--- a/media/libstagefright/StagefrightMetadataRetriever.cpp
+++ b/media/libstagefright/StagefrightMetadataRetriever.cpp
@@ -158,6 +158,12 @@
     // TODO: Use Flexible color instead
     videoFormat->setInt32("color-format", OMX_COLOR_FormatYUV420Planar);
 
+    // For the thumbnail extraction case, try to allocate a single buffer on
+    // both the input and output ports. NOTE: this request may fail if the
+    // component requires more than that for decoding.
+    videoFormat->setInt32("android._num-input-buffers", 1);
+    videoFormat->setInt32("android._num-output-buffers", 1);
+
     status_t err;
     sp<ALooper> looper = new ALooper;
     looper->start();
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 8a0009c..1c76ad7 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -1317,6 +1317,20 @@
         }
 
         convertMessageToMetaDataColorAspects(msg, meta);
+
+        AString tsSchema;
+        if (msg->findString("ts-schema", &tsSchema)) {
+            unsigned int numLayers = 0;
+            unsigned int numBLayers = 0;
+            char dummy;
+            int tags = sscanf(tsSchema.c_str(), "android.generic.%u%c%u%c",
+                    &numLayers, &dummy, &numBLayers, &dummy);
+            if ((tags == 1 || (tags == 3 && dummy == '+'))
+                    && numLayers > 0 && numLayers < UINT32_MAX - numBLayers
+                    && numLayers + numBLayers <= INT32_MAX) {
+                meta->setInt32(kKeyTemporalLayerCount, numLayers + numBLayers);
+            }
+        }
     } else if (mime.startsWith("audio/")) {
         int32_t numChannels;
         if (msg->findInt32("channel-count", &numChannels)) {
diff --git a/media/libstagefright/avc_utils.cpp b/media/libstagefright/avc_utils.cpp
index ccf3440..0396dc6 100644
--- a/media/libstagefright/avc_utils.cpp
+++ b/media/libstagefright/avc_utils.cpp
@@ -491,6 +491,28 @@
     return true;
 }
 
+uint32_t FindAVCLayerId(const uint8_t *data, size_t size) {
+    CHECK(data != NULL);
+
+    const unsigned kSvcNalType = 0xE;
+    const unsigned kSvcNalSearchRange = 32;
+    // SVC NAL
+    // |---0 1110|1--- ----|---- ----|iii- ----|
+    //       ^                        ^
+    //   NAL-type = 0xE               layer-Id
+    //
+    // layer_id 0 is for the base layer, while 1, 2, ... are enhancement layers.
+    // Layer n uses reference frames from layers 0, 1, ..., n-1.
+
+    uint32_t layerId = 0;
+    sp<ABuffer> svcNAL = FindNAL(
+            data, size > kSvcNalSearchRange ? kSvcNalSearchRange : size, kSvcNalType);
+    if (svcNAL != NULL && svcNAL->size() >= 4) {
+        layerId = (*(svcNAL->data() + 3) >> 5) & 0x7;
+    }
+    return layerId;
+}
+
 sp<MetaData> MakeAACCodecSpecificData(
         unsigned profile, unsigned sampling_freq_index,
         unsigned channel_configuration) {
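
FindAVCLayerId above scans the first 32 bytes for an SVC prefix NAL (type 0xE) and, per the bit diagram, reads the layer id from the top three bits of the fourth NAL byte. A standalone sketch of just the bit extraction, assuming the NAL has already been located (svcLayerIdFromNal is a hypothetical name; FindNAL and sp<ABuffer> are stagefright types):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Hypothetical helper: extracts the 3-bit layer id from byte 3 of an SVC
// prefix NAL (nal_unit_type 14). Returns 0, the base layer, when too short.
static uint32_t svcLayerIdFromNal(const uint8_t *nal, size_t size) {
    if (nal == nullptr || size < 4) {
        return 0;
    }
    return (nal[3] >> 5) & 0x7;
}

int main() {
    // Byte 0: 0x0E (NAL type 14); byte 3: 0b011xxxxx, so layer id 3.
    const uint8_t nal[] = { 0x0E, 0x80, 0x00, 0x60 };
    printf("layer id = %u\n", svcLayerIdFromNal(nal, sizeof(nal)));  // 3
    return 0;
}
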
diff --git a/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp b/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp
index 61b9bfd..cecc52b 100644
--- a/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp
+++ b/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp
@@ -122,7 +122,8 @@
       mSignalledError(false),
       mStride(mWidth){
     initPorts(
-            kNumBuffers, INPUT_BUF_SIZE, kNumBuffers, CODEC_MIME_TYPE);
+            1 /* numMinInputBuffers */, kNumBuffers, INPUT_BUF_SIZE,
+            1 /* numMinOutputBuffers */, kNumBuffers, CODEC_MIME_TYPE);
 
     GETTIME(&mTimeStart, NULL);
 
@@ -381,6 +382,48 @@
     resetPlugin();
 }
 
+bool SoftAVC::getVUIParams() {
+    IV_API_CALL_STATUS_T status;
+    ih264d_ctl_get_vui_params_ip_t s_ctl_get_vui_params_ip;
+    ih264d_ctl_get_vui_params_op_t s_ctl_get_vui_params_op;
+
+    s_ctl_get_vui_params_ip.e_cmd = IVD_CMD_VIDEO_CTL;
+    s_ctl_get_vui_params_ip.e_sub_cmd =
+        (IVD_CONTROL_API_COMMAND_TYPE_T)IH264D_CMD_CTL_GET_VUI_PARAMS;
+
+    s_ctl_get_vui_params_ip.u4_size =
+        sizeof(ih264d_ctl_get_vui_params_ip_t);
+
+    s_ctl_get_vui_params_op.u4_size = sizeof(ih264d_ctl_get_vui_params_op_t);
+
+    status = ivdec_api_function(
+            (iv_obj_t *)mCodecCtx, (void *)&s_ctl_get_vui_params_ip,
+            (void *)&s_ctl_get_vui_params_op);
+
+    if (status != IV_SUCCESS) {
+        ALOGW("Error in getting VUI params: 0x%x",
+                s_ctl_get_vui_params_op.u4_error_code);
+        return false;
+    }
+
+    int32_t primaries = s_ctl_get_vui_params_op.u1_colour_primaries;
+    int32_t transfer = s_ctl_get_vui_params_op.u1_tfr_chars;
+    int32_t coeffs = s_ctl_get_vui_params_op.u1_matrix_coeffs;
+    bool fullRange = s_ctl_get_vui_params_op.u1_video_full_range_flag;
+
+    ColorAspects colorAspects;
+    ColorUtils::convertIsoColorAspectsToCodecAspects(
+            primaries, transfer, coeffs, fullRange, colorAspects);
+
+    // Update color aspects if necessary.
+    if (colorAspectsDiffer(colorAspects, mBitstreamColorAspects)) {
+        mBitstreamColorAspects = colorAspects;
+        status_t err = handleColorAspectsChange();
+        CHECK(err == OK);
+    }
+    return true;
+}
+
 bool SoftAVC::setDecodeArgs(
         ivd_video_decode_ip_t *ps_dec_ip,
         ivd_video_decode_op_t *ps_dec_op,
@@ -606,6 +649,8 @@
 
             bool resChanged = (IVD_RES_CHANGED == (s_dec_op.u4_error_code & 0xFF));
 
+            getVUIParams();
+
             GETTIME(&mTimeEnd, NULL);
             /* Compute time taken for decode() */
             TIME_DIFF(mTimeStart, mTimeEnd, timeTaken);
@@ -641,16 +686,22 @@
                 continue;
             }
 
+            // Combine the resolution change and color aspects change into one
+            // OMX_EventPortSettingsChanged event if necessary.
             if ((0 < s_dec_op.u4_pic_wd) && (0 < s_dec_op.u4_pic_ht)) {
                 uint32_t width = s_dec_op.u4_pic_wd;
                 uint32_t height = s_dec_op.u4_pic_ht;
                 bool portWillReset = false;
                 handlePortSettingsChange(&portWillReset, width, height);
-
                 if (portWillReset) {
                     resetDecoder();
                     return;
                 }
+            } else if (mUpdateColorAspects) {
+                notify(OMX_EventPortSettingsChanged, kOutputPortIndex,
+                    kDescribeColorAspectsIndex, NULL);
+                mUpdateColorAspects = false;
+                return;
             }
 
             if (s_dec_op.u4_output_present) {
@@ -705,6 +756,10 @@
     }
 }
 
+int SoftAVC::getColorAspectPreference() {
+    return kPreferBitstream;
+}
+
 }  // namespace android
 
 android::SoftOMXComponent *createSoftOMXComponent(
diff --git a/media/libstagefright/codecs/avcdec/SoftAVCDec.h b/media/libstagefright/codecs/avcdec/SoftAVCDec.h
index c710c76..154ca38 100644
--- a/media/libstagefright/codecs/avcdec/SoftAVCDec.h
+++ b/media/libstagefright/codecs/avcdec/SoftAVCDec.h
@@ -59,6 +59,7 @@
     virtual void onQueueFilled(OMX_U32 portIndex);
     virtual void onPortFlushCompleted(OMX_U32 portIndex);
     virtual void onReset();
+    virtual int getColorAspectPreference();
 private:
     // Number of input and output buffers
     enum {
@@ -116,6 +117,8 @@
             OMX_BUFFERHEADERTYPE *outHeader,
             size_t timeStampIx);
 
+    bool getVUIParams();
+
     DISALLOW_EVIL_CONSTRUCTORS(SoftAVC);
 };
 
diff --git a/media/libstagefright/colorconversion/Android.mk b/media/libstagefright/colorconversion/Android.mk
index 32e2dfd..0bf9701 100644
--- a/media/libstagefright/colorconversion/Android.mk
+++ b/media/libstagefright/colorconversion/Android.mk
@@ -7,7 +7,11 @@
 
 LOCAL_C_INCLUDES := \
         $(TOP)/frameworks/native/include/media/openmax \
-        $(TOP)/hardware/msm7k
+        $(TOP)/hardware/msm7k \
+        $(TOP)/external/libyuv/files/include
+
+LOCAL_STATIC_LIBRARIES := \
+        libyuv_static \
 
 LOCAL_CFLAGS += -Werror
 LOCAL_CLANG := true
diff --git a/media/libstagefright/colorconversion/ColorConverter.cpp b/media/libstagefright/colorconversion/ColorConverter.cpp
index 597167f..3ca7cc0 100644
--- a/media/libstagefright/colorconversion/ColorConverter.cpp
+++ b/media/libstagefright/colorconversion/ColorConverter.cpp
@@ -22,6 +22,10 @@
 #include <media/stagefright/ColorConverter.h>
 #include <media/stagefright/MediaErrors.h>
 
+#include "libyuv/convert_from.h"
+
+#define USE_LIBYUV
+
 namespace android {
 
 ColorConverter::ColorConverter(
@@ -103,7 +107,11 @@
 
     switch (mSrcFormat) {
         case OMX_COLOR_FormatYUV420Planar:
+#ifdef USE_LIBYUV
+            err = convertYUV420PlanarUseLibYUV(src, dst);
+#else
             err = convertYUV420Planar(src, dst);
+#endif
             break;
 
         case OMX_COLOR_FormatCbYCrY:
@@ -196,6 +204,34 @@
     return OK;
 }
 
+status_t ColorConverter::convertYUV420PlanarUseLibYUV(
+        const BitmapParams &src, const BitmapParams &dst) {
+    if (!((src.mCropLeft & 1) == 0
+            && src.cropWidth() == dst.cropWidth()
+            && src.cropHeight() == dst.cropHeight())) {
+        return ERROR_UNSUPPORTED;
+    }
+
+    uint16_t *dst_ptr = (uint16_t *)dst.mBits
+        + dst.mCropTop * dst.mWidth + dst.mCropLeft;
+
+    const uint8_t *src_y =
+        (const uint8_t *)src.mBits + src.mCropTop * src.mWidth + src.mCropLeft;
+
+    const uint8_t *src_u =
+        (const uint8_t *)src_y + src.mWidth * src.mHeight
+        + src.mCropTop * (src.mWidth / 2) + src.mCropLeft / 2;
+
+    const uint8_t *src_v =
+        src_u + (src.mWidth / 2) * (src.mHeight / 2);
+
+    libyuv::I420ToRGB565(src_y, src.mWidth, src_u, src.mWidth / 2, src_v, src.mWidth / 2,
+            (uint8 *)dst_ptr, dst.mWidth * 2, dst.mWidth, dst.mHeight);
+
+    return OK;
+}
+
 status_t ColorConverter::convertYUV420Planar(
         const BitmapParams &src, const BitmapParams &dst) {
     if (!((src.mCropLeft & 1) == 0
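
convertYUV420PlanarUseLibYUV derives the three plane pointers from the I420 geometry: a full-resolution Y plane of mWidth x mHeight bytes followed by quarter-size U and V planes with stride mWidth / 2, then hands them to libyuv::I420ToRGB565 (destination stride is dst.mWidth * 2 bytes, since RGB565 packs two bytes per pixel). A standalone sketch of the offset arithmetic for an uncropped buffer (i420PlaneOffsets is a hypothetical name; BitmapParams is internal to ColorConverter):

#include <cstddef>
#include <cstdio>

// Hypothetical helper: computes the Y/U/V plane offsets inside a contiguous,
// uncropped I420 buffer. width and height must be even.
static void i420PlaneOffsets(int width, int height,
        size_t *yOff, size_t *uOff, size_t *vOff) {
    *yOff = 0;                                          // Y: width x height bytes
    *uOff = (size_t)width * height;                     // U: (width/2) x (height/2)
    *vOff = *uOff + (size_t)(width / 2) * (height / 2); // V follows U
}

int main() {
    size_t y, u, v;
    i420PlaneOffsets(640, 480, &y, &u, &v);
    // Y @ 0, U @ 307200, V @ 384000; chroma stride is 640 / 2 = 320.
    printf("Y @ %zu, U @ %zu, V @ %zu\n", y, u, v);
    return 0;
}
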
diff --git a/media/libstagefright/include/SoftVideoDecoderOMXComponent.h b/media/libstagefright/include/SoftVideoDecoderOMXComponent.h
index 4529007..c9fd745 100644
--- a/media/libstagefright/include/SoftVideoDecoderOMXComponent.h
+++ b/media/libstagefright/include/SoftVideoDecoderOMXComponent.h
@@ -21,6 +21,7 @@
 #include "SimpleSoftOMXComponent.h"
 
 #include <media/stagefright/foundation/AHandlerReflector.h>
+#include <media/stagefright/foundation/ColorUtils.h>
 #include <media/IOMX.h>
 
 #include <utils/RefBase.h>
@@ -43,6 +44,16 @@
             OMX_COMPONENTTYPE **component);
 
 protected:
+    enum {
+        kDescribeColorAspectsIndex = kPrepareForAdaptivePlaybackIndex + 1,
+    };
+
+    enum {
+        kNotSupported,
+        kPreferBitstream,
+        kPreferContainer,
+    };
+
     virtual void onPortEnableCompleted(OMX_U32 portIndex, bool enabled);
     virtual void onReset();
 
@@ -55,15 +66,37 @@
     virtual OMX_ERRORTYPE getConfig(
             OMX_INDEXTYPE index, OMX_PTR params);
 
+    virtual OMX_ERRORTYPE setConfig(
+            OMX_INDEXTYPE index, const OMX_PTR params);
+
     virtual OMX_ERRORTYPE getExtensionIndex(
             const char *name, OMX_INDEXTYPE *index);
 
+    virtual bool supportsDescribeColorAspects();
+
+    virtual int getColorAspectPreference();
+
+    // This function sets both the minimum buffer count and the actual buffer count
+    // of the input port to |numInputBuffers|. It also sets both the minimum buffer
+    // count and the actual buffer count of the output port to |numOutputBuffers|.
     void initPorts(OMX_U32 numInputBuffers,
             OMX_U32 inputBufferSize,
             OMX_U32 numOutputBuffers,
             const char *mimeType,
             OMX_U32 minCompressionRatio = 1u);
 
+    // This function sets the input port's minimum buffer count to |numMinInputBuffers|
+    // and its actual buffer count to |numInputBuffers|, and sets the output port's
+    // minimum buffer count to |numMinOutputBuffers| and its actual buffer count to
+    // |numOutputBuffers|.
+    void initPorts(OMX_U32 numMinInputBuffers,
+            OMX_U32 numInputBuffers,
+            OMX_U32 inputBufferSize,
+            OMX_U32 numMinOutputBuffers,
+            OMX_U32 numOutputBuffers,
+            const char *mimeType,
+            OMX_U32 minCompressionRatio = 1u);
+
     virtual void updatePortDefinitions(bool updateCrop = true, bool updateInputSize = false);
 
     uint32_t outputBufferWidth();
@@ -74,6 +107,10 @@
         kCropSet,
         kCropChanged,
     };
+
+    // This function handles several port setting change events, including changes
+    // to size, crop, stride and color aspects. It triggers an
+    // OMX_EventPortSettingsChanged event if necessary.
     void handlePortSettingsChange(
             bool *portWillReset, uint32_t width, uint32_t height,
             CropSettingsMode cropSettingsMode = kCropUnSet, bool fakeStride = false);
@@ -99,6 +136,29 @@
         AWAITING_ENABLED
     } mOutputPortSettingsChange;
 
+    bool mUpdateColorAspects;
+
+    Mutex mColorAspectsLock;
+    // color aspects passed from the framework.
+    ColorAspects mDefaultColorAspects;
+    // color aspects parsed from the bitstream.
+    ColorAspects mBitstreamColorAspects;
+    // final color aspects after combining the above two aspects.
+    ColorAspects mFinalColorAspects;
+
+    bool colorAspectsDiffer(const ColorAspects &a, const ColorAspects &b);
+
+    // This function takes two color aspects and updates mFinalColorAspects
+    // based on |preferredAspects|.
+    void updateFinalColorAspects(
+            const ColorAspects &otherAspects, const ColorAspects &preferredAspects);
+
+    // This function updates mFinalColorAspects based on the codec's preference.
+    status_t handleColorAspectsChange();
+
+    // Helper function to dump the ColorAspects.
+    void dumpColorAspects(const ColorAspects &colorAspects);
+
 private:
     uint32_t mMinInputBufferSize;
     uint32_t mMinCompressionRatio;
diff --git a/media/libstagefright/include/avc_utils.h b/media/libstagefright/include/avc_utils.h
index 7465b35..235ee63 100644
--- a/media/libstagefright/include/avc_utils.h
+++ b/media/libstagefright/include/avc_utils.h
@@ -85,6 +85,7 @@
 
 bool IsIDR(const sp<ABuffer> &accessUnit);
 bool IsAVCReferenceFrame(const sp<ABuffer> &accessUnit);
+uint32_t FindAVCLayerId(const uint8_t *data, size_t size);
 
 const char *AVCProfileToString(uint8_t profile);
 
diff --git a/media/libstagefright/omx/GraphicBufferSource.cpp b/media/libstagefright/omx/GraphicBufferSource.cpp
index 995e50e..0c8fd67 100644
--- a/media/libstagefright/omx/GraphicBufferSource.cpp
+++ b/media/libstagefright/omx/GraphicBufferSource.cpp
@@ -775,7 +775,9 @@
 int64_t GraphicBufferSource::getTimestamp(const BufferItem &item) {
     int64_t timeUs = item.mTimestamp / 1000;
 
-    if (mTimePerCaptureUs > 0ll) {
+    if (mTimePerCaptureUs > 0ll
+            && (mTimePerCaptureUs > 2 * mTimePerFrameUs
+            || mTimePerFrameUs > 2 * mTimePerCaptureUs)) {
         // Time lapse or slow motion mode
         if (mPrevCaptureUs < 0ll) {
             // first capture
@@ -801,6 +803,8 @@
 
         return mPrevFrameUs;
     } else if (mMaxTimestampGapUs > 0ll) {
+        // TODO: Fix the case when mMaxTimestampGapUs and mTimePerCaptureUs are both set.
+
         /* Cap timestamp gap between adjacent frames to specified max
          *
          * In the scenario of cast mirroring, encoding could be suspended for
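
The new condition above only takes the time-lapse / slow-motion branch when the configured capture interval and the encoded frame interval differ by more than a factor of two; otherwise capture is treated as near real time and the surface timestamps are kept. A minimal sketch of the predicate with a worked example (isTimeLapseOrSlowMotion is a hypothetical name):

#include <cstdint>
#include <cstdio>

// Hypothetical helper: true when the capture interval and the encoded frame
// interval differ by more than 2x, i.e. time lapse or slow motion.
static bool isTimeLapseOrSlowMotion(int64_t timePerCaptureUs, int64_t timePerFrameUs) {
    return timePerCaptureUs > 0
            && (timePerCaptureUs > 2 * timePerFrameUs
                || timePerFrameUs > 2 * timePerCaptureUs);
}

int main() {
    // Capture once per second, play back at 30 fps: time lapse.
    printf("%d\n", isTimeLapseOrSlowMotion(1000000, 33333));  // 1
    // Capture at 30 fps, play back at 30 fps: normal recording.
    printf("%d\n", isTimeLapseOrSlowMotion(33333, 33333));    // 0
    return 0;
}
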
diff --git a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
index d3553bd..409cef7 100644
--- a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
+++ b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
@@ -68,6 +68,11 @@
         mCodingType(codingType),
         mProfileLevels(profileLevels),
         mNumProfileLevels(numProfileLevels) {
+
+    // Initialize all the color aspects to Unspecified.
+    memset(&mDefaultColorAspects, 0, sizeof(ColorAspects));
+    memset(&mBitstreamColorAspects, 0, sizeof(ColorAspects));
+    memset(&mFinalColorAspects, 0, sizeof(ColorAspects));
 }
 
 void SoftVideoDecoderOMXComponent::initPorts(
@@ -76,6 +81,18 @@
         OMX_U32 numOutputBuffers,
         const char *mimeType,
         OMX_U32 minCompressionRatio) {
+    initPorts(numInputBuffers, numInputBuffers, inputBufferSize,
+            numOutputBuffers, numOutputBuffers, mimeType, minCompressionRatio);
+}
+
+void SoftVideoDecoderOMXComponent::initPorts(
+        OMX_U32 numMinInputBuffers,
+        OMX_U32 numInputBuffers,
+        OMX_U32 inputBufferSize,
+        OMX_U32 numMinOutputBuffers,
+        OMX_U32 numOutputBuffers,
+        const char *mimeType,
+        OMX_U32 minCompressionRatio) {
     mMinInputBufferSize = inputBufferSize;
     mMinCompressionRatio = minCompressionRatio;
 
@@ -84,8 +101,8 @@
 
     def.nPortIndex = kInputPortIndex;
     def.eDir = OMX_DirInput;
-    def.nBufferCountMin = numInputBuffers;
-    def.nBufferCountActual = def.nBufferCountMin;
+    def.nBufferCountMin = numMinInputBuffers;
+    def.nBufferCountActual = numInputBuffers;
     def.nBufferSize = inputBufferSize;
     def.bEnabled = OMX_TRUE;
     def.bPopulated = OMX_FALSE;
@@ -107,8 +124,8 @@
 
     def.nPortIndex = kOutputPortIndex;
     def.eDir = OMX_DirOutput;
-    def.nBufferCountMin = numOutputBuffers;
-    def.nBufferCountActual = def.nBufferCountMin;
+    def.nBufferCountMin = numMinOutputBuffers;
+    def.nBufferCountActual = numOutputBuffers;
     def.bEnabled = OMX_TRUE;
     def.bPopulated = OMX_FALSE;
     def.eDomain = OMX_PortDomainVideo;
@@ -224,9 +241,66 @@
             notify(OMX_EventPortSettingsChanged, kOutputPortIndex,
                    OMX_IndexConfigCommonOutputCrop, NULL);
         }
+    } else if (mUpdateColorAspects) {
+        notify(OMX_EventPortSettingsChanged, kOutputPortIndex,
+                kDescribeColorAspectsIndex, NULL);
+        mUpdateColorAspects = false;
     }
 }
 
+void SoftVideoDecoderOMXComponent::dumpColorAspects(const ColorAspects &colorAspects) {
+    ALOGD("dumpColorAspects: (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) ",
+            colorAspects.mRange, asString(colorAspects.mRange),
+            colorAspects.mPrimaries, asString(colorAspects.mPrimaries),
+            colorAspects.mMatrixCoeffs, asString(colorAspects.mMatrixCoeffs),
+            colorAspects.mTransfer, asString(colorAspects.mTransfer));
+}
+
+bool SoftVideoDecoderOMXComponent::colorAspectsDiffer(
+        const ColorAspects &a, const ColorAspects &b) {
+    if (a.mRange != b.mRange
+        || a.mPrimaries != b.mPrimaries
+        || a.mTransfer != b.mTransfer
+        || a.mMatrixCoeffs != b.mMatrixCoeffs) {
+        return true;
+    }
+    return false;
+}
+
+void SoftVideoDecoderOMXComponent::updateFinalColorAspects(
+        const ColorAspects &otherAspects, const ColorAspects &preferredAspects) {
+    Mutex::Autolock autoLock(mColorAspectsLock);
+    ColorAspects newAspects;
+    newAspects.mRange = preferredAspects.mRange != ColorAspects::RangeUnspecified ?
+        preferredAspects.mRange : otherAspects.mRange;
+    newAspects.mPrimaries = preferredAspects.mPrimaries != ColorAspects::PrimariesUnspecified ?
+        preferredAspects.mPrimaries : otherAspects.mPrimaries;
+    newAspects.mTransfer = preferredAspects.mTransfer != ColorAspects::TransferUnspecified ?
+        preferredAspects.mTransfer : otherAspects.mTransfer;
+    newAspects.mMatrixCoeffs = preferredAspects.mMatrixCoeffs != ColorAspects::MatrixUnspecified ?
+        preferredAspects.mMatrixCoeffs : otherAspects.mMatrixCoeffs;
+
+    // Check whether mFinalColorAspects needs to be updated.
+    if (colorAspectsDiffer(mFinalColorAspects, newAspects)) {
+        mFinalColorAspects = newAspects;
+        mUpdateColorAspects = true;
+    }
+}
+
+status_t SoftVideoDecoderOMXComponent::handleColorAspectsChange() {
+    int preference = getColorAspectPreference();
+    ALOGD("Color Aspects preference: %d ", preference);
+
+    if (preference == kPreferBitstream) {
+        updateFinalColorAspects(mDefaultColorAspects, mBitstreamColorAspects);
+    } else if (preference == kPreferContainer) {
+        updateFinalColorAspects(mBitstreamColorAspects, mDefaultColorAspects);
+    } else {
+        return OMX_ErrorUnsupportedSetting;
+    }
+    return OK;
+}
+
 void SoftVideoDecoderOMXComponent::copyYV12FrameToOutputBuffer(
         uint8_t *dst, const uint8_t *srcY, const uint8_t *srcU, const uint8_t *srcV,
         size_t srcYStride, size_t srcUStride, size_t srcVStride) {
@@ -450,7 +524,7 @@
 
 OMX_ERRORTYPE SoftVideoDecoderOMXComponent::getConfig(
         OMX_INDEXTYPE index, OMX_PTR params) {
-    switch (index) {
+    switch ((int)index) {
         case OMX_IndexConfigCommonOutputCrop:
         {
             OMX_CONFIG_RECTTYPE *rectParams = (OMX_CONFIG_RECTTYPE *)params;
@@ -470,22 +544,88 @@
 
             return OMX_ErrorNone;
         }
+        case kDescribeColorAspectsIndex:
+        {
+            if (!supportsDescribeColorAspects()) {
+                return OMX_ErrorUnsupportedIndex;
+            }
+
+            DescribeColorAspectsParams* colorAspectsParams =
+                    (DescribeColorAspectsParams *)params;
+
+            if (colorAspectsParams->nPortIndex != kOutputPortIndex) {
+                return OMX_ErrorBadParameter;
+            }
+
+            colorAspectsParams->sAspects = mFinalColorAspects;
+            if (colorAspectsParams->bRequestingDataSpace || colorAspectsParams->bDataSpaceChanged) {
+                return OMX_ErrorUnsupportedSetting;
+            }
+
+            return OMX_ErrorNone;
+        }
 
         default:
             return OMX_ErrorUnsupportedIndex;
     }
 }
 
+OMX_ERRORTYPE SoftVideoDecoderOMXComponent::setConfig(
+        OMX_INDEXTYPE index, const OMX_PTR params) {
+    switch ((int)index) {
+        case kDescribeColorAspectsIndex:
+        {
+            if (!supportsDescribeColorAspects()) {
+                return OMX_ErrorUnsupportedIndex;
+            }
+            const DescribeColorAspectsParams* colorAspectsParams =
+                    (const DescribeColorAspectsParams *)params;
+
+            if (!isValidOMXParam(colorAspectsParams)) {
+                return OMX_ErrorBadParameter;
+            }
+
+            if (colorAspectsParams->nPortIndex != kOutputPortIndex) {
+                return OMX_ErrorBadParameter;
+            }
+
+            // Update color aspects if necessary.
+            if (colorAspectsDiffer(colorAspectsParams->sAspects, mDefaultColorAspects)) {
+                mDefaultColorAspects = colorAspectsParams->sAspects;
+                status_t err = handleColorAspectsChange();
+                CHECK(err == OK);
+            }
+            return OMX_ErrorNone;
+        }
+
+        default:
+            return OMX_ErrorUnsupportedIndex;
+    }
+}
+
 OMX_ERRORTYPE SoftVideoDecoderOMXComponent::getExtensionIndex(
         const char *name, OMX_INDEXTYPE *index) {
     if (!strcmp(name, "OMX.google.android.index.prepareForAdaptivePlayback")) {
         *(int32_t*)index = kPrepareForAdaptivePlaybackIndex;
         return OMX_ErrorNone;
+    } else if (!strcmp(name, "OMX.google.android.index.describeColorAspects")
+                && supportsDescribeColorAspects()) {
+        *(int32_t*)index = kDescribeColorAspectsIndex;
+        return OMX_ErrorNone;
     }
 
     return SimpleSoftOMXComponent::getExtensionIndex(name, index);
 }
 
+bool SoftVideoDecoderOMXComponent::supportsDescribeColorAspects() {
+    return getColorAspectPreference() != kNotSupported;
+}
+
+int SoftVideoDecoderOMXComponent::getColorAspectPreference() {
+    return kNotSupported;
+}
+
 void SoftVideoDecoderOMXComponent::onReset() {
     mOutputPortSettingsChange = NONE;
 }
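
handleColorAspectsChange above picks an ordering from getColorAspectPreference(), and updateFinalColorAspects then takes each field from the preferred source, falling back to the other source wherever the preferred value is Unspecified. A minimal standalone sketch of that per-field merge, using plain ints where 0 stands for Unspecified (Aspects and mergeAspects are hypothetical stand-ins for the stagefright ColorAspects machinery):

#include <cstdio>

// Simplified stand-in for the stagefright ColorAspects fields; 0 = Unspecified.
struct Aspects {
    int range, primaries, transfer, matrixCoeffs;
};

// Each field comes from |preferred| unless it is unspecified there,
// in which case the value from |other| is used.
static Aspects mergeAspects(const Aspects &other, const Aspects &preferred) {
    Aspects out;
    out.range        = preferred.range        ? preferred.range        : other.range;
    out.primaries    = preferred.primaries    ? preferred.primaries    : other.primaries;
    out.transfer     = preferred.transfer     ? preferred.transfer     : other.transfer;
    out.matrixCoeffs = preferred.matrixCoeffs ? preferred.matrixCoeffs : other.matrixCoeffs;
    return out;
}

int main() {
    Aspects container = {1, 1, 1, 1};   // fully specified by the framework
    Aspects bitstream = {0, 2, 0, 2};   // bitstream specifies primaries/matrix only
    // kPreferBitstream: bitstream values win wherever they are specified.
    Aspects final = mergeAspects(container, bitstream);
    printf("%d %d %d %d\n", final.range, final.primaries,
            final.transfer, final.matrixCoeffs);  // 1 2 1 2
    return 0;
}
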
diff --git a/media/libstagefright/rtsp/MyHandler.h b/media/libstagefright/rtsp/MyHandler.h
index f9a9ab9..abe2582 100644
--- a/media/libstagefright/rtsp/MyHandler.h
+++ b/media/libstagefright/rtsp/MyHandler.h
@@ -1042,49 +1042,13 @@
                     return;
                 }
 
-                sp<ABuffer> accessUnit;
-                CHECK(msg->findBuffer("access-unit", &accessUnit));
-
-                uint32_t seqNum = (uint32_t)accessUnit->int32Data();
-
                 if (mSeekPending) {
                     ALOGV("we're seeking, dropping stale packet.");
                     break;
                 }
 
-                if (track->mNewSegment) {
-                    // The sequence number from RTP packet has only 16 bits and is extended
-                    // by ARTPSource. Only the low 16 bits of seq in RTP-Info of reply of
-                    // RTSP "PLAY" command should be used to detect the first RTP packet
-                    // after seeking.
-                    if (track->mAllowedStaleAccessUnits > 0) {
-                        if ((((seqNum ^ track->mFirstSeqNumInSegment) & 0xffff) != 0)) {
-                            // Not the first rtp packet of the stream after seeking, discarding.
-                            track->mAllowedStaleAccessUnits--;
-                            ALOGV("discarding stale access unit (0x%x : 0x%x)",
-                                 seqNum, track->mFirstSeqNumInSegment);
-                            break;
-                        }
-                    } else { // track->mAllowedStaleAccessUnits <= 0
-                        mNumAccessUnitsReceived = 0;
-                        ALOGW_IF(track->mAllowedStaleAccessUnits == 0,
-                             "Still no first rtp packet after %d stale ones",
-                             kMaxAllowedStaleAccessUnits);
-                        track->mAllowedStaleAccessUnits = -1;
-                        break;
-                    }
-
-                    // Now found the first rtp packet of the stream after seeking.
-                    track->mFirstSeqNumInSegment = seqNum;
-                    track->mNewSegment = false;
-                }
-
-                if (seqNum < track->mFirstSeqNumInSegment) {
-                    ALOGV("dropping stale access-unit (%d < %d)",
-                         seqNum, track->mFirstSeqNumInSegment);
-                    break;
-                }
-
+                sp<ABuffer> accessUnit;
+                CHECK(msg->findBuffer("access-unit", &accessUnit));
                 onAccessUnitComplete(trackIndex, accessUnit);
                 break;
             }
@@ -1852,17 +1816,16 @@
             int32_t trackIndex, const sp<ABuffer> &accessUnit) {
         ALOGV("onAccessUnitComplete track %d", trackIndex);
 
+        TrackInfo *track = &mTracks.editItemAt(trackIndex);
         if(!mPlayResponseParsed){
-            ALOGI("play response is not parsed, storing accessunit");
-            TrackInfo *track = &mTracks.editItemAt(trackIndex);
+            uint32_t seqNum = (uint32_t)accessUnit->int32Data();
+            ALOGI("play response is not parsed, storing accessunit %u", seqNum);
             track->mPackets.push_back(accessUnit);
             return;
         }
 
         handleFirstAccessUnit();
 
-        TrackInfo *track = &mTracks.editItemAt(trackIndex);
-
         if (!mAllTracksHaveTime) {
             ALOGV("storing accessUnit, no time established yet");
             track->mPackets.push_back(accessUnit);
@@ -1873,6 +1836,41 @@
             sp<ABuffer> accessUnit = *track->mPackets.begin();
             track->mPackets.erase(track->mPackets.begin());
 
+            uint32_t seqNum = (uint32_t)accessUnit->int32Data();
+            if (track->mNewSegment) {
+                // The sequence number from the RTP packet has only 16 bits and is
+                // extended by ARTPSource. Only the low 16 bits of the seq in the
+                // RTP-Info field of the reply to the RTSP "PLAY" command should be
+                // used to detect the first RTP packet after seeking.
+                if (track->mAllowedStaleAccessUnits > 0) {
+                    if ((((seqNum ^ track->mFirstSeqNumInSegment) & 0xffff) != 0)) {
+                        // Not the first rtp packet of the stream after seeking, discarding.
+                        track->mAllowedStaleAccessUnits--;
+                        ALOGV("discarding stale access unit (0x%x : 0x%x)",
+                             seqNum, track->mFirstSeqNumInSegment);
+                        continue;
+                    }
+                } else { // track->mAllowedStaleAccessUnits <= 0
+                    mNumAccessUnitsReceived = 0;
+                    ALOGW_IF(track->mAllowedStaleAccessUnits == 0,
+                         "Still no first rtp packet after %d stale ones",
+                         kMaxAllowedStaleAccessUnits);
+                    track->mAllowedStaleAccessUnits = -1;
+                    return;
+                }
+
+                // Now found the first rtp packet of the stream after seeking.
+                track->mFirstSeqNumInSegment = seqNum;
+                track->mNewSegment = false;
+            }
+
+            if (seqNum < track->mFirstSeqNumInSegment) {
+                ALOGV("dropping stale access-unit (%d < %d)",
+                     seqNum, track->mFirstSeqNumInSegment);
+                continue;
+            }
+
             if (addMediaTimestamp(trackIndex, track, accessUnit)) {
                 postQueueAccessUnit(trackIndex, accessUnit);
             }
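
The relocated staleness check relies on the fact that RTP sequence numbers are 16 bits on the wire and are extended to 32 bits by ARTPSource, so only the low 16 bits can be compared against the seq value announced in the RTP-Info header of the PLAY reply. A minimal illustration (isFirstPacketAfterSeek is a hypothetical name):

#include <cstdint>
#include <cstdio>

// Hypothetical helper: true when the extended sequence number matches the
// 16-bit seq announced in the RTP-Info header of the PLAY response.
static bool isFirstPacketAfterSeek(uint32_t extendedSeqNum, uint32_t rtpInfoSeq) {
    return ((extendedSeqNum ^ rtpInfoSeq) & 0xffff) == 0;
}

int main() {
    // Extended seq 0x10005 has wrapped once; its low 16 bits still match 0x0005.
    printf("%d\n", isFirstPacketAfterSeek(0x10005, 0x0005));  // 1
    printf("%d\n", isFirstPacketAfterSeek(0x10006, 0x0005));  // 0
    return 0;
}
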
diff --git a/media/ndk/Android.mk b/media/ndk/Android.mk
index 7f6b66b..74729e4 100644
--- a/media/ndk/Android.mk
+++ b/media/ndk/Android.mk
@@ -40,7 +40,7 @@
 
 LOCAL_CFLAGS += -fvisibility=hidden -D EXPORT='__attribute__ ((visibility ("default")))'
 
-LOCAL_CFLAGS += -Werror
+LOCAL_CFLAGS += -Werror -Wall
 
 LOCAL_SHARED_LIBRARIES := \
     libbinder \
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index 4f826e5..8b831f0 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -14,6 +14,8 @@
     liblog \
     libbinder
 
+LOCAL_CFLAGS := -Wall -Werror
+
 include $(BUILD_SHARED_LIBRARY)
 
 include $(CLEAR_VARS)
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index d2fee81..149b492 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -132,6 +132,9 @@
     case AUDIO_FORMAT_AC3: return "ac-3";
     case AUDIO_FORMAT_E_AC3: return "e-ac-3";
     case AUDIO_FORMAT_IEC61937: return "iec61937";
+    case AUDIO_FORMAT_DTS: return "dts";
+    case AUDIO_FORMAT_DTS_HD: return "dts-hd";
+    case AUDIO_FORMAT_DOLBY_TRUEHD: return "dolby-truehd";
     default:
         break;
     }
@@ -576,7 +579,7 @@
         audio_format_t format,
         audio_channel_mask_t channelMask,
         size_t *frameCount,
-        IAudioFlinger::track_flags_t *flags,
+        audio_output_flags_t *flags,
         const sp<IMemory>& sharedBuffer,
         audio_io_handle_t output,
         pid_t pid,
@@ -660,7 +663,7 @@
                 sp<PlaybackThread> t = mPlaybackThreads.valueAt(i);
                 if (mPlaybackThreads.keyAt(i) != output) {
                     uint32_t sessions = t->hasAudioSession(lSessionId);
-                    if (sessions & PlaybackThread::EFFECT_SESSION) {
+                    if (sessions & ThreadBase::EFFECT_SESSION) {
                         effectThread = t.get();
                         break;
                     }
@@ -1463,7 +1466,7 @@
         audio_channel_mask_t channelMask,
         const String16& opPackageName,
         size_t *frameCount,
-        IAudioFlinger::track_flags_t *flags,
+        audio_input_flags_t *flags,
         pid_t pid,
         pid_t tid,
         int clientUid,
@@ -1694,14 +1697,14 @@
 uint32_t AudioFlinger::getPrimaryOutputSamplingRate()
 {
     Mutex::Autolock _l(mLock);
-    PlaybackThread *thread = primaryPlaybackThread_l();
+    PlaybackThread *thread = fastPlaybackThread_l();
     return thread != NULL ? thread->sampleRate() : 0;
 }
 
 size_t AudioFlinger::getPrimaryOutputFrameCount()
 {
     Mutex::Autolock _l(mLock);
-    PlaybackThread *thread = primaryPlaybackThread_l();
+    PlaybackThread *thread = fastPlaybackThread_l();
     return thread != NULL ? thread->frameCountHAL() : 0;
 }
 
@@ -1762,7 +1765,7 @@
     for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
         sp<PlaybackThread> thread = mPlaybackThreads.valueAt(i);
         uint32_t sessions = thread->hasAudioSession(sessionId);
-        if (sessions & PlaybackThread::TRACK_SESSION) {
+        if (sessions & ThreadBase::TRACK_SESSION) {
             AudioParameter param = AudioParameter();
             param.addInt(String8(AUDIO_PARAMETER_STREAM_HW_AV_SYNC), value);
             thread->setParameters(param.toString());
@@ -2195,7 +2198,7 @@
         }
 #endif
 
-        AudioStreamIn *inputStream = new AudioStreamIn(inHwDev, inStream);
+        AudioStreamIn *inputStream = new AudioStreamIn(inHwDev, inStream, flags);
 
         // Start record thread
         // RecordThread requires both input and output device indication to forward to audio
@@ -2528,6 +2531,25 @@
     return thread->outDevice();
 }
 
+AudioFlinger::PlaybackThread *AudioFlinger::fastPlaybackThread_l() const
+{
+    size_t minFrameCount = 0;
+    PlaybackThread *minThread = NULL;
+    for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
+        PlaybackThread *thread = mPlaybackThreads.valueAt(i).get();
+        if (!thread->isDuplicating()) {
+            size_t frameCount = thread->frameCountHAL();
+            if (frameCount != 0 && (minFrameCount == 0 || frameCount < minFrameCount ||
+                    (frameCount == minFrameCount && thread->hasFastMixer() &&
+                    /*minThread != NULL &&*/ !minThread->hasFastMixer()))) {
+                minFrameCount = frameCount;
+                minThread = thread;
+            }
+        }
+    }
+    return minThread;
+}
+
 sp<AudioFlinger::SyncEvent> AudioFlinger::createSyncEvent(AudioSystem::sync_event_t type,
                                     audio_session_t triggerSession,
                                     audio_session_t listenerSession,
@@ -2820,14 +2842,11 @@
         return INVALID_OPERATION;
     }
 
-    // Check whether the destination thread has a channel count of FCC_2, which is
-    // currently required for (most) effects. Prevent moving the effect chain here rather
-    // than disabling the addEffect_l() call in dstThread below.
-    if ((dstThread->type() == ThreadBase::MIXER || dstThread->isDuplicating()) &&
-            dstThread->mChannelCount != FCC_2) {
+    // Check whether the destination thread and all effects in the chain are compatible
+    if (!chain->isCompatibleWithThread_l(dstThread)) {
         ALOGW("moveEffectChain_l() effect chain failed because"
-                " destination thread %p channel count(%u) != %u",
-                dstThread, dstThread->mChannelCount, FCC_2);
+                " destination thread %p is not compatible with effects in the chain",
+                dstThread);
         return INVALID_OPERATION;
     }
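
fastPlaybackThread_l replaces primaryPlaybackThread_l for the sample-rate and frame-count queries above: it scans the non-duplicating playback threads for the smallest HAL frame count and, on ties, prefers a thread with a fast mixer. A standalone sketch of that selection rule over plain structs (ThreadInfo and pickFastPlaybackThread are hypothetical; PlaybackThread is internal to AudioFlinger):

#include <cstdio>
#include <vector>

struct ThreadInfo {
    size_t frameCountHAL;
    bool hasFastMixer;
    bool isDuplicating;
};

// Hypothetical helper: returns the index of the non-duplicating thread with
// the smallest HAL frame count, preferring a fast-mixer thread on ties,
// or -1 if none qualifies.
static int pickFastPlaybackThread(const std::vector<ThreadInfo> &threads) {
    int best = -1;
    size_t minFrames = 0;
    for (int i = 0; i < (int)threads.size(); i++) {
        const ThreadInfo &t = threads[i];
        if (t.isDuplicating || t.frameCountHAL == 0) continue;
        if (best < 0 || t.frameCountHAL < minFrames
                || (t.frameCountHAL == minFrames && t.hasFastMixer
                        && !threads[best].hasFastMixer)) {
            best = i;
            minFrames = t.frameCountHAL;
        }
    }
    return best;
}

int main() {
    std::vector<ThreadInfo> threads = {
        {960, false, false},   // deep-buffer output
        {192, true,  false},   // fast-mixer output
        {192, false, false},   // same size, no fast mixer
    };
    printf("picked thread %d\n", pickFastPlaybackThread(threads));  // 1
    return 0;
}
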
 
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 59ad688..c56dcc1 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -104,7 +104,7 @@
                                 audio_format_t format,
                                 audio_channel_mask_t channelMask,
                                 size_t *pFrameCount,
-                                IAudioFlinger::track_flags_t *flags,
+                                audio_output_flags_t *flags,
                                 const sp<IMemory>& sharedBuffer,
                                 audio_io_handle_t output,
                                 pid_t pid,
@@ -120,7 +120,7 @@
                                 audio_channel_mask_t channelMask,
                                 const String16& opPackageName,
                                 size_t *pFrameCount,
-                                IAudioFlinger::track_flags_t *flags,
+                                audio_input_flags_t *flags,
                                 pid_t pid,
                                 pid_t tid,
                                 int clientUid,
@@ -575,6 +575,9 @@
               PlaybackThread *primaryPlaybackThread_l() const;
               audio_devices_t primaryOutputDevice_l() const;
 
+              // return the playback thread with the smallest HAL buffer size, preferring fast mixers
+              PlaybackThread *fastPlaybackThread_l() const;
+
               sp<PlaybackThread> getEffectThread_l(audio_session_t sessionId, int EffectId);
 
 
@@ -609,11 +612,12 @@
     struct AudioStreamIn {
         AudioHwDevice* const audioHwDev;
         audio_stream_in_t* const stream;
+        audio_input_flags_t flags;
 
         audio_hw_device_t* hwDev() const { return audioHwDev->hwDevice(); }
 
-        AudioStreamIn(AudioHwDevice *dev, audio_stream_in_t *in) :
-            audioHwDev(dev), stream(in) {}
+        AudioStreamIn(AudioHwDevice *dev, audio_stream_in_t *in, audio_input_flags_t flags) :
+            audioHwDev(dev), stream(in), flags(flags) {}
     };
 
     // for mAudioSessionRefs only
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 055e915..e6c8abc 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -1966,4 +1966,27 @@
     }
 }
 
+bool AudioFlinger::EffectChain::hasSoftwareEffect() const
+{
+    Mutex::Autolock _l(mLock);
+    for (size_t i = 0; i < mEffects.size(); i++) {
+        if (mEffects[i]->isImplementationSoftware()) {
+            return true;
+        }
+    }
+    return false;
+}
+
+// isCompatibleWithThread_l() must be called with thread->mLock held
+bool AudioFlinger::EffectChain::isCompatibleWithThread_l(const sp<ThreadBase>& thread) const
+{
+    Mutex::Autolock _l(mLock);
+    for (size_t i = 0; i < mEffects.size(); i++) {
+        if (thread->checkEffectCompatibility_l(&(mEffects[i]->desc()), mSessionId) != NO_ERROR) {
+            return false;
+        }
+    }
+    return true;
+}
+
 } // namespace android
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index bc9bc94..3b62652 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -117,6 +117,8 @@
     void             unlock() { mLock.unlock(); }
     bool             isOffloadable() const
                         { return (mDescriptor.flags & EFFECT_FLAG_OFFLOAD_SUPPORTED) != 0; }
+    bool             isImplementationSoftware() const
+                        { return (mDescriptor.flags & EFFECT_FLAG_HW_ACC_MASK) == 0; }
     status_t         setOffloaded(bool offloaded, audio_io_handle_t io);
     bool             isOffloaded() const;
     void             addEffectToHal_l();
@@ -330,6 +332,11 @@
 
     void syncHalEffectsState();
 
+    bool hasSoftwareEffect() const;
+
+    // isCompatibleWithThread_l() must be called with thread->mLock held
+    bool isCompatibleWithThread_l(const sp<ThreadBase>& thread) const;
+
     void dump(int fd, const Vector<String16>& args);
 
 protected:
@@ -361,30 +368,30 @@
 
     void setThread(const sp<ThreadBase>& thread);
 
-    wp<ThreadBase> mThread;     // parent mixer thread
-    Mutex mLock;                // mutex protecting effect list
-    Vector< sp<EffectModule> > mEffects; // list of effect modules
-    audio_session_t mSessionId; // audio session ID
-    int16_t *mInBuffer;         // chain input buffer
-    int16_t *mOutBuffer;        // chain output buffer
+             wp<ThreadBase> mThread;     // parent mixer thread
+    mutable  Mutex mLock;        // mutex protecting effect list
+             Vector< sp<EffectModule> > mEffects; // list of effect modules
+             audio_session_t mSessionId; // audio session ID
+             int16_t *mInBuffer;         // chain input buffer
+             int16_t *mOutBuffer;        // chain output buffer
 
     // 'volatile' here means these are accessed with atomic operations instead of mutex
     volatile int32_t mActiveTrackCnt;    // number of active tracks connected
     volatile int32_t mTrackCnt;          // number of tracks connected
 
-    int32_t mTailBufferCount;   // current effect tail buffer count
-    int32_t mMaxTailBuffers;    // maximum effect tail buffers
-    bool mOwnInBuffer;          // true if the chain owns its input buffer
-    int mVolumeCtrlIdx;         // index of insert effect having control over volume
-    uint32_t mLeftVolume;       // previous volume on left channel
-    uint32_t mRightVolume;      // previous volume on right channel
-    uint32_t mNewLeftVolume;       // new volume on left channel
-    uint32_t mNewRightVolume;      // new volume on right channel
-    uint32_t mStrategy; // strategy for this effect chain
-    // mSuspendedEffects lists all effects currently suspended in the chain.
-    // Use effect type UUID timelow field as key. There is no real risk of identical
-    // timeLow fields among effect type UUIDs.
-    // Updated by updateSuspendedSessions_l() only.
-    KeyedVector< int, sp<SuspendedEffectDesc> > mSuspendedEffects;
+             int32_t mTailBufferCount;   // current effect tail buffer count
+             int32_t mMaxTailBuffers;    // maximum effect tail buffers
+             bool mOwnInBuffer;          // true if the chain owns its input buffer
+             int mVolumeCtrlIdx;         // index of insert effect having control over volume
+             uint32_t mLeftVolume;       // previous volume on left channel
+             uint32_t mRightVolume;      // previous volume on right channel
+             uint32_t mNewLeftVolume;       // new volume on left channel
+             uint32_t mNewRightVolume;      // new volume on right channel
+             uint32_t mStrategy; // strategy for this effect chain
+             // mSuspendedEffects lists all effects currently suspended in the chain.
+             // Use effect type UUID timelow field as key. There is no real risk of identical
+             // timeLow fields among effect type UUIDs.
+             // Updated by updateSuspendedSessions_l() only.
+             KeyedVector< int, sp<SuspendedEffectDesc> > mSuspendedEffects;
     volatile int32_t mForceVolume; // force next volume command because a new effect was enabled
 };
diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp
index f8671b5..bee17fd 100644
--- a/services/audioflinger/PatchPanel.cpp
+++ b/services/audioflinger/PatchPanel.cpp
@@ -168,12 +168,46 @@
                 ALOGV("createAudioPatch() removing patch handle %d", *handle);
                 halHandle = mPatches[index]->mHalHandle;
                 Patch *removedPatch = mPatches[index];
+                // free resources owned by the removed patch if applicable
+                // 1) if a software patch is present, release the playback and capture threads and
+                // tracks created. This will also release the corresponding audio HAL patches
                 if ((removedPatch->mRecordPatchHandle
                         != AUDIO_PATCH_HANDLE_NONE) ||
                         (removedPatch->mPlaybackPatchHandle !=
                                 AUDIO_PATCH_HANDLE_NONE)) {
                     clearPatchConnections(removedPatch);
                 }
+                // 2) if the new and old patch sources or sinks are devices from different
+                // hw modules, clear the audio HAL patches now because they will not be updated
+                // by the call to create_audio_patch() below, which will happen on a different HW module
+                if (halHandle != AUDIO_PATCH_HANDLE_NONE) {
+                    audio_module_handle_t hwModule = AUDIO_MODULE_HANDLE_NONE;
+                    if ((removedPatch->mAudioPatch.sources[0].type == AUDIO_PORT_TYPE_DEVICE) &&
+                        ((patch->sources[0].type != AUDIO_PORT_TYPE_DEVICE) ||
+                          (removedPatch->mAudioPatch.sources[0].ext.device.hw_module !=
+                           patch->sources[0].ext.device.hw_module))) {
+                        hwModule = removedPatch->mAudioPatch.sources[0].ext.device.hw_module;
+                    } else if ((patch->num_sinks == 0) ||
+                            ((removedPatch->mAudioPatch.sinks[0].type == AUDIO_PORT_TYPE_DEVICE) &&
+                             ((patch->sinks[0].type != AUDIO_PORT_TYPE_DEVICE) ||
+                              (removedPatch->mAudioPatch.sinks[0].ext.device.hw_module !=
+                               patch->sinks[0].ext.device.hw_module)))) {
+                        // Note on (patch->num_sinks == 0): this situation should not happen, as
+                        // these special patches are only created by the policy manager, but just
+                        // in case, systematically clear the HAL patch.
+                        // Note that removedPatch->mAudioPatch.num_sinks cannot be 0 here because
+                        // halHandle would be AUDIO_PATCH_HANDLE_NONE in this case.
+                        hwModule = removedPatch->mAudioPatch.sinks[0].ext.device.hw_module;
+                    }
+                    if (hwModule != AUDIO_MODULE_HANDLE_NONE) {
+                        ssize_t index = audioflinger->mAudioHwDevs.indexOfKey(hwModule);
+                        if (index >= 0) {
+                            audio_hw_device_t *hwDevice =
+                                    audioflinger->mAudioHwDevs.valueAt(index)->hwDevice();
+                            hwDevice->release_audio_patch(hwDevice, halHandle);
+                        }
+                    }
+                }
                 mPatches.removeAt(index);
                 delete removedPatch;
                 break;
@@ -437,7 +471,7 @@
                                              format,
                                              frameCount,
                                              NULL,
-                                             IAudioFlinger::TRACK_DEFAULT);
+                                             AUDIO_INPUT_FLAG_NONE);
     if (patch->mPatchRecord == 0) {
         return NO_MEMORY;
     }
@@ -457,7 +491,7 @@
                                            format,
                                            frameCount,
                                            patch->mPatchRecord->buffer(),
-                                           IAudioFlinger::TRACK_DEFAULT);
+                                           AUDIO_OUTPUT_FLAG_NONE);
     if (patch->mPatchTrack == 0) {
         return NO_MEMORY;
     }
diff --git a/services/audioflinger/PatchPanel.h b/services/audioflinger/PatchPanel.h
index e31179c..16ec278 100644
--- a/services/audioflinger/PatchPanel.h
+++ b/services/audioflinger/PatchPanel.h
@@ -62,12 +62,24 @@
 
         struct audio_patch              mAudioPatch;
         audio_patch_handle_t            mHandle;
+        // handle for the audio HAL patch; present only when the audio HAL version is >= 3.0
         audio_patch_handle_t            mHalHandle;
+        // the members below are used by a software audio patch connecting a source
+        // device on one audio HW module to a sink device on another audio HW module.
+        // playback thread created by createAudioPatch() and released by clearPatchConnections() if
+        // no existing playback thread can be used by the software patch
         sp<PlaybackThread>              mPlaybackThread;
+        // audio track created by createPatchConnections() and released by clearPatchConnections()
         sp<PlaybackThread::PatchTrack>  mPatchTrack;
+        // record thread created by createAudioPatch() and released by clearPatchConnections()
         sp<RecordThread>                mRecordThread;
+        // audio record created by createPatchConnections() and released by clearPatchConnections()
         sp<RecordThread::PatchRecord>   mPatchRecord;
+        // handle for audio patch connecting source device to record thread input.
+        // created by createPatchConnections() and released by clearPatchConnections()
         audio_patch_handle_t            mRecordPatchHandle;
+        // handle for audio patch connecting playback thread output to sink device
+        // created by createPatchConnections() and released by clearPatchConnections()
         audio_patch_handle_t            mPlaybackPatchHandle;
 
     };
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index 270e27f..5601bde 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -33,7 +33,7 @@
                                 const sp<IMemory>& sharedBuffer,
                                 audio_session_t sessionId,
                                 int uid,
-                                IAudioFlinger::track_flags_t flags,
+                                audio_output_flags_t flags,
                                 track_type type);
     virtual             ~Track();
     virtual status_t    initCheck() const;
@@ -55,8 +55,9 @@
             audio_stream_type_t streamType() const {
                 return mStreamType;
             }
-            bool        isOffloaded() const { return (mFlags & IAudioFlinger::TRACK_OFFLOAD) != 0; }
-            bool        isDirect() const { return (mFlags & IAudioFlinger::TRACK_DIRECT) != 0; }
+            bool        isOffloaded() const
+                                { return (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0; }
+            bool        isDirect() const { return (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0; }
             status_t    setParameters(const String8& keyValuePairs);
             status_t    attachAuxEffect(int EffectId);
             void        setAuxBuffer(int EffectId, int32_t *buffer);
@@ -72,6 +73,8 @@
 
     virtual status_t    setSyncEvent(const sp<SyncEvent>& event);
 
+    virtual bool        isFastTrack() const { return (mFlags & AUDIO_OUTPUT_FLAG_FAST) != 0; }
+
 protected:
     // for numerous
     friend class PlaybackThread;
@@ -166,7 +169,7 @@
     AudioTrackServerProxy*  mAudioTrackServerProxy;
     bool                mResumeToStopping; // track was paused in stopping state.
     bool                mFlushHwPending; // track requests for thread flush
-
+    audio_output_flags_t mFlags;
 };  // end of Track
 
 
@@ -226,7 +229,7 @@
                                    audio_format_t format,
                                    size_t frameCount,
                                    void *buffer,
-                                   IAudioFlinger::track_flags_t flags);
+                                   audio_output_flags_t flags);
     virtual             ~PatchTrack();
 
     virtual status_t    start(AudioSystem::sync_event_t event =
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index 13396a6..123e033 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -31,7 +31,7 @@
                                 void *buffer,
                                 audio_session_t sessionId,
                                 int uid,
-                                IAudioFlinger::track_flags_t flags,
+                                audio_input_flags_t flags,
                                 track_type type);
     virtual             ~RecordTrack();
     virtual status_t    initCheck() const;
@@ -58,6 +58,9 @@
                                              int64_t sourceFramesRead,
                                              uint32_t halSampleRate,
                                              const ExtendedTimestamp &timestamp);
+
+    virtual bool        isFastTrack() const { return (mFlags & AUDIO_INPUT_FLAG_FAST) != 0; }
+
 private:
     friend class AudioFlinger;  // for mState
 
@@ -86,6 +89,7 @@
 
             // used by the record thread to convert frames to proper destination format
             RecordBufferConverter              *mRecordBufferConverter;
+            audio_input_flags_t                mFlags;
 };
 
 // playback track, used by PatchPanel
@@ -98,7 +102,7 @@
                 audio_format_t format,
                 size_t frameCount,
                 void *buffer,
-                IAudioFlinger::track_flags_t flags);
+                audio_input_flags_t flags);
     virtual             ~PatchRecord();
 
     // AudioBufferProvider interface
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 3759424..2547746 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -1256,6 +1256,133 @@
     }
 }
 
+// checkEffectCompatibility_l() must be called with ThreadBase::mLock held
+status_t AudioFlinger::RecordThread::checkEffectCompatibility_l(
+        const effect_descriptor_t *desc, audio_session_t sessionId)
+{
+    // No global effect sessions on record threads
+    if (sessionId == AUDIO_SESSION_OUTPUT_MIX || sessionId == AUDIO_SESSION_OUTPUT_STAGE) {
+        ALOGW("checkEffectCompatibility_l(): global effect %s on record thread %s",
+                desc->name, mThreadName);
+        return BAD_VALUE;
+    }
+    // only pre-processing effects are allowed on record threads
+    if ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_PRE_PROC) {
+        ALOGW("checkEffectCompatibility_l(): non pre processing effect %s on record thread %s",
+                desc->name, mThreadName);
+        return BAD_VALUE;
+    }
+    audio_input_flags_t flags = mInput->flags;
+    if (hasFastCapture() || (flags & AUDIO_INPUT_FLAG_FAST)) {
+        if (flags & AUDIO_INPUT_FLAG_RAW) {
+            ALOGW("checkEffectCompatibility_l(): effect %s on record thread %s in raw mode",
+                  desc->name, mThreadName);
+            return BAD_VALUE;
+        }
+        if ((desc->flags & EFFECT_FLAG_HW_ACC_TUNNEL) == 0) {
+            ALOGW("checkEffectCompatibility_l(): non HW effect %s on record thread %s in fast mode",
+                  desc->name, mThreadName);
+            return BAD_VALUE;
+        }
+    }
+    return NO_ERROR;
+}
+
+// checkEffectCompatibility_l() must be called with ThreadBase::mLock held
+status_t AudioFlinger::PlaybackThread::checkEffectCompatibility_l(
+        const effect_descriptor_t *desc, audio_session_t sessionId)
+{
+    // no preprocessing on playback threads
+    if ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC) {
+        ALOGW("checkEffectCompatibility_l(): pre processing effect %s created on playback"
+                " thread %s", desc->name, mThreadName);
+        return BAD_VALUE;
+    }
+
+    switch (mType) {
+    case MIXER: {
+        // Reject any effect on mixer multichannel sinks.
+        // TODO: fix both format and multichannel issues with effects.
+        if (mChannelCount != FCC_2) {
+            ALOGW("checkEffectCompatibility_l(): effect %s for multichannel(%d) on MIXER"
+                    " thread %s", desc->name, mChannelCount, mThreadName);
+            return BAD_VALUE;
+        }
+        audio_output_flags_t flags = mOutput->flags;
+        if (hasFastMixer() || (flags & AUDIO_OUTPUT_FLAG_FAST)) {
+            if (sessionId == AUDIO_SESSION_OUTPUT_MIX) {
+                // global effects, if implemented in software, are applied only to non-fast tracks
+                if ((desc->flags & EFFECT_FLAG_HW_ACC_TUNNEL) == 0) {
+                    break;
+                }
+            } else if (sessionId == AUDIO_SESSION_OUTPUT_STAGE) {
+                // only post processing on output stage session
+                if ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_POST_PROC) {
+                    ALOGW("checkEffectCompatibility_l(): non post processing effect %s not allowed"
+                            " on output stage session", desc->name);
+                    return BAD_VALUE;
+                }
+            } else {
+                // no restriction on effects applied to non-fast tracks
+                if ((hasAudioSession_l(sessionId) & ThreadBase::FAST_SESSION) == 0) {
+                    break;
+                }
+            }
+            if (flags & AUDIO_OUTPUT_FLAG_RAW) {
+                ALOGW("checkEffectCompatibility_l(): effect %s on playback thread in raw mode",
+                      desc->name);
+                return BAD_VALUE;
+            }
+            if ((desc->flags & EFFECT_FLAG_HW_ACC_TUNNEL) == 0) {
+                ALOGW("checkEffectCompatibility_l(): non HW effect %s on playback thread"
+                        " in fast mode", desc->name);
+                return BAD_VALUE;
+            }
+        }
+    } break;
+    case OFFLOAD:
+        // nothing actionable on offload threads; if the effect:
+        //   - is offloadable: the effect can be created
+        //   - is NOT offloadable: the effect should still be created, but EffectHandle::enable()
+        //     will take care of invalidating the tracks of the thread
+        break;
+    case DIRECT:
+        // Reject any effect on Direct output threads for now, since the format of
+        // mSinkBuffer is not guaranteed to be compatible with effect processing (PCM 16 stereo).
+        ALOGW("checkEffectCompatibility_l(): effect %s on DIRECT output thread %s",
+                desc->name, mThreadName);
+        return BAD_VALUE;
+    case DUPLICATING:
+        // Reject any effect on mixer multichannel sinks.
+        // TODO: fix both format and multichannel issues with effects.
+        if (mChannelCount != FCC_2) {
+            ALOGW("checkEffectCompatibility_l(): effect %s for multichannel(%d)"
+                    " on DUPLICATING thread %s", desc->name, mChannelCount, mThreadName);
+            return BAD_VALUE;
+        }
+        if ((sessionId == AUDIO_SESSION_OUTPUT_STAGE) || (sessionId == AUDIO_SESSION_OUTPUT_MIX)) {
+            ALOGW("checkEffectCompatibility_l(): global effect %s on DUPLICATING"
+                    " thread %s", desc->name, mThreadName);
+            return BAD_VALUE;
+        }
+        if ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC) {
+            ALOGW("checkEffectCompatibility_l(): post processing effect %s on"
+                    " DUPLICATING thread %s", desc->name, mThreadName);
+            return BAD_VALUE;
+        }
+        if ((desc->flags & EFFECT_FLAG_HW_ACC_TUNNEL) != 0) {
+            ALOGW("checkEffectCompatibility_l(): HW tunneled effect %s on"
+                    " DUPLICATING thread %s", desc->name, mThreadName);
+            return BAD_VALUE;
+        }
+        break;
+    default:
+        LOG_ALWAYS_FATAL("checkEffectCompatibility_l(): wrong thread type %d", mType);
+    }
+
+    return NO_ERROR;
+}
+
 // ThreadBase::createEffect_l() must be called with AudioFlinger::mLock held
 sp<AudioFlinger::EffectHandle> AudioFlinger::ThreadBase::createEffect_l(
         const sp<AudioFlinger::Client>& client,
@@ -1280,54 +1407,16 @@
         goto Exit;
     }
 
-    // Reject any effect on Direct output threads for now, since the format of
-    // mSinkBuffer is not guaranteed to be compatible with effect processing (PCM 16 stereo).
-    if (mType == DIRECT) {
-        ALOGW("createEffect_l() Cannot add effect %s on Direct output type thread %s",
-                desc->name, mThreadName);
-        lStatus = BAD_VALUE;
-        goto Exit;
-    }
-
-    // Reject any effect on mixer or duplicating multichannel sinks.
-    // TODO: fix both format and multichannel issues with effects.
-    if ((mType == MIXER || mType == DUPLICATING) && mChannelCount != FCC_2) {
-        ALOGW("createEffect_l() Cannot add effect %s for multichannel(%d) %s threads",
-                desc->name, mChannelCount, mType == MIXER ? "MIXER" : "DUPLICATING");
-        lStatus = BAD_VALUE;
-        goto Exit;
-    }
-
-    // Allow global effects only on offloaded and mixer threads
-    if (sessionId == AUDIO_SESSION_OUTPUT_MIX) {
-        switch (mType) {
-        case MIXER:
-        case OFFLOAD:
-            break;
-        case DIRECT:
-        case DUPLICATING:
-        case RECORD:
-        default:
-            ALOGW("createEffect_l() Cannot add global effect %s on thread %s",
-                    desc->name, mThreadName);
-            lStatus = BAD_VALUE;
-            goto Exit;
-        }
-    }
-
-    // Only Pre processor effects are allowed on input threads and only on input threads
-    if ((mType == RECORD) != ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC)) {
-        ALOGW("createEffect_l() effect %s (flags %08x) created on wrong thread type %d",
-                desc->name, desc->flags, mType);
-        lStatus = BAD_VALUE;
-        goto Exit;
-    }
-
     ALOGV("createEffect_l() thread %p effect %s on session %d", this, desc->name, sessionId);
 
     { // scope for mLock
         Mutex::Autolock _l(mLock);
 
+        lStatus = checkEffectCompatibility_l(desc, sessionId);
+        if (lStatus != NO_ERROR) {
+            goto Exit;
+        }
+
         // check for existing effect chain with the requested audio session
         chain = getEffectChain_l(sessionId);
         if (chain == 0) {
@@ -1753,7 +1842,7 @@
         size_t *pFrameCount,
         const sp<IMemory>& sharedBuffer,
         audio_session_t sessionId,
-        IAudioFlinger::track_flags_t *flags,
+        audio_output_flags_t *flags,
         pid_t tid,
         int uid,
         status_t *status)
@@ -1761,9 +1850,22 @@
     size_t frameCount = *pFrameCount;
     sp<Track> track;
     status_t lStatus;
+    audio_output_flags_t outputFlags = mOutput->flags;
+
+    // special case: the FAST flag is considered OK if a fast mixer is present
+    if (hasFastMixer()) {
+        outputFlags = (audio_output_flags_t)(outputFlags | AUDIO_OUTPUT_FLAG_FAST);
+    }
+
+    // Check if requested flags are compatible with output stream flags
+    if ((*flags & outputFlags) != *flags) {
+        ALOGW("createTrack_l(): mismatch between requested flags (%08x) and output flags (%08x)",
+              *flags, outputFlags);
+        *flags = (audio_output_flags_t)(*flags & outputFlags);
+    }
 
     // client expresses a preference for FAST, but we get the final say
-    if (*flags & IAudioFlinger::TRACK_FAST) {
+    if (*flags & AUDIO_OUTPUT_FLAG_FAST) {
       if (
             // PCM data
             audio_is_linear_pcm(format) &&
@@ -1791,8 +1893,44 @@
             }
             frameCount = max(frameCount, mFrameCount * sFastTrackMultiplier); // incl framecount 0
         }
-        ALOGV("AUDIO_OUTPUT_FLAG_FAST accepted: frameCount=%zu mFrameCount=%zu",
-                frameCount, mFrameCount);
+
+        // check compatibility with audio effects.
+        { // scope for mLock
+            Mutex::Autolock _l(mLock);
+            // do not accept the RAW flag if post-processing effects are present. Note that
+            // post processing on a fast mixer is necessarily implemented in hardware
+            sp<EffectChain> chain = getEffectChain_l(AUDIO_SESSION_OUTPUT_STAGE);
+            if (chain != 0) {
+                ALOGV_IF((*flags & AUDIO_OUTPUT_FLAG_RAW) != 0,
+                        "AUDIO_OUTPUT_FLAG_RAW denied: post processing effect present");
+                *flags = (audio_output_flags_t)(*flags & ~AUDIO_OUTPUT_FLAG_RAW);
+            }
+            // Do not accept FAST flag if software global effects are present
+            chain = getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX);
+            if (chain != 0) {
+                ALOGV_IF((*flags & AUDIO_OUTPUT_FLAG_RAW) != 0,
+                        "AUDIO_OUTPUT_FLAG_RAW denied: global effect present");
+                *flags = (audio_output_flags_t)(*flags & ~AUDIO_OUTPUT_FLAG_RAW);
+                if (chain->hasSoftwareEffect()) {
+                    ALOGV("AUDIO_OUTPUT_FLAG_FAST denied: software global effect present");
+                    *flags = (audio_output_flags_t)(*flags & ~AUDIO_OUTPUT_FLAG_FAST);
+                }
+            }
+            // Do not accept FAST flag if the session has software effects
+            chain = getEffectChain_l(sessionId);
+            if (chain != 0) {
+                ALOGV_IF((*flags & AUDIO_OUTPUT_FLAG_RAW) != 0,
+                        "AUDIO_OUTPUT_FLAG_RAW denied: effect present on session");
+                *flags = (audio_output_flags_t)(*flags & ~AUDIO_OUTPUT_FLAG_RAW);
+                if (chain->hasSoftwareEffect()) {
+                    ALOGV("AUDIO_OUTPUT_FLAG_FAST denied: software effect present on session");
+                    *flags = (audio_output_flags_t)(*flags & ~AUDIO_OUTPUT_FLAG_FAST);
+                }
+            }
+        }
+        ALOGV_IF((*flags & AUDIO_OUTPUT_FLAG_FAST) != 0,
+                 "AUDIO_OUTPUT_FLAG_FAST accepted: frameCount=%zu mFrameCount=%zu",
+                 frameCount, mFrameCount);
       } else {
         ALOGV("AUDIO_OUTPUT_FLAG_FAST denied: sharedBuffer=%p frameCount=%zu "
                 "mFrameCount=%zu format=%#x mFormat=%#x isLinear=%d channelMask=%#x "
@@ -1801,7 +1939,7 @@
                 sharedBuffer.get(), frameCount, mFrameCount, format, mFormat,
                 audio_is_linear_pcm(format),
                 channelMask, sampleRate, mSampleRate, hasFastMixer(), tid, mFastTrackAvailMask);
-        *flags &= ~IAudioFlinger::TRACK_FAST;
+        *flags = (audio_output_flags_t)(*flags & ~AUDIO_OUTPUT_FLAG_FAST);
       }
     }
     // For normal PCM streaming tracks, update minimum frame count.
@@ -1809,7 +1947,7 @@
     // to be at least 2 x the normal mixer frame count and cover audio hardware latency.
     // This is probably too conservative, but legacy application code may depend on it.
     // If you change this calculation, also review the start threshold which is related.
-    if (!(*flags & IAudioFlinger::TRACK_FAST)
+    if (!(*flags & AUDIO_OUTPUT_FLAG_FAST)
             && audio_has_proportional_frames(format) && sharedBuffer == 0) {
         // this must match AudioTrack.cpp calculateMinFrameCount().
         // TODO: Move to a common library
@@ -1917,7 +2055,7 @@
             chain->incTrackCnt();
         }
 
-        if ((*flags & IAudioFlinger::TRACK_FAST) && (tid != -1)) {
+        if ((*flags & AUDIO_OUTPUT_FLAG_FAST) && (tid != -1)) {
             pid_t callingPid = IPCThreadState::self()->getCallingPid();
             // we don't have CAP_SYS_NICE, nor do we want to have it as it's too powerful,
             // so ask activity manager to do this on our behalf
@@ -2161,6 +2299,12 @@
     mCallbackThread->resetDraining();
 }
 
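+// Error reported by the stream via asyncCallback(); hand it off to the
+// AsyncCallbackThread, which will later deliver onAsyncError() to this thread.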
+void AudioFlinger::PlaybackThread::errorCallback()
+{
+    ALOG_ASSERT(mCallbackThread != 0);
+    mCallbackThread->setAsyncError();
+}
+
 void AudioFlinger::PlaybackThread::resetWriteBlocked(uint32_t sequence)
 {
     Mutex::Autolock _l(mLock);
@@ -2195,6 +2339,9 @@
     case STREAM_CBK_EVENT_DRAIN_READY:
         me->drainCallback();
         break;
+    case STREAM_CBK_EVENT_ERROR:
+        me->errorCallback();
+        break;
     default:
         ALOGW("asyncCallback() unknown event %d", event);
         break;
@@ -2285,6 +2432,7 @@
             kUseFastMixer == FastMixer_Dynamic)) {
         size_t minNormalFrameCount = (kMinNormalSinkBufferSizeMs * mSampleRate) / 1000;
         size_t maxNormalFrameCount = (kMaxNormalSinkBufferSizeMs * mSampleRate) / 1000;
+
         // round up minimum and round down maximum to nearest 16 frames to satisfy AudioMixer
         minNormalFrameCount = (minNormalFrameCount + 15) & ~15;
         maxNormalFrameCount = maxNormalFrameCount & ~15;
@@ -2301,18 +2449,7 @@
                 multiplier = (double) maxNormalFrameCount / (double) mFrameCount;
             }
         } else {
-            // prefer an even multiplier, for compatibility with doubling of fast tracks due to HAL
-            // SRC (it would be unusual for the normal sink buffer size to not be a multiple of fast
-            // track, but we sometimes have to do this to satisfy the maximum frame count
-            // constraint)
-            // FIXME this rounding up should not be done if no HAL SRC
-            uint32_t truncMult = (uint32_t) multiplier;
-            if ((truncMult & 1)) {
-                if ((truncMult + 1) * mFrameCount <= maxNormalFrameCount) {
-                    ++truncMult;
-                }
-            }
-            multiplier = (double) truncMult;
+            multiplier = floor(multiplier);
         }
     }
     mNormalFrameCount = multiplier * mFrameCount;
@@ -2397,9 +2534,9 @@
     }
 }
 
-uint32_t AudioFlinger::PlaybackThread::hasAudioSession(audio_session_t sessionId) const
+// hasAudioSession_l() must be called with ThreadBase::mLock held
+uint32_t AudioFlinger::PlaybackThread::hasAudioSession_l(audio_session_t sessionId) const
 {
-    Mutex::Autolock _l(mLock);
     uint32_t result = 0;
     if (getEffectChain_l(sessionId) != 0) {
         result = EFFECT_SESSION;
@@ -2409,6 +2546,9 @@
         sp<Track> track = mTracks[i];
         if (sessionId == track->sessionId() && !track->isInvalid()) {
             result |= TRACK_SESSION;
+            if (track->isFastTrack()) {
+                result |= FAST_SESSION;
+            }
             break;
         }
     }
@@ -2910,7 +3050,7 @@
                 if (timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] >= 0) {
                     kernelLocationUpdate = true;
                 } else {
-                    ALOGV("getTimestamp error - no valid kernel position");
+                    ALOGVV("getTimestamp error - no valid kernel position");
                 }
 
                 // copy over kernel info
@@ -3175,6 +3315,11 @@
                         // (1) mixer threads without a fast mixer (which has its own warm-up)
                         // (2) minimum buffer sized tracks (even if the track is full,
                         //     the app won't fill fast enough to handle the sudden draw).
+                        //
+                        // The time spent in the last processing cycle is the sum of the
+                        // time spent in
+                        // 1. threadLoop_write, and
+                        // 2. threadLoop_mix (significant for heavy mixing, especially
+                        //    on low-tier processors)
 
                         // it's OK if deltaMs is an overestimate.
                         const int32_t deltaMs =
@@ -3188,6 +3333,9 @@
                                     " ret(%zd) deltaMs(%d) requires sleep %d ms",
                                     this, ret, deltaMs, throttleMs);
                             mThreadThrottleTimeMs += throttleMs;
+                            // Throttle must be attributed to the previous mixer loop's write time
+                            // to allow back-to-back throttling.
+                            lastWriteFinished += throttleMs * 1000000;
                         } else {
                             uint32_t diff = mThreadThrottleTimeMs - mThreadThrottleEndMs;
                             if (diff > 0) {
@@ -3765,6 +3913,13 @@
     broadcast_l();
 }
 
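+// An error was reported by the AsyncCallbackThread: invalidate the tracks of every
+// stream type so that clients tear them down and re-create them.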
+void AudioFlinger::PlaybackThread::onAsyncError()
+{
+    for (int i = AUDIO_STREAM_SYSTEM; i < (int)AUDIO_STREAM_CNT; i++) {
+        invalidateTracks((audio_stream_type_t)i);
+    }
+}
+
 void AudioFlinger::MixerThread::threadLoop_mix()
 {
     // mix buffers...
@@ -5081,7 +5236,8 @@
     :   Thread(false /*canCallJava*/),
         mPlaybackThread(playbackThread),
         mWriteAckSequence(0),
-        mDrainSequence(0)
+        mDrainSequence(0),
+        mAsyncError(false)
 {
 }
 
@@ -5099,11 +5255,13 @@
     while (!exitPending()) {
         uint32_t writeAckSequence;
         uint32_t drainSequence;
+        bool asyncError;
 
         {
             Mutex::Autolock _l(mLock);
             while (!((mWriteAckSequence & 1) ||
                      (mDrainSequence & 1) ||
+                     mAsyncError ||
                      exitPending())) {
                 mWaitWorkCV.wait(mLock);
             }
@@ -5117,6 +5275,8 @@
             mWriteAckSequence &= ~1;
             drainSequence = mDrainSequence;
             mDrainSequence &= ~1;
+            asyncError = mAsyncError;
+            mAsyncError = false;
         }
         {
             sp<AudioFlinger::PlaybackThread> playbackThread = mPlaybackThread.promote();
@@ -5127,6 +5287,9 @@
                 if (drainSequence & 1) {
                     playbackThread->resetDraining(drainSequence >> 1);
                 }
+                if (asyncError) {
+                    playbackThread->onAsyncError();
+                }
             }
         }
     }
@@ -5175,6 +5338,13 @@
     }
 }
 
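+// Called from PlaybackThread::errorCallback(): record the pending error and wake the
+// callback thread so it can deliver onAsyncError() to the playback thread.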
+void AudioFlinger::AsyncCallbackThread::setAsyncError()
+{
+    Mutex::Autolock _l(mLock);
+    mAsyncError = true;
+    mWaitWorkCV.signal();
+}
+
 
 // ----------------------------------------------------------------------------
 AudioFlinger::OffloadThread::OffloadThread(const sp<AudioFlinger>& audioFlinger,
@@ -5844,14 +6014,6 @@
     for (;;) {
         Vector< sp<EffectChain> > effectChains;
 
-        // sleep with mutex unlocked
-        if (sleepUs > 0) {
-            ATRACE_BEGIN("sleep");
-            usleep(sleepUs);
-            ATRACE_END();
-            sleepUs = 0;
-        }
-
         // activeTracks accumulates a copy of a subset of mActiveTracks
         Vector< sp<RecordTrack> > activeTracks;
 
@@ -5872,6 +6034,15 @@
                 break;
             }
 
+            // sleep with mutex unlocked: waitRelative() releases mLock while waiting,
+            // and the wait can be cut short by a signal on mWaitWorkCV
+            if (sleepUs > 0) {
+                ATRACE_BEGIN("sleep");
+                mWaitWorkCV.waitRelative(mLock, microseconds((nsecs_t)sleepUs));
+                ATRACE_END();
+                sleepUs = 0;
+                continue;
+            }
+
             // if no active track(s), then standby and release wakelock
             size_t size = mActiveTracks.size();
             if (size == 0) {
@@ -5895,6 +6066,7 @@
             }
 
             bool doBroadcast = false;
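+            // assume every active track has stopped; any track found below in a
+            // non-stopped state clears this flag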
+            bool allStopped = true;
             for (size_t i = 0; i < size; ) {
 
                 activeTrack = mActiveTracks[i];
@@ -5923,15 +6095,18 @@
                 case TrackBase::STARTING_1:
                     sleepUs = 10000;
                     i++;
+                    allStopped = false;
                     continue;
 
                 case TrackBase::STARTING_2:
                     doBroadcast = true;
                     mStandby = false;
                     activeTrack->mState = TrackBase::ACTIVE;
+                    allStopped = false;
                     break;
 
                 case TrackBase::ACTIVE:
+                    allStopped = false;
                     break;
 
                 case TrackBase::IDLE:
@@ -5951,6 +6126,10 @@
                     fastTrack = activeTrack;
                 }
             }
+
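+            // every active track has stopped, so stop capturing and enter standby
+            // (a no-op if the thread is already in standby)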
+            if (allStopped) {
+                standbyIfNotAlreadyInStandby();
+            }
             if (doBroadcast) {
                 mStartStopCond.broadcast();
             }
@@ -6282,16 +6461,30 @@
         audio_session_t sessionId,
         size_t *notificationFrames,
         int uid,
-        IAudioFlinger::track_flags_t *flags,
+        audio_input_flags_t *flags,
         pid_t tid,
         status_t *status)
 {
     size_t frameCount = *pFrameCount;
     sp<RecordTrack> track;
     status_t lStatus;
+    audio_input_flags_t inputFlags = mInput->flags;
+
+    // special case for FAST flag considered OK if fast capture is present
+    if (hasFastCapture()) {
+        inputFlags = (audio_input_flags_t)(inputFlags | AUDIO_INPUT_FLAG_FAST);
+    }
+
+    // Check if requested flags are compatible with input stream flags
+    if ((*flags & inputFlags) != *flags) {
+        ALOGW("createRecordTrack_l(): mismatch between requested flags (%08x) and"
+                " input flags (%08x)",
+              *flags, inputFlags);
+        *flags = (audio_input_flags_t)(*flags & inputFlags);
+    }
 
     // client expresses a preference for FAST, but we get the final say
-    if (*flags & IAudioFlinger::TRACK_FAST) {
+    if (*flags & AUDIO_INPUT_FLAG_FAST) {
       if (
             // we formerly checked for a callback handler (non-0 tid),
             // but that is no longer required for TRANSFER_OBTAIN mode
@@ -6311,8 +6504,22 @@
             // there are sufficient fast track slots available
             mFastTrackAvail
         ) {
-        ALOGV("AUDIO_INPUT_FLAG_FAST accepted: frameCount=%zu mFrameCount=%zu",
-                frameCount, mFrameCount);
+          // check compatibility with audio effects.
+          Mutex::Autolock _l(mLock);
+          // Do not accept FAST flag if the session has software effects
+          sp<EffectChain> chain = getEffectChain_l(sessionId);
+          if (chain != 0) {
+              ALOGV_IF((*flags & AUDIO_INPUT_FLAG_RAW) != 0,
+                      "AUDIO_INPUT_FLAG_RAW denied: effect present on session");
+              *flags = (audio_input_flags_t)(*flags & ~AUDIO_INPUT_FLAG_RAW);
+              if (chain->hasSoftwareEffect()) {
+                  ALOGV("AUDIO_INPUT_FLAG_FAST denied: software effect present on session");
+                  *flags = (audio_input_flags_t)(*flags & ~AUDIO_INPUT_FLAG_FAST);
+              }
+          }
+          ALOGV_IF((*flags & AUDIO_INPUT_FLAG_FAST) != 0,
+                   "AUDIO_INPUT_FLAG_FAST accepted: frameCount=%zu mFrameCount=%zu",
+                   frameCount, mFrameCount);
       } else {
         ALOGV("AUDIO_INPUT_FLAG_FAST denied: frameCount=%zu mFrameCount=%zu mPipeFramesP2=%zu "
                 "format=%#x isLinear=%d channelMask=%#x sampleRate=%u mSampleRate=%u "
@@ -6320,12 +6527,12 @@
                 frameCount, mFrameCount, mPipeFramesP2,
                 format, audio_is_linear_pcm(format), channelMask, sampleRate, mSampleRate,
                 hasFastCapture(), tid, mFastTrackAvail);
-        *flags &= ~IAudioFlinger::TRACK_FAST;
+        *flags = (audio_input_flags_t)(*flags & ~AUDIO_INPUT_FLAG_FAST);
       }
     }
 
     // compute track buffer size in frames, and suggest the notification frame count
-    if (*flags & IAudioFlinger::TRACK_FAST) {
+    if (*flags & AUDIO_INPUT_FLAG_FAST) {
         // fast track: frame count is exactly the pipe depth
         frameCount = mPipeFramesP2;
         // ignore requested notificationFrames, and always notify exactly once every HAL buffer
@@ -6381,7 +6588,7 @@
         setEffectSuspended_l(FX_IID_AEC, suspend, sessionId);
         setEffectSuspended_l(FX_IID_NS, suspend, sessionId);
 
-        if ((*flags & IAudioFlinger::TRACK_FAST) && (tid != -1)) {
+        if ((*flags & AUDIO_INPUT_FLAG_FAST) && (tid != -1)) {
             pid_t callingPid = IPCThreadState::self()->getCallingPid();
             // we don't have CAP_SYS_NICE, nor do we want to have it as it's too powerful,
             // so ask activity manager to do this on our behalf
@@ -6505,6 +6712,8 @@
     }
     // note that threadLoop may still be processing the track at this point [without lock]
     recordTrack->mState = TrackBase::PAUSING;
+    // wake the thread so it processes the PAUSING state without waiting out its sleep
+    mWaitWorkCV.broadcast();
     // do not wait for mStartStopCond if exiting
     if (exitPending()) {
         return true;
@@ -7202,9 +7411,9 @@
     return mInput->stream->get_input_frames_lost(mInput->stream);
 }
 
-uint32_t AudioFlinger::RecordThread::hasAudioSession(audio_session_t sessionId) const
+// hasAudioSession_l() must be called with ThreadBase::mLock held
+uint32_t AudioFlinger::RecordThread::hasAudioSession_l(audio_session_t sessionId) const
 {
-    Mutex::Autolock _l(mLock);
     uint32_t result = 0;
     if (getEffectChain_l(sessionId) != 0) {
         result = EFFECT_SESSION;
@@ -7213,6 +7422,9 @@
     for (size_t i = 0; i < mTracks.size(); ++i) {
         if (sessionId == mTracks[i]->sessionId()) {
             result |= TRACK_SESSION;
+            if (mTracks[i]->isFastTrack()) {
+                result |= FAST_SESSION;
+            }
             break;
         }
     }
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 787b5c4..2fd7eeb 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -301,8 +301,10 @@
                 enum effect_state {
                     EFFECT_SESSION = 0x1,   // the audio session corresponds to at least one
                                             // effect
-                    TRACK_SESSION = 0x2     // the audio session corresponds to at least one
+                    TRACK_SESSION = 0x2,    // the audio session corresponds to at least one
                                             // track
+                    FAST_SESSION = 0x4      // the audio session corresponds to at least one
+                                            // fast track
                 };
 
                 // get effect chain corresponding to session Id.
@@ -335,9 +337,16 @@
                 void removeEffect_l(const sp< EffectModule>& effect);
                 // detach all tracks connected to an auxiliary effect
     virtual     void detachAuxEffect_l(int effectId __unused) {}
-                // returns either EFFECT_SESSION if effects on this audio session exist in one
-                // chain, or TRACK_SESSION if tracks on this audio session exist, or both
-                virtual uint32_t hasAudioSession(audio_session_t sessionId) const = 0;
+                // returns a combination of:
+                // - EFFECT_SESSION if effects on this audio session exist in one chain
+                // - TRACK_SESSION if tracks on this audio session exist
+                // - FAST_SESSION if fast tracks on this audio session exist
+    virtual     uint32_t hasAudioSession_l(audio_session_t sessionId) const = 0;
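+                // convenience wrapper: takes ThreadBase::mLock and forwards to
+                // hasAudioSession_l()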
+                uint32_t hasAudioSession(audio_session_t sessionId) const {
+                    Mutex::Autolock _l(mLock);
+                    return hasAudioSession_l(sessionId);
+                }
+
                 // the value returned by default implementation is not important as the
                 // strategy is only meaningful for PlaybackThread which implements this method
                 virtual uint32_t getStrategyForSession_l(audio_session_t sessionId __unused)
@@ -374,6 +383,10 @@
 
                         void systemReady();
 
+                // checkEffectCompatibility_l() must be called with ThreadBase::mLock held
+                virtual status_t    checkEffectCompatibility_l(const effect_descriptor_t *desc,
+                                                               audio_session_t sessionId) = 0;
+
     mutable     Mutex                   mLock;
 
 protected:
@@ -506,6 +519,9 @@
     // RefBase
     virtual     void        onFirstRef();
 
+    virtual     status_t    checkEffectCompatibility_l(const effect_descriptor_t *desc,
+                                                       audio_session_t sessionId);
+
 protected:
     // Code snippets that were lifted up out of threadLoop()
     virtual     void        threadLoop_mix() = 0;
@@ -527,6 +543,7 @@
                 void        resetWriteBlocked(uint32_t sequence);
                 void        drainCallback();
                 void        resetDraining(uint32_t sequence);
+                void        errorCallback();
 
     static      int         asyncCallback(stream_callback_event_t event, void *param, void *cookie);
 
@@ -534,6 +551,7 @@
     virtual     bool        waitingAsyncCallback_l();
     virtual     bool        shouldStandby_l();
     virtual     void        onAddNewTrack_l();
+                void        onAsyncError(); // error reported by AsyncCallbackThread
 
     // ThreadBase virtuals
     virtual     void        preExit();
@@ -566,7 +584,7 @@
                                 size_t *pFrameCount,
                                 const sp<IMemory>& sharedBuffer,
                                 audio_session_t sessionId,
-                                IAudioFlinger::track_flags_t *flags,
+                                audio_output_flags_t *flags,
                                 pid_t tid,
                                 int uid,
                                 status_t *status /*non-NULL*/);
@@ -605,7 +623,7 @@
 
                 virtual status_t addEffectChain_l(const sp<EffectChain>& chain);
                 virtual size_t removeEffectChain_l(const sp<EffectChain>& chain);
-                virtual uint32_t hasAudioSession(audio_session_t sessionId) const;
+                virtual uint32_t hasAudioSession_l(audio_session_t sessionId) const;
                 virtual uint32_t getStrategyForSession_l(audio_session_t sessionId);
 
 
@@ -1028,6 +1046,7 @@
             void        resetWriteBlocked();
             void        setDraining(uint32_t sequence);
             void        resetDraining();
+            void        setAsyncError();
 
 private:
     const wp<PlaybackThread>   mPlaybackThread;
@@ -1041,6 +1060,7 @@
     uint32_t                   mDrainSequence;
     Condition                  mWaitWorkCV;
     Mutex                      mLock;
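+    // pending error flag, set by setAsyncError() and consumed by threadLoop();
+    // protected by mLock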
+    bool                       mAsyncError;
 };
 
 class DuplicatingThread : public MixerThread {
@@ -1258,7 +1278,7 @@
                     audio_session_t sessionId,
                     size_t *notificationFrames,
                     int uid,
-                    IAudioFlinger::track_flags_t *flags,
+                    audio_input_flags_t *flags,
                     pid_t tid,
                     status_t *status /*non-NULL*/);
 
@@ -1292,7 +1312,7 @@
 
     virtual status_t addEffectChain_l(const sp<EffectChain>& chain);
     virtual size_t removeEffectChain_l(const sp<EffectChain>& chain);
-    virtual uint32_t hasAudioSession(audio_session_t sessionId) const;
+    virtual uint32_t hasAudioSession_l(audio_session_t sessionId) const;
 
             // Return the set of unique session IDs across all tracks.
             // The keys are the session IDs, and the associated values are meaningless.
@@ -1308,6 +1328,9 @@
             bool        hasFastCapture() const { return mFastCapture != 0; }
     virtual void        getAudioPortConfig(struct audio_port_config *config);
 
+    virtual status_t    checkEffectCompatibility_l(const effect_descriptor_t *desc,
+                                                   audio_session_t sessionId);
+
 private:
             // Enter standby if not already in standby, and set mStandby flag
             void    standbyIfNotAlreadyInStandby();
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index 67a5e58..6b97246 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -63,7 +63,6 @@
                                 void *buffer,
                                 audio_session_t sessionId,
                                 int uid,
-                                IAudioFlinger::track_flags_t flags,
                                 bool isOut,
                                 alloc_type alloc = ALLOC_CBLK,
                                 track_type type = TYPE_DEFAULT);
@@ -81,7 +80,7 @@
 
             sp<IMemory> getBuffers() const { return mBufferMemory; }
             void*       buffer() const { return mBuffer; }
-            bool        isFastTrack() const { return (mFlags & IAudioFlinger::TRACK_FAST) != 0; }
+    virtual bool        isFastTrack() const = 0;
             bool        isOutputTrack() const { return (mType == TYPE_OUTPUT); }
             bool        isPatchTrack() const { return (mType == TYPE_PATCH); }
             bool        isExternalTrack() const { return !isOutputTrack() && !isPatchTrack(); }
@@ -156,7 +155,6 @@
     const audio_session_t mSessionId;
     int                 mUid;
     Vector < sp<SyncEvent> >mSyncEvents;
-    const IAudioFlinger::track_flags_t mFlags;
     const bool          mIsOut;
     ServerProxy*        mServerProxy;
     const int           mId;
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 364e339..b387af3 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -73,7 +73,6 @@
             void *buffer,
             audio_session_t sessionId,
             int clientUid,
-            IAudioFlinger::track_flags_t flags,
             bool isOut,
             alloc_type alloc,
             track_type type)
@@ -93,7 +92,6 @@
                 mChannelCount * audio_bytes_per_sample(format) : sizeof(int8_t)),
         mFrameCount(frameCount),
         mSessionId(sessionId),
-        mFlags(flags),
         mIsOut(isOut),
         mServerProxy(NULL),
         mId(android_atomic_inc(&nextTrackId)),
@@ -345,11 +343,11 @@
             const sp<IMemory>& sharedBuffer,
             audio_session_t sessionId,
             int uid,
-            IAudioFlinger::track_flags_t flags,
+            audio_output_flags_t flags,
             track_type type)
     :   TrackBase(thread, client, sampleRate, format, channelMask, frameCount,
                   (sharedBuffer != 0) ? sharedBuffer->pointer() : buffer,
-                  sessionId, uid, flags, true /*isOut*/,
+                  sessionId, uid, true /*isOut*/,
                   (type == TYPE_PATCH) ? ( buffer == NULL ? ALLOC_LOCAL : ALLOC_NONE) : ALLOC_CBLK,
                   type),
     mFillingUpStatus(FS_INVALID),
@@ -368,7 +366,8 @@
     mIsInvalid(false),
     mAudioTrackServerProxy(NULL),
     mResumeToStopping(false),
-    mFlushHwPending(false)
+    mFlushHwPending(false),
+    mFlags(flags)
 {
     // client == 0 implies sharedBuffer == 0
     ALOG_ASSERT(!(client == 0 && sharedBuffer != 0));
@@ -395,7 +394,7 @@
         return;
     }
     // only allocate a fast track index if we were able to allocate a normal track name
-    if (flags & IAudioFlinger::TRACK_FAST) {
+    if (flags & AUDIO_OUTPUT_FLAG_FAST) {
         // FIXME: Not calling framesReadyIsCalledByMultipleThreads() exposes a potential
         // race with setSyncEvent(). However, if we call it, we cannot properly start
         // static fast tracks (SoundPool) immediately after stopping.
@@ -1133,7 +1132,7 @@
             int uid)
     :   Track(playbackThread, NULL, AUDIO_STREAM_PATCH,
               sampleRate, format, channelMask, frameCount,
-              NULL, 0, AUDIO_SESSION_NONE, uid, IAudioFlinger::TRACK_DEFAULT,
+              NULL, 0, AUDIO_SESSION_NONE, uid, AUDIO_OUTPUT_FLAG_NONE,
               TYPE_OUTPUT),
     mActive(false), mSourceThread(sourceThread), mClientProxy(NULL)
 {
@@ -1329,7 +1328,7 @@
                                                      audio_format_t format,
                                                      size_t frameCount,
                                                      void *buffer,
-                                                     IAudioFlinger::track_flags_t flags)
+                                                     audio_output_flags_t flags)
     :   Track(playbackThread, NULL, streamType,
               sampleRate, format, channelMask, frameCount,
               buffer, 0, AUDIO_SESSION_NONE, getuid(), flags, TYPE_PATCH),
@@ -1468,19 +1467,19 @@
             void *buffer,
             audio_session_t sessionId,
             int uid,
-            IAudioFlinger::track_flags_t flags,
+            audio_input_flags_t flags,
             track_type type)
     :   TrackBase(thread, client, sampleRate, format,
-                  channelMask, frameCount, buffer, sessionId, uid,
-                  flags, false /*isOut*/,
+                  channelMask, frameCount, buffer, sessionId, uid, false /*isOut*/,
                   (type == TYPE_DEFAULT) ?
-                          ((flags & IAudioFlinger::TRACK_FAST) ? ALLOC_PIPE : ALLOC_CBLK) :
+                          ((flags & AUDIO_INPUT_FLAG_FAST) ? ALLOC_PIPE : ALLOC_CBLK) :
                           ((buffer == NULL) ? ALLOC_LOCAL : ALLOC_NONE),
                   type),
         mOverflow(false),
         mFramesToDrop(0),
         mResamplerBufferProvider(NULL), // initialize in case of early constructor exit
-        mRecordBufferConverter(NULL)
+        mRecordBufferConverter(NULL),
+        mFlags(flags)
 {
     if (mCblk == NULL) {
         return;
@@ -1505,7 +1504,7 @@
 
     mResamplerBufferProvider = new ResamplerBufferProvider(this);
 
-    if (flags & IAudioFlinger::TRACK_FAST) {
+    if (flags & AUDIO_INPUT_FLAG_FAST) {
         ALOG_ASSERT(thread->mFastTrackAvail);
         thread->mFastTrackAvail = false;
     }
@@ -1664,7 +1663,7 @@
                                                      audio_format_t format,
                                                      size_t frameCount,
                                                      void *buffer,
-                                                     IAudioFlinger::track_flags_t flags)
+                                                     audio_input_flags_t flags)
     :   RecordTrack(recordThread, NULL, sampleRate, format, channelMask, frameCount,
                 buffer, AUDIO_SESSION_NONE, getuid(), flags, TYPE_PATCH),
                 mProxy(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, false, true))
diff --git a/services/audiopolicy/Android.mk b/services/audiopolicy/Android.mk
index 8b45adc..c8e5148 100644
--- a/services/audiopolicy/Android.mk
+++ b/services/audiopolicy/Android.mk
@@ -50,6 +50,7 @@
 LOCAL_MODULE:= libaudiopolicyservice
 
 LOCAL_CFLAGS += -fvisibility=hidden
+LOCAL_CFLAGS += -Wall -Werror
 
 include $(BUILD_SHARED_LIBRARY)
 
@@ -102,6 +103,8 @@
 LOCAL_CFLAGS += -DUSE_XML_AUDIO_POLICY_CONF
 endif #ifeq ($(USE_XML_AUDIO_POLICY_CONF), 1)
 
+LOCAL_CFLAGS += -Wall -Werror
+
 LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
 
 LOCAL_MODULE:= libaudiopolicymanagerdefault
@@ -125,6 +128,8 @@
     $(TOPDIR)frameworks/av/services/audiopolicy/common/include \
     $(TOPDIR)frameworks/av/services/audiopolicy/engine/interface
 
+LOCAL_CFLAGS := -Wall -Werror
+
 LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
 
 LOCAL_MODULE:= libaudiopolicymanager
diff --git a/services/audiopolicy/common/managerdefinitions/Android.mk b/services/audiopolicy/common/managerdefinitions/Android.mk
index 3b4ae6b..d7da0ad 100644
--- a/services/audiopolicy/common/managerdefinitions/Android.mk
+++ b/services/audiopolicy/common/managerdefinitions/Android.mk
@@ -60,6 +60,8 @@
 
 LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
 
+LOCAL_CFLAGS := -Wall -Werror
+
 LOCAL_MODULE := libaudiopolicycomponents
 
 include $(BUILD_STATIC_LIBRARY)
diff --git a/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h b/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h
index b828f81..1612714 100644
--- a/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h
+++ b/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h
@@ -131,6 +131,7 @@
 typedef TypeConverter<StreamTraits> StreamTypeConverter;
 typedef TypeConverter<DeviceCategoryTraits> DeviceCategoryConverter;
 
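+// Note: these header-defined helpers are marked 'inline' so that translation units
+// which include this header without using every helper do not trigger the
+// unused-function warnings enabled elsewhere in this change via -Wall -Werror.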
+inline
 static SampleRateTraits::Collection samplingRatesFromString(const std::string &samplingRates,
                                                             const char *del = "|")
 {
@@ -139,6 +140,7 @@
     return samplingRateCollection;
 }
 
+inline
 static FormatTraits::Collection formatsFromString(const std::string &formats, const char *del = "|")
 {
     FormatTraits::Collection formatCollection;
@@ -146,6 +148,7 @@
     return formatCollection;
 }
 
+inline
 static audio_format_t formatFromString(const std::string &literalFormat)
 {
     audio_format_t format;
@@ -156,6 +159,7 @@
     return format;
 }
 
+inline
 static audio_channel_mask_t channelMaskFromString(const std::string &literalChannels)
 {
     audio_channel_mask_t channels;
@@ -166,6 +170,7 @@
     return channels;
 }
 
+inline
 static ChannelTraits::Collection channelMasksFromString(const std::string &channels,
                                                         const char *del = "|")
 {
@@ -176,6 +181,7 @@
     return channelMaskCollection;
 }
 
+inline
 static InputChannelTraits::Collection inputChannelMasksFromString(const std::string &inChannels,
                                                                   const char *del = "|")
 {
@@ -185,6 +191,7 @@
     return inputChannelMaskCollection;
 }
 
+inline
 static OutputChannelTraits::Collection outputChannelMasksFromString(const std::string &outChannels,
                                                                     const char *del = "|")
 {
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index 35f078e..50453ad 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -104,7 +104,6 @@
 
 ssize_t DeviceVector::remove(const sp<DeviceDescriptor>& item)
 {
-    size_t i;
     ssize_t ret = indexOf(item);
 
     if (ret < 0) {
diff --git a/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp b/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
index f639551..48bfd79 100644
--- a/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
@@ -142,6 +142,7 @@
     MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_DTS),
     MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_DTS_HD),
     MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_IEC61937),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_DOLBY_TRUEHD),
 };
 template<>
 const size_t FormatConverter::mSize = sizeof(FormatConverter::mTable) /
diff --git a/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp b/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp
index ab2b51f..14caf7c 100644
--- a/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp
@@ -34,7 +34,8 @@
     // Where would this volume index been inserted in the curve point
     size_t indexInUiPosition = mCurvePoints.orderOf(CurvePoint(volIdx, 0));
     if (indexInUiPosition >= nbCurvePoints) {
-        return 0.0f; // out of bounds
+        // volume index is beyond the last curve point: clamp to the last point's attenuation
+        return mCurvePoints[nbCurvePoints - 1].mAttenuationInMb / 100.0f;
     }
     if (indexInUiPosition == 0) {
         if (indexInUiPosition != mCurvePoints[0].mIndex) {
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 00fd05a..ff38df4 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -775,7 +775,6 @@
         const audio_offload_info_t *offloadInfo)
 {
     audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
-    uint32_t latency = 0;
     status_t status;
 
 #ifdef AUDIO_POLICY_TEST
@@ -1192,7 +1191,7 @@
                 }
             }
         }
-        uint32_t muteWaitMs = setOutputDevice(outputDesc, device, force, 0, NULL, address);
+        (void) /*uint32_t muteWaitMs*/ setOutputDevice(outputDesc, device, force, 0, NULL, address);
 
         // handle special case for sonification while in call
         if (isInCall()) {
@@ -1288,7 +1287,6 @@
             // force restoring the device selection on other active outputs if it differs from the
             // one being selected for this output
             for (size_t i = 0; i < mOutputs.size(); i++) {
-                audio_io_handle_t curOutput = mOutputs.keyAt(i);
                 sp<AudioOutputDescriptor> desc = mOutputs.valueAt(i);
                 if (desc != outputDesc &&
                         desc->isActive() &&
@@ -1488,6 +1486,8 @@
                                   profileFlags);
         if (profile != 0) {
             break; // success
+        } else if (profileFlags & AUDIO_INPUT_FLAG_RAW) {
+            profileFlags = (audio_input_flags_t) (profileFlags & ~AUDIO_INPUT_FLAG_RAW); // retry
         } else if (profileFlags != AUDIO_INPUT_FLAG_NONE) {
             profileFlags = AUDIO_INPUT_FLAG_NONE; // retry
         } else { // fail
@@ -1793,7 +1793,7 @@
         ssize_t patch_index = mAudioPatches.indexOfKey(inputDesc->getPatchHandle());
         if (patch_index >= 0) {
             sp<AudioPatch> patchDesc = mAudioPatches.valueAt(patch_index);
-            status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
+            (void) /*status_t status*/ mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
             mAudioPatches.removeItemsAt(patch_index);
             patchRemoved = true;
         }
@@ -2706,7 +2706,6 @@
                            true,
                            NULL);
         } else if (patch->sinks[0].type == AUDIO_PORT_TYPE_DEVICE) {
-            audio_patch_handle_t afPatchHandle = patchDesc->mAfPatchHandle;
             status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
             ALOGV("releaseAudioPatch() patch panel returned %d patchHandle %d",
                                                               status, patchDesc->mAfPatchHandle);
@@ -3185,6 +3184,7 @@
     }
     mEngine->setObserver(this);
     status_t status = mEngine->initCheck();
+    (void) status;
     ALOG_ASSERT(status == NO_ERROR, "Policy engine not initialized(err=%d)", status);
 
     // mAvailableOutputDevices and mAvailableInputDevices now contain all attached devices
@@ -4064,7 +4064,7 @@
     ssize_t index = mAudioPatches.indexOfKey(outputDesc->getPatchHandle());
     if (index >= 0) {
         sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
-        status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
+        (void) /*status_t status*/ mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
         mAudioPatches.removeItemsAt(index);
         mpClientInterface->onAudioPatchListUpdate();
     }
@@ -4093,7 +4093,7 @@
     ssize_t index = mAudioPatches.indexOfKey(inputDesc->getPatchHandle());
     if (index >= 0) {
         sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
-        status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
+        (void) /*status_t status*/ mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
         mAudioPatches.removeItemsAt(index);
         mpClientInterface->onAudioPatchListUpdate();
     }
@@ -4373,7 +4373,7 @@
         }
         routing_strategy curStrategy = getStrategy((audio_stream_type_t)curStream);
         audio_devices_t curDevices =
-                getDeviceForStrategy((routing_strategy)curStrategy, true /*fromCache*/);
+                getDeviceForStrategy((routing_strategy)curStrategy, false /*fromCache*/);
         SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(curDevices, mOutputs);
         for (size_t i = 0; i < outputs.size(); i++) {
             sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(outputs[i]);
@@ -5398,7 +5398,6 @@
                                              AudioProfileVector &profiles)
 {
     String8 reply;
-    char *value;
 
     // Format MUST be checked first to update the list of AudioProfile
     if (profiles.hasDynamicFormat()) {
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index a6cd50e..f6e24e4 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -52,9 +52,11 @@
 
 static const nsecs_t kAudioCommandTimeoutNs = seconds(3); // 3 seconds
 
+#ifdef USE_LEGACY_AUDIO_POLICY
 namespace {
     extern struct audio_policy_service_ops aps_ops;
 };
+#endif
 
 // ----------------------------------------------------------------------------
 
@@ -66,11 +68,6 @@
 
 void AudioPolicyService::onFirstRef()
 {
-    char value[PROPERTY_VALUE_MAX];
-    const struct hw_module_t *module;
-    int forced_val;
-    int rc;
-
     {
         Mutex::Autolock _l(mLock);
 
@@ -85,7 +82,8 @@
         ALOGI("AudioPolicyService CSTOR in legacy mode");
 
         /* instantiate the audio policy manager */
-        rc = hw_get_module(AUDIO_POLICY_HARDWARE_MODULE_ID, &module);
+        const struct hw_module_t *module;
+        int rc = hw_get_module(AUDIO_POLICY_HARDWARE_MODULE_ID, &module);
         if (rc) {
             return;
         }
@@ -1198,6 +1196,7 @@
 int aps_set_voice_volume(void *service, float volume, int delay_ms);
 };
 
+#ifdef USE_LEGACY_AUDIO_POLICY
 namespace {
     struct audio_policy_service_ops aps_ops = {
         .open_output           = aps_open_output,
@@ -1220,5 +1219,6 @@
         .open_input_on_module  = aps_open_input_on_module,
     };
 }; // namespace <unnamed>
+#endif
 
 }; // namespace android
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index ff73c28..4510d36 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -911,8 +911,9 @@
 // Can camera service trust the caller based on the calling UID?
 static bool isTrustedCallingUid(uid_t uid) {
     switch (uid) {
-        case AID_MEDIA:         // mediaserver
+        case AID_MEDIA:        // mediaserver
         case AID_CAMERASERVER: // cameraserver
+        case AID_RADIO:        // telephony
             return true;
         default:
             return false;
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index c8e64fe..005dd69 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -322,6 +322,9 @@
             p.fastInfo.bestStillCaptureFpsRange[0],
             p.fastInfo.bestStillCaptureFpsRange[1]);
 
+    result.appendFormat("    Use zero shutter lag: %s\n",
+            p.useZeroShutterLag() ? "yes" : "no");
+
     result.append("  Current streams:\n");
     result.appendFormat("    Preview stream ID: %d\n",
             getPreviewStreamId());
@@ -813,7 +816,7 @@
         }
     }
 
-    if (params.zslMode && !params.recordingHint &&
+    if (params.useZeroShutterLag() &&
             getRecordingStreamId() == NO_STREAM) {
         res = updateProcessorStream(mZslProcessor, params);
         if (res != OK) {
@@ -1362,7 +1365,7 @@
 
             return OK;
         }
-        if (l.mParameters.zslMode) {
+        if (l.mParameters.allowZslMode) {
             mZslProcessor->clearZslQueue();
         }
     }
@@ -1460,7 +1463,7 @@
 
         // Clear ZSL buffer queue when Jpeg size is changed.
         bool jpegStreamChanged = mJpegProcessor->getStreamId() != lastJpegStreamId;
-        if (l.mParameters.zslMode && jpegStreamChanged) {
+        if (l.mParameters.allowZslMode && jpegStreamChanged) {
             ALOGV("%s: Camera %d: Clear ZSL buffer queue when Jpeg size is changed",
                     __FUNCTION__, mCameraId);
             mZslProcessor->clearZslQueue();
@@ -1495,7 +1498,7 @@
     if (res != OK) return res;
     Parameters::focusMode_t focusModeAfter = l.mParameters.focusMode;
 
-    if (l.mParameters.zslMode && focusModeAfter != focusModeBefore) {
+    if (l.mParameters.allowZslMode && focusModeAfter != focusModeBefore) {
         mZslProcessor->clearZslQueue();
     }
 
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
index e3d6906..05adb29 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
@@ -338,7 +338,7 @@
         return DONE;
     }
 
-    else if (l.mParameters.zslMode &&
+    else if (l.mParameters.useZeroShutterLag() &&
             l.mParameters.state == Parameters::STILL_CAPTURE &&
             l.mParameters.flashMode != Parameters::FLASH_MODE_ON) {
         nextState = ZSL_START;
diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
index ffe96fc..d6d8dde 100644
--- a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
@@ -67,10 +67,15 @@
 }
 
 void JpegProcessor::onBufferReleased(const BufferInfo& bufferInfo) {
-    Mutex::Autolock l(mInputMutex);
     ALOGV("%s", __FUNCTION__);
-
     if (bufferInfo.mError) {
+        // Only lock in the error case: this callback also fires once per
+        // onFrameAvailable, and in the non-error case scheduling may delay it
+        // long enough to collide with a later preview restart.
+        // b/29524651
+        ALOGV("%s: JPEG buffer lost", __FUNCTION__);
+        Mutex::Autolock l(mInputMutex);
         mCaptureDone = true;
         mCaptureSuccess = false;
         mCaptureDoneSignal.signal();
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index 5779176..9d5f33c 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -908,12 +908,12 @@
     property_get("camera.disable_zsl_mode", value, "0");
     if (!strcmp(value,"1") || slowJpegMode) {
         ALOGI("Camera %d: Disabling ZSL mode", cameraId);
-        zslMode = false;
+        allowZslMode = false;
     } else {
-        zslMode = true;
+        allowZslMode = true;
     }
 
-    ALOGI("%s: zslMode: %d slowJpegMode %d", __FUNCTION__, zslMode, slowJpegMode);
+    ALOGI("%s: allowZslMode: %d slowJpegMode %d", __FUNCTION__, allowZslMode, slowJpegMode);
 
     state = STOPPED;
 
@@ -1127,6 +1127,8 @@
     ALOGV("Camera %d: Flexible YUV %s supported",
             cameraId, fastInfo.useFlexibleYuv ? "is" : "is not");
 
+    fastInfo.maxJpegSize = getMaxSize(getAvailableJpegSizes());
+
     return OK;
 }
 
@@ -2231,6 +2233,25 @@
     return pictureSizeOverriden;
 }
 
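+// Decide whether zero shutter lag should actually be used for the current
+// configuration; allowZslMode only records whether ZSL is permitted at all.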
+bool Parameters::useZeroShutterLag() const {
+    // If ZSL mode is disabled, don't use it
+    if (!allowZslMode) return false;
+    // If recording hint is enabled, don't do ZSL
+    if (recordingHint) return false;
+    // If still capture size is no bigger than preview or video size,
+    // don't do ZSL
+    if (pictureWidth <= previewWidth || pictureHeight <= previewHeight ||
+            pictureWidth <= videoWidth || pictureHeight <= videoHeight) {
+        return false;
+    }
+    // If still capture size is less than a quarter of the max JPEG size, don't do ZSL
+    if ((pictureWidth * pictureHeight) <
+            (fastInfo.maxJpegSize.width * fastInfo.maxJpegSize.height / 4) ) {
+        return false;
+    }
+    return true;
+}
+
 const char* Parameters::getStateName(State state) {
 #define CASE_ENUM_TO_CHAR(x) case x: return(#x); break;
     switch(state) {
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h
index c437722..f4bb34c 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.h
+++ b/services/camera/libcameraservice/api1/client2/Parameters.h
@@ -161,9 +161,9 @@
     bool previewCallbackOneShot;
     bool previewCallbackSurface;
 
-    bool zslMode;
+    bool allowZslMode;
     // Whether the jpeg stream is slower than 30FPS and can slow down preview.
-    // When slowJpegMode is true, zslMode must be false to avoid slowing down preview.
+    // When slowJpegMode is true, allowZslMode must be false to avoid slowing down preview.
     bool slowJpegMode;
 
     // Overall camera state
@@ -219,6 +219,7 @@
         DefaultKeyedVector<uint8_t, OverrideModes> sceneModeOverrides;
         float minFocalLength;
         bool useFlexibleYuv;
+        Size maxJpegSize;
     } fastInfo;
 
     // Quirks information; these are short-lived flags to enable workarounds for
@@ -271,6 +272,8 @@
     status_t recoverOverriddenJpegSize();
     // if video snapshot size is currently overridden
     bool isJpegSizeOverridden();
+    // whether zero shutter lag should be used for non-recording operation
+    bool useZeroShutterLag() const;
 
     // Calculate the crop region rectangle, either tightly about the preview
     // resolution, or a region just based on the active array; both take
diff --git a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
index a2c9712..de42fb2 100644
--- a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
@@ -115,7 +115,7 @@
 
         // Use CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG for ZSL streaming case.
         if (client->getCameraDeviceVersion() >= CAMERA_DEVICE_API_VERSION_3_0) {
-            if (params.zslMode && !params.recordingHint) {
+            if (params.useZeroShutterLag() && !params.recordingHint) {
                 res = device->createDefaultRequest(CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG,
                         &mPreviewRequest);
             } else {
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index dbec34e..a7fe5e7 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -338,13 +338,15 @@
 
     status_t err = mDevice->configureStreams(isConstrainedHighSpeed);
     if (err == BAD_VALUE) {
-        res = STATUS_ERROR_FMT(CameraService::ERROR_ILLEGAL_ARGUMENT,
-                "Camera %d: Unsupported set of inputs/outputs provided",
+        String8 msg = String8::format("Camera %d: Unsupported set of inputs/outputs provided",
                 mCameraId);
+        ALOGE("%s: %s", __FUNCTION__, msg.string());
+        res = STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
     } else if (err != OK) {
-        res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
-                "Camera %d: Error configuring streams: %s (%d)",
+        String8 msg = String8::format("Camera %d: Error configuring streams: %s (%d)",
                 mCameraId, strerror(-err), err);
+        ALOGE("%s: %s", __FUNCTION__, msg.string());
+        res = STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
     }
 
     return res;
@@ -365,6 +367,7 @@
 
     bool isInput = false;
     ssize_t index = NAME_NOT_FOUND;
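+    // index into mDeferredStreams, in case the stream turns out to be a deferred one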
+    ssize_t dIndex = NAME_NOT_FOUND;
 
     if (mInputStream.configured && mInputStream.id == streamId) {
         isInput = true;
@@ -378,10 +381,19 @@
         }
 
         if (index == NAME_NOT_FOUND) {
-            String8 msg = String8::format("Camera %d: Invalid stream ID (%d) specified, no such "
-                    "stream created yet", mCameraId, streamId);
-            ALOGW("%s: %s", __FUNCTION__, msg.string());
-            return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+            // See if this stream is one of the deferred streams.
+            for (size_t i = 0; i < mDeferredStreams.size(); ++i) {
+                if (streamId == mDeferredStreams[i]) {
+                    dIndex = i;
+                    break;
+                }
+            }
+            if (dIndex == NAME_NOT_FOUND) {
+                String8 msg = String8::format("Camera %d: Invalid stream ID (%d) specified, no such"
+                        " stream created yet", mCameraId, streamId);
+                ALOGW("%s: %s", __FUNCTION__, msg.string());
+                return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+            }
         }
     }
 
@@ -396,8 +408,10 @@
     } else {
         if (isInput) {
             mInputStream.configured = false;
-        } else {
+        } else if (index != NAME_NOT_FOUND) {
             mStreamMap.removeItemsAt(index);
+        } else {
+            mDeferredStreams.removeItemsAt(dIndex);
         }
     }
 
@@ -416,14 +430,30 @@
     Mutex::Autolock icl(mBinderSerializationLock);
 
     sp<IGraphicBufferProducer> bufferProducer = outputConfiguration.getGraphicBufferProducer();
-    if (bufferProducer == NULL) {
-        ALOGE("%s: bufferProducer must not be null", __FUNCTION__);
+    bool deferredConsumer = bufferProducer == NULL;
+    int surfaceType = outputConfiguration.getSurfaceType();
+    bool validSurfaceType = ((surfaceType == OutputConfiguration::SURFACE_TYPE_SURFACE_VIEW) ||
+            (surfaceType == OutputConfiguration::SURFACE_TYPE_SURFACE_TEXTURE));
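+    // A deferred surface must declare itself as a SurfaceView or SurfaceTexture target so
+    // that the consumer usage flags can be inferred before the real surface is available.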
+    if (deferredConsumer && !validSurfaceType) {
+        ALOGE("%s: Target surface is invalid: bufferProducer = %p, surfaceType = %d.",
+                __FUNCTION__, bufferProducer.get(), surfaceType);
         return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Target Surface is invalid");
     }
+
     if (!mDevice.get()) {
         return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
     }
 
+    int width, height, format;
+    int32_t consumerUsage;
+    android_dataspace dataSpace;
+    status_t err;
+
+    // Create stream for deferred surface case.
+    if (deferredConsumer) {
+        return createDeferredSurfaceStreamLocked(outputConfiguration, newStreamId);
+    }
+
     // Don't create multiple streams for the same target surface
     {
         ssize_t index = mStreamMap.indexOfKey(IInterface::asBinder(bufferProducer));
@@ -435,13 +465,10 @@
         }
     }
 
-    status_t err;
-
     // HACK b/10949105
     // Query consumer usage bits to set async operation mode for
     // GLConsumer using controlledByApp parameter.
     bool useAsync = false;
-    int32_t consumerUsage;
     if ((err = bufferProducer->query(NATIVE_WINDOW_CONSUMER_USAGE_BITS,
             &consumerUsage)) != OK) {
         String8 msg = String8::format("Camera %d: Failed to query Surface consumer usage: %s (%d)",
@@ -450,8 +477,8 @@
         return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
     }
     if (consumerUsage & GraphicBuffer::USAGE_HW_TEXTURE) {
-        ALOGW("%s: Camera %d: Forcing asynchronous mode for stream",
-                __FUNCTION__, mCameraId);
+        ALOGW("%s: Camera %d: Forcing asynchronous mode for stream with consumer usage flag 0x%x",
+                __FUNCTION__, mCameraId, consumerUsage);
         useAsync = true;
     }
 
@@ -467,9 +494,6 @@
     sp<Surface> surface = new Surface(bufferProducer, useAsync);
     ANativeWindow *anw = surface.get();
 
-    int width, height, format;
-    android_dataspace dataSpace;
-
     if ((err = anw->query(anw, NATIVE_WINDOW_WIDTH, &width)) != OK) {
         String8 msg = String8::format("Camera %d: Failed to query Surface width: %s (%d)",
                 mCameraId, strerror(-err), err);
@@ -526,29 +550,12 @@
     } else {
         mStreamMap.add(binder, streamId);
 
-        ALOGV("%s: Camera %d: Successfully created a new stream ID %d",
-              __FUNCTION__, mCameraId, streamId);
+        ALOGV("%s: Camera %d: Successfully created a new stream ID %d for output surface"
+                " (%d x %d) with format 0x%x.",
+              __FUNCTION__, mCameraId, streamId, width, height, format);
 
-        /**
-         * Set the stream transform flags to automatically
-         * rotate the camera stream for preview use cases.
-         */
-        int32_t transform = 0;
-        err = getRotationTransformLocked(&transform);
-
-        if (err != OK) {
-            // Error logged by getRotationTransformLocked.
-            return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION,
-                    "Unable to calculate rotation transform for new stream");
-        }
-
-        err = mDevice->setStreamTransform(streamId, transform);
-        if (err != OK) {
-            String8 msg = String8::format("Failed to set stream transform (stream id %d)",
-                    streamId);
-            ALOGE("%s: %s", __FUNCTION__, msg.string());
-            return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
-        }
+        // Set transform flags to ensure the preview is rotated correctly.
+        res = setStreamTransformLocked(streamId);
 
         *newStreamId = streamId;
     }
@@ -556,6 +563,84 @@
     return res;
 }
 
+binder::Status CameraDeviceClient::createDeferredSurfaceStreamLocked(
+        const hardware::camera2::params::OutputConfiguration &outputConfiguration,
+        /*out*/
+        int* newStreamId) {
+    int width, height, format, surfaceType;
+    int32_t consumerUsage;
+    android_dataspace dataSpace;
+    status_t err;
+    binder::Status res;
+
+    if (!mDevice.get()) {
+        return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
+    }
+
+    // Infer the surface info for deferred surface stream creation.
+    width = outputConfiguration.getWidth();
+    height = outputConfiguration.getHeight();
+    surfaceType = outputConfiguration.getSurfaceType();
+    format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+    dataSpace = android_dataspace_t::HAL_DATASPACE_UNKNOWN;
+    // Hardcode consumer usage flags: SurfaceView--0x900, SurfaceTexture--0x100.
+    consumerUsage = GraphicBuffer::USAGE_HW_TEXTURE;
+    if (surfaceType == OutputConfiguration::SURFACE_TYPE_SURFACE_VIEW) {
+        consumerUsage |= GraphicBuffer::USAGE_HW_COMPOSER;
+    }
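+    // (USAGE_HW_TEXTURE is 0x100; OR-ing in USAGE_HW_COMPOSER (0x800) yields 0x900 for
+    // SurfaceView.)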
+    int streamId = camera3::CAMERA3_STREAM_ID_INVALID;
+    err = mDevice->createStream(/*surface*/nullptr, width, height, format, dataSpace,
+            static_cast<camera3_stream_rotation_t>(outputConfiguration.getRotation()),
+            &streamId, outputConfiguration.getSurfaceSetID(), consumerUsage);
+
+    if (err != OK) {
+        res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
+                "Camera %d: Error creating output stream (%d x %d, fmt %x, dataSpace %x): %s (%d)",
+                mCameraId, width, height, format, dataSpace, strerror(-err), err);
+    } else {
+        // Cannot add streamId to mStreamMap here, as the surface is deferred. Add it to
+        // a separate list to track. Once the deferred surface is set, this id will be
+        // moved to mStreamMap.
+        mDeferredStreams.push_back(streamId);
+
+        ALOGV("%s: Camera %d: Successfully created a new stream ID %d for a deferred surface"
+                " (%d x %d) stream with format 0x%x.",
+              __FUNCTION__, mCameraId, streamId, width, height, format);
+
+        // Set transform flags to ensure the preview is rotated correctly.
+        res = setStreamTransformLocked(streamId);
+
+        *newStreamId = streamId;
+    }
+    return res;
+}
+
+binder::Status CameraDeviceClient::setStreamTransformLocked(int streamId) {
+    int32_t transform = 0;
+    status_t err;
+    binder::Status res;
+
+    if (!mDevice.get()) {
+        return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
+    }
+
+    err = getRotationTransformLocked(&transform);
+    if (err != OK) {
+        // Error logged by getRotationTransformLocked.
+        return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION,
+                "Unable to calculate rotation transform for new stream");
+    }
+
+    err = mDevice->setStreamTransform(streamId, transform);
+    if (err != OK) {
+        String8 msg = String8::format("Failed to set stream transform (stream id %d)",
+                streamId);
+        ALOGE("%s: %s", __FUNCTION__, msg.string());
+        return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+    }
+
+    return res;
+}
 
 binder::Status CameraDeviceClient::createInputStream(
         int width, int height, int format,
@@ -934,6 +1019,76 @@
     return res;
 }
 
+binder::Status CameraDeviceClient::setDeferredConfiguration(int32_t streamId,
+        const hardware::camera2::params::OutputConfiguration &outputConfiguration) {
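+    // Expected client flow (a sketch): the client first creates a deferred stream via
+    // createStream() with a surface-less OutputConfiguration (surface type and size only),
+    // then calls this method with the real SurfaceView/SurfaceTexture surface to finish
+    // the stream configuration.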
+    ATRACE_CALL();
+
+    binder::Status res;
+    if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
+
+    Mutex::Autolock icl(mBinderSerializationLock);
+
+    sp<IGraphicBufferProducer> bufferProducer = outputConfiguration.getGraphicBufferProducer();
+
+    // Client code should guarantee that the surface is from SurfaceView or SurfaceTexture.
+    if (bufferProducer == NULL) {
+        ALOGE("%s: bufferProducer must not be null", __FUNCTION__);
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Target Surface is invalid");
+    }
+    // Check if this stream id is one of the deferred streams.
+    ssize_t index = NAME_NOT_FOUND;
+    for (size_t i = 0; i < mDeferredStreams.size(); i++) {
+        if (streamId == mDeferredStreams[i]) {
+            index = i;
+            break;
+        }
+    }
+    if (index == NAME_NOT_FOUND) {
+        String8 msg = String8::format("Camera %d: deferred surface is set to an unknown stream"
+                " (ID %d)", mCameraId, streamId);
+        ALOGW("%s: %s", __FUNCTION__, msg.string());
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+    }
+
+    if (!mDevice.get()) {
+        return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
+    }
+
+    // Don't create multiple streams for the same target surface
+    {
+        ssize_t index = mStreamMap.indexOfKey(IInterface::asBinder(bufferProducer));
+        if (index != NAME_NOT_FOUND) {
+            String8 msg = String8::format("Camera %d: Surface already has a stream created"
+                    " for it (ID %zd)", mCameraId, index);
+            ALOGW("%s: %s", __FUNCTION__, msg.string());
+            return STATUS_ERROR(CameraService::ERROR_ALREADY_EXISTS, msg.string());
+        }
+    }
+
+    status_t err;
+
+    // Always set to async, as we know the deferred surface is for preview streaming.
+    sp<Surface> consumerSurface = new Surface(bufferProducer, /*useAsync*/true);
+
+    // Finish the deferred stream configuration with the surface.
+    err = mDevice->setConsumerSurface(streamId, consumerSurface);
+    if (err == OK) {
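+        // Move the stream from the deferred list into mStreamMap, keyed by its new surface.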
+        sp<IBinder> binder = IInterface::asBinder(bufferProducer);
+        mStreamMap.add(binder, streamId);
+        mDeferredStreams.removeItemsAt(index);
+    } else if (err == NO_INIT) {
+        res = STATUS_ERROR_FMT(CameraService::ERROR_ILLEGAL_ARGUMENT,
+                "Camera %d: Deferred surface is invalid: %s (%d)",
+                mCameraId, strerror(-err), err);
+    } else {
+        res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
+                "Camera %d: Error setting output stream deferred surface: %s (%d)",
+                mCameraId, strerror(-err), err);
+    }
+
+    return res;
+}
+
 status_t CameraDeviceClient::dump(int fd, const Vector<String16>& args) {
     return BasicClient::dump(fd, args);
 }
@@ -959,6 +1114,11 @@
         for (size_t i = 0; i < mStreamMap.size(); i++) {
             result.appendFormat("      Stream %d\n", mStreamMap.valueAt(i));
         }
+    } else if (!mDeferredStreams.isEmpty()) {
+        result.append("    Current deferred surface output stream IDs:\n");
+        for (auto& streamId : mDeferredStreams) {
+            result.appendFormat("      Stream %d\n", streamId);
+        }
     } else {
         result.append("    No output streams configured.\n");
     }
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index d792b7d..dde23fb 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -131,6 +131,10 @@
     // Prepare stream by preallocating up to maxCount of its buffers
     virtual binder::Status prepare2(int32_t maxCount, int32_t streamId);
 
+    // Set the deferred surface for a stream.
+    virtual binder::Status setDeferredConfiguration(int32_t streamId,
+            const hardware::camera2::params::OutputConfiguration &outputConfiguration);
+
     /**
      * Interface used by CameraService
      */
@@ -188,6 +192,15 @@
     // Find the square of the euclidean distance between two points
     static int64_t euclidDistSquare(int32_t x0, int32_t y0, int32_t x1, int32_t y1);
 
+    // Create an output stream with a deferred surface, to be set at a later time.
+    binder::Status createDeferredSurfaceStreamLocked(
+            const hardware::camera2::params::OutputConfiguration &outputConfiguration,
+            int* newStreamId = NULL);
+
+    // Set the stream transform flags to automatically rotate the camera stream for preview use
+    // cases.
+    binder::Status setStreamTransformLocked(int streamId);
+
     // Find the closest dimensions for a given format in available stream configurations with
     // a width <= ROUNDING_WIDTH_CAP
     static const int32_t ROUNDING_WIDTH_CAP = 1920;
@@ -213,6 +226,10 @@
 
     int32_t mRequestIdCounter;
 
+    // The list of output streams whose surfaces are deferred. We have to track them separately
+    // as they have no surfaces available yet, so they cannot be put into mStreamMap. Once the
+    // deferred surface is configured, the stream id will be moved to mStreamMap.
+    Vector<int32_t> mDeferredStreams;
 };
 
 }; // namespace android
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 35ec531..8707f2a 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -110,7 +110,8 @@
     virtual status_t createStream(sp<Surface> consumer,
             uint32_t width, uint32_t height, int format,
             android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
-            int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID) = 0;
+            int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID,
+            uint32_t consumerUsage = 0) = 0;
 
     /**
      * Create an input stream of width, height, and format.
@@ -312,6 +313,12 @@
      * Get the HAL device version.
      */
     virtual uint32_t getDeviceVersion() = 0;
+
+    /**
+     * Set the deferred consumer surface and finish the rest of the stream configuration.
+     */
+    virtual status_t setConsumerSurface(int streamId, sp<Surface> consumer) = 0;
+
 };
 
 }; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 96f9338..bbe7317 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -990,12 +990,13 @@
 
 status_t Camera3Device::createStream(sp<Surface> consumer,
         uint32_t width, uint32_t height, int format, android_dataspace dataSpace,
-        camera3_stream_rotation_t rotation, int *id, int streamSetId) {
+        camera3_stream_rotation_t rotation, int *id, int streamSetId, uint32_t consumerUsage) {
     ATRACE_CALL();
     Mutex::Autolock il(mInterfaceLock);
     Mutex::Autolock l(mLock);
-    ALOGV("Camera %d: Creating new stream %d: %d x %d, format %d, dataspace %d rotation %d",
-            mId, mNextStreamId, width, height, format, dataSpace, rotation);
+    ALOGV("Camera %d: Creating new stream %d: %d x %d, format %d, dataspace %d rotation %d"
+            " consumer usage 0x%x", mId, mNextStreamId, width, height, format, dataSpace, rotation,
+            consumerUsage);
 
     status_t res;
     bool wasActive = false;
@@ -1032,6 +1033,19 @@
     if (mDeviceVersion <= CAMERA_DEVICE_API_VERSION_3_2) {
         streamSetId = CAMERA3_STREAM_SET_ID_INVALID;
     }
+
+    // HAL3.1 doesn't support deferred consumer stream creation, as it requires buffer
+    // registration, which needs a consumer surface to be available.
+    if (consumer == nullptr && mDeviceVersion < CAMERA_DEVICE_API_VERSION_3_2) {
+        ALOGE("HAL3.1 doesn't support deferred consumer stream creation");
+        return BAD_VALUE;
+    }
+
+    if (consumer == nullptr && format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
+        ALOGE("Deferred consumer stream creation only supports the IMPLEMENTATION_DEFINED format");
+        return BAD_VALUE;
+    }
+
     // Use legacy dataspace values for older HALs
     if (mDeviceVersion <= CAMERA_DEVICE_API_VERSION_3_3) {
         dataSpace = mapToLegacyDataspace(dataSpace);
@@ -1063,6 +1077,10 @@
         newStream = new Camera3OutputStream(mNextStreamId, consumer,
                 width, height, rawOpaqueBufferSize, format, dataSpace, rotation,
                 mTimestampOffset, streamSetId);
+    } else if (consumer == nullptr) {
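+        // Deferred consumer case: create the output stream from size/format/usage only;
+        // the Surface is attached later via setConsumerSurface().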
+        newStream = new Camera3OutputStream(mNextStreamId,
+                width, height, format, consumerUsage, dataSpace, rotation,
+                mTimestampOffset, streamSetId);
     } else {
         newStream = new Camera3OutputStream(mNextStreamId, consumer,
                 width, height, format, dataSpace, rotation,
@@ -1722,6 +1740,44 @@
     }
 }
 
+status_t Camera3Device::setConsumerSurface(int streamId, sp<Surface> consumer) {
+    ATRACE_CALL();
+    ALOGV("%s: Camera %d: set consumer surface for stream %d", __FUNCTION__, mId, streamId);
+    Mutex::Autolock il(mInterfaceLock);
+    Mutex::Autolock l(mLock);
+
+    if (consumer == nullptr) {
+        CLOGE("Null consumer is passed!");
+        return BAD_VALUE;
+    }
+
+    ssize_t idx = mOutputStreams.indexOfKey(streamId);
+    if (idx == NAME_NOT_FOUND) {
+        CLOGE("Stream %d is unknown", streamId);
+        return idx;
+    }
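+    // Hand the surface to the stream, then complete the configuration that was deferred at
+    // stream creation time.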
+    sp<Camera3OutputStreamInterface> stream = mOutputStreams[idx];
+    status_t res = stream->setConsumer(consumer);
+    if (res != OK) {
+        CLOGE("Stream %d set consumer failed: %s (%d)", streamId, strerror(-res), res);
+        return res;
+    }
+
+    if (!stream->isConfiguring()) {
+        CLOGE("Stream %d was already fully configured.", streamId);
+        return INVALID_OPERATION;
+    }
+
+    res = stream->finishConfiguration(mHal3Device);
+    if (res != OK) {
+        SET_ERR_L("Can't finish configuring output stream %d: %s (%d)",
+                stream->getId(), strerror(-res), res);
+        return res;
+    }
+
+    return OK;
+}
+
 /**
  * Camera3Device private methods
  */
@@ -1781,6 +1837,13 @@
         sp<Camera3OutputStreamInterface> stream =
                 mOutputStreams.editValueAt(idx);
 
+        // It is illegal to include a deferred consumer output stream in a request
+        if (stream->isConsumerConfigurationDeferred()) {
+            CLOGE("Stream %d hasn't finished configuration yet due to deferred consumer",
+                    stream->getId());
+            return NULL;
+        }
+
         // Lazy completion of stream configuration (allocation/registration)
         // on first use
         if (stream->isConfiguring()) {
@@ -1948,7 +2011,7 @@
     for (size_t i = 0; i < mOutputStreams.size(); i++) {
         sp<Camera3OutputStreamInterface> outputStream =
             mOutputStreams.editValueAt(i);
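+        // Skip streams whose consumer configuration is still deferred; they finish
+        // configuration later in setConsumerSurface().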
-        if (outputStream->isConfiguring()) {
+        if (outputStream->isConfiguring() && !outputStream->isConsumerConfigurationDeferred()) {
             res = outputStream->finishConfiguration(mHal3Device);
             if (res != OK) {
                 CLOGE("Can't finish configuring output stream %d: %s (%d)",
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 2aca57d..fe5f217 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -95,11 +95,14 @@
 
     // Actual stream creation/deletion is delayed until first request is submitted
     // If adding streams while actively capturing, will pause device before adding
-    // stream, reconfiguring device, and unpausing.
+    // stream, reconfiguring device, and unpausing. If the client creates a stream
+    // with a nullptr consumer surface, the client must then call setConsumerSurface()
+    // and finish the stream configuration before starting output streaming.
     virtual status_t createStream(sp<Surface> consumer,
             uint32_t width, uint32_t height, int format,
             android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
-            int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID);
+            int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID,
+            uint32_t consumerUsage = 0);
     virtual status_t createInputStream(
             uint32_t width, uint32_t height, int format,
             int *id);
@@ -160,6 +163,12 @@
     // Methods called by subclasses
     void             notifyStatus(bool idle); // updates from StatusTracker
 
+    /**
+     * Set the deferred consumer surface to the output stream and finish the deferred
+     * consumer configuration.
+     */
+    virtual status_t setConsumerSurface(int streamId, sp<Surface> consumer);
+
   private:
     static const size_t        kDumpLockAttempts  = 10;
     static const size_t        kDumpSleepDuration = 100000; // 0.10 sec
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
index 6354ef7..5123785 100644
--- a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
@@ -103,6 +103,15 @@
     return false;
 }
 
+bool Camera3DummyStream::isConsumerConfigurationDeferred() const {
+    return false;
+}
+
+status_t Camera3DummyStream::setConsumer(sp<Surface> consumer) {
+    ALOGE("%s: Stream %d: A dummy stream doesn't support setting a consumer surface (%p)!",
+            __FUNCTION__, mId, consumer.get());
+    return INVALID_OPERATION;
+}
 }; // namespace camera3
 
 }; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.h b/services/camera/libcameraservice/device3/Camera3DummyStream.h
index 7b48daa..639619e 100644
--- a/services/camera/libcameraservice/device3/Camera3DummyStream.h
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.h
@@ -61,6 +61,16 @@
      */
     bool isVideoStream() const;
 
+    /**
+     * Return if the consumer configuration of this stream is deferred.
+     */
+    virtual bool isConsumerConfigurationDeferred() const;
+
+    /**
+     * Set the consumer surface to the output stream.
+     */
+    virtual status_t setConsumer(sp<Surface> consumer);
+
   protected:
 
     /**
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 7b72144..299435a 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -42,7 +42,8 @@
         mTransform(0),
         mTraceFirstBuffer(true),
         mUseBufferManager(false),
-        mTimestampOffset(timestampOffset) {
+        mTimestampOffset(timestampOffset),
+        mConsumerUsage(0) {
 
     if (mConsumer == NULL) {
         ALOGE("%s: Consumer is NULL!", __FUNCTION__);
@@ -66,7 +67,8 @@
         mTraceFirstBuffer(true),
         mUseMonoTimestamp(false),
         mUseBufferManager(false),
-        mTimestampOffset(timestampOffset) {
+        mTimestampOffset(timestampOffset),
+        mConsumerUsage(0) {
 
     if (format != HAL_PIXEL_FORMAT_BLOB && format != HAL_PIXEL_FORMAT_RAW_OPAQUE) {
         ALOGE("%s: Bad format for size-only stream: %d", __FUNCTION__,
@@ -84,6 +86,39 @@
     }
 }
 
+Camera3OutputStream::Camera3OutputStream(int id,
+        uint32_t width, uint32_t height, int format,
+        uint32_t consumerUsage, android_dataspace dataSpace,
+        camera3_stream_rotation_t rotation, nsecs_t timestampOffset, int setId) :
+        Camera3IOStreamBase(id, CAMERA3_STREAM_OUTPUT, width, height,
+                            /*maxSize*/0, format, dataSpace, rotation, setId),
+        mConsumer(nullptr),
+        mTransform(0),
+        mTraceFirstBuffer(true),
+        mUseBufferManager(false),
+        mTimestampOffset(timestampOffset),
+        mConsumerUsage(consumerUsage) {
+    // Deferred consumer streams currently only support the preview surface format.
+    if (format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
+        ALOGE("%s: Deferred consumer only supports IMPLEMENTATION_DEFINED format now!",
+                __FUNCTION__);
+        mState = STATE_ERROR;
+    }
+
+    // Sanity check for the consumer usage flag.
+    if ((consumerUsage & GraphicBuffer::USAGE_HW_TEXTURE) == 0 &&
+            (consumerUsage & GraphicBuffer::USAGE_HW_COMPOSER) == 0) {
+        ALOGE("%s: Deferred consumer usage flag is illegal (0x%x)!", __FUNCTION__, consumerUsage);
+        mState = STATE_ERROR;
+    }
+
+    mConsumerName = String8("Deferred");
+    if (setId > CAMERA3_STREAM_SET_ID_INVALID) {
+        mBufferReleasedListener = new BufferReleasedListener(this);
+    }
+}
+
 Camera3OutputStream::Camera3OutputStream(int id, camera3_stream_type_t type,
                                          uint32_t width, uint32_t height,
                                          int format,
@@ -96,7 +131,8 @@
         mTransform(0),
         mTraceFirstBuffer(true),
         mUseMonoTimestamp(false),
-        mUseBufferManager(false) {
+        mUseBufferManager(false),
+        mConsumerUsage(0) {
 
     if (setId > CAMERA3_STREAM_SET_ID_INVALID) {
         mBufferReleasedListener = new BufferReleasedListener(this);
@@ -235,6 +271,7 @@
      */
     if (buffer.status == CAMERA3_BUFFER_STATUS_ERROR) {
         // Cancel buffer
+        ALOGW("A frame is dropped for stream %d", mId);
         res = currentConsumer->cancelBuffer(currentConsumer.get(),
                 container_of(buffer.buffer, ANativeWindowBuffer, handle),
                 anwReleaseFence);
@@ -475,11 +512,16 @@
         return res;
     }
 
+    // If the stream configuration was not finished (the stream can only be in STATE_IN_CONFIG
+    // or STATE_CONSTRUCTED state), there is no need to change the stream state; return OK.
+    if (mConsumer == nullptr) {
+        return OK;
+    }
+
     ALOGV("%s: disconnecting stream %d from native window", __FUNCTION__, getId());
 
     res = native_window_api_disconnect(mConsumer.get(),
                                        NATIVE_WINDOW_API_CAMERA);
-
     /**
      * This is not an error. if client calling process dies, the window will
      * also die and all calls to it will return DEAD_OBJECT, thus it's already
@@ -521,6 +563,12 @@
 
     status_t res;
     int32_t u = 0;
+    if (mConsumer == nullptr) {
+        // mConsumerUsage was sanitized before the Camera3OutputStream was constructed.
+        *usage = mConsumerUsage;
+        return OK;
+    }
+
     res = static_cast<ANativeWindow*>(mConsumer.get())->query(mConsumer.get(),
             NATIVE_WINDOW_CONSUMER_USAGE_BITS, &u);
 
@@ -556,7 +604,7 @@
 status_t Camera3OutputStream::setBufferManager(sp<Camera3BufferManager> bufferManager) {
     Mutex::Autolock l(mLock);
     if (mState != STATE_CONSTRUCTED) {
-        ALOGE("%s: this method can only be called when stream in in CONSTRUCTED state.",
+        ALOGE("%s: this method can only be called when the stream is in the CONSTRUCTED state.",
                 __FUNCTION__);
         return INVALID_OPERATION;
     }
@@ -622,6 +670,26 @@
     return OK;
 }
 
+bool Camera3OutputStream::isConsumerConfigurationDeferred() const {
+    Mutex::Autolock l(mLock);
+    return mConsumer == nullptr;
+}
+
+status_t Camera3OutputStream::setConsumer(sp<Surface> consumer) {
+    if (consumer == nullptr) {
+        ALOGE("%s: it's illegal to set a null consumer surface!", __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+
+    if (mConsumer != nullptr) {
+        ALOGE("%s: consumer surface was already set!", __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+
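+    // The caller (Camera3Device::setConsumerSurface) finishes the stream configuration next.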
+    mConsumer = consumer;
+    return OK;
+}
+
 bool Camera3OutputStream::isConsumedByHWComposer() const {
     uint32_t usage = 0;
     status_t res = getEndpointUsage(&usage);
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index 7d28b05..5507cfc 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -94,6 +94,16 @@
             android_dataspace dataSpace, camera3_stream_rotation_t rotation,
             nsecs_t timestampOffset, int setId = CAMERA3_STREAM_SET_ID_INVALID);
 
+    /**
+     * Set up a stream with deferred consumer for formats that have 2 dimensions, such as
+     * RAW and YUV. The consumer must be set before using this stream for output. A valid
+     * stream set id needs to be set to support buffer sharing between multiple streams.
+     */
+    Camera3OutputStream(int id, uint32_t width, uint32_t height, int format,
+            uint32_t consumerUsage, android_dataspace dataSpace,
+            camera3_stream_rotation_t rotation, nsecs_t timestampOffset,
+            int setId = CAMERA3_STREAM_SET_ID_INVALID);
+
     virtual ~Camera3OutputStream();
 
     /**
@@ -117,6 +127,16 @@
      */
     bool isConsumedByHWComposer() const;
 
+    /**
+     * Return if the consumer configuration of this stream is deferred.
+     */
+    virtual bool isConsumerConfigurationDeferred() const;
+
+    /**
+     * Set the consumer surface to the output stream.
+     */
+    virtual status_t setConsumer(sp<Surface> consumer);
+
     class BufferReleasedListener : public BnProducerListener {
         public:
           BufferReleasedListener(wp<Camera3OutputStream> parent) : mParent(parent) {}
@@ -159,6 +179,7 @@
     virtual status_t disconnectLocked();
 
     sp<Surface> mConsumer;
+
   private:
     int               mTransform;
 
@@ -195,6 +216,12 @@
     nsecs_t mTimestampOffset;
 
     /**
+     * Consumer end point usage flag set by the constructor for the deferred
+     * consumer case.
+     */
+    uint32_t    mConsumerUsage;
+
+    /**
      * Internal Camera3Stream interface
      */
     virtual status_t getBufferLocked(camera3_stream_buffer *buffer);
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
index 50dce55..3f83c89 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
@@ -41,6 +41,16 @@
     virtual bool isVideoStream() const = 0;
 
     /**
+     * Return if the consumer configuration of this stream is deferred.
+     */
+    virtual bool isConsumerConfigurationDeferred() const = 0;
+
+    /**
+     * Set the consumer surface to the output stream.
+     */
+    virtual status_t setConsumer(sp<Surface> consumer) = 0;
+
+    /**
      * Detach an unused buffer from the stream.
      *
      * buffer must be non-null; fenceFd may null, and if it is non-null, but
@@ -49,7 +59,6 @@
      *
      */
     virtual status_t detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd) = 0;
-
 };
 
 } // namespace camera3
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index 96d62d4..3ffd9d1 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -55,7 +55,7 @@
     mMaxSize(maxSize),
     mState(STATE_CONSTRUCTED),
     mStatusId(StatusTracker::NO_STATUS_ID),
-    mStreamUnpreparable(false),
+    mStreamUnpreparable(true),
     mOldUsage(0),
     mOldMaxBuffers(0),
     mPrepared(false),
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index 0755700..1ff215d 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -175,7 +175,8 @@
      *   OK on a successful configuration
      *   NO_INIT in case of a serious error from the HAL device
      *   NO_MEMORY in case of an error registering buffers
-     *   INVALID_OPERATION in case connecting to the consumer failed
+     *   INVALID_OPERATION in case connecting to the consumer failed or consumer
+     *       doesn't exist yet.
      */
     status_t         finishConfiguration(camera3_device *hal3Device);
 
diff --git a/services/soundtrigger/Android.mk b/services/soundtrigger/Android.mk
index e8e18b8..c55ac7f 100644
--- a/services/soundtrigger/Android.mk
+++ b/services/soundtrigger/Android.mk
@@ -40,6 +40,8 @@
 
 LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
 
+LOCAL_CFLAGS := -Wall -Werror
+
 LOCAL_MODULE:= libsoundtriggerservice
 
 include $(BUILD_SHARED_LIBRARY)