Merge "NuPlayer: timed text support" into lmp-dev
diff --git a/include/media/MediaProfiles.h b/include/media/MediaProfiles.h
index d202fbc..253c557 100644
--- a/include/media/MediaProfiles.h
+++ b/include/media/MediaProfiles.h
@@ -47,6 +47,14 @@
     CAMCORDER_QUALITY_TIME_LAPSE_QVGA = 1007,
     CAMCORDER_QUALITY_TIME_LAPSE_2160P = 1008,
     CAMCORDER_QUALITY_TIME_LAPSE_LIST_END = 1008,
+
+    CAMCORDER_QUALITY_HIGH_SPEED_LIST_START = 2000,
+    CAMCORDER_QUALITY_HIGH_SPEED_LOW  = 2000,
+    CAMCORDER_QUALITY_HIGH_SPEED_HIGH = 2001,
+    CAMCORDER_QUALITY_HIGH_SPEED_480P = 2002,
+    CAMCORDER_QUALITY_HIGH_SPEED_720P = 2003,
+    CAMCORDER_QUALITY_HIGH_SPEED_1080P = 2004,
+    CAMCORDER_QUALITY_HIGH_SPEED_LIST_END = 2004,
 };
 
 /**
diff --git a/include/media/mediarecorder.h b/include/media/mediarecorder.h
index 142cb90..b0a62a7 100644
--- a/include/media/mediarecorder.h
+++ b/include/media/mediarecorder.h
@@ -61,12 +61,18 @@
     OUTPUT_FORMAT_AAC_ADIF = 5,
     OUTPUT_FORMAT_AAC_ADTS = 6,
 
+    OUTPUT_FORMAT_AUDIO_ONLY_END = 7, // Used in validating the output format.  Should be
+                                      //  at the end of the audio-only output formats.
+
     /* Stream over a socket, limited to a single stream */
     OUTPUT_FORMAT_RTP_AVP = 7,
 
     /* H.264/AAC data encapsulated in MPEG2/TS */
     OUTPUT_FORMAT_MPEG2TS = 8,
 
+    /* VP8/VORBIS data in a WEBM container */
+    OUTPUT_FORMAT_WEBM = 9,
+
     OUTPUT_FORMAT_LIST_END // must be last - used to validate format type
 };
 
@@ -77,6 +83,7 @@
     AUDIO_ENCODER_AAC = 3,
     AUDIO_ENCODER_HE_AAC = 4,
     AUDIO_ENCODER_AAC_ELD = 5,
+    AUDIO_ENCODER_VORBIS = 6,
 
     AUDIO_ENCODER_LIST_END // must be the last - used to validate the audio encoder type
 };
@@ -86,6 +93,7 @@
     VIDEO_ENCODER_H263 = 1,
     VIDEO_ENCODER_H264 = 2,
     VIDEO_ENCODER_MPEG_4_SP = 3,
+    VIDEO_ENCODER_VP8 = 4,
 
     VIDEO_ENCODER_LIST_END // must be the last - used to validate the video encoder type
 };
diff --git a/include/media/stagefright/MPEG4Writer.h b/include/media/stagefright/MPEG4Writer.h
index 3ef6b9a..26ce5f9 100644
--- a/include/media/stagefright/MPEG4Writer.h
+++ b/include/media/stagefright/MPEG4Writer.h
@@ -63,8 +63,8 @@
     int32_t getTimeScale() const { return mTimeScale; }
 
     status_t setGeoData(int latitudex10000, int longitudex10000);
-    void setStartTimeOffsetMs(int ms) { mStartTimeOffsetMs = ms; }
-    int32_t getStartTimeOffsetMs() const { return mStartTimeOffsetMs; }
+    virtual void setStartTimeOffsetMs(int ms) { mStartTimeOffsetMs = ms; }
+    virtual int32_t getStartTimeOffsetMs() const { return mStartTimeOffsetMs; }
 
 protected:
     virtual ~MPEG4Writer();
diff --git a/include/media/stagefright/MediaCodec.h b/include/media/stagefright/MediaCodec.h
index 3f7508b..26a0963 100644
--- a/include/media/stagefright/MediaCodec.h
+++ b/include/media/stagefright/MediaCodec.h
@@ -30,6 +30,7 @@
 struct AString;
 struct CodecBase;
 struct ICrypto;
+struct IBatteryStats;
 struct SoftwareRenderer;
 struct Surface;
 
@@ -51,6 +52,8 @@
         CB_OUTPUT_FORMAT_CHANGED = 4,
     };
 
+    struct BatteryNotifier;
+
     static sp<MediaCodec> CreateByType(
             const sp<ALooper> &looper, const char *mime, bool encoder);
 
@@ -225,6 +228,9 @@
     sp<AMessage> mInputFormat;
     sp<AMessage> mCallback;
 
+    bool mBatteryStatNotified;
+    bool mIsVideo;
+
     // initial create parameters
     AString mInitName;
     bool mInitNameIsType;
@@ -294,6 +300,7 @@
     status_t onSetParameters(const sp<AMessage> &params);
 
     status_t amendOutputFormatWithCodecSpecificData(const sp<ABuffer> &buffer);
+    void updateBatteryStat();
 
     DISALLOW_EVIL_CONSTRUCTORS(MediaCodec);
 };
diff --git a/include/media/stagefright/MediaWriter.h b/include/media/stagefright/MediaWriter.h
index 5cc8dcf..e27ea1d 100644
--- a/include/media/stagefright/MediaWriter.h
+++ b/include/media/stagefright/MediaWriter.h
@@ -48,6 +48,9 @@
         return OK;
     }
 
+    virtual void setStartTimeOffsetMs(int ms) {}
+    virtual int32_t getStartTimeOffsetMs() const { return 0; }
+
 protected:
     virtual ~MediaWriter() {}
     int64_t mMaxFileSizeLimitBytes;
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index 5116d1e..fa1b20a 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -175,12 +175,11 @@
 
 // Proxy seen by AudioTrack client and AudioRecord client
 class ClientProxy : public Proxy {
-protected:
+public:
     ClientProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount, size_t frameSize,
             bool isOut, bool clientInServer);
     virtual ~ClientProxy() { }
 
-public:
     static const struct timespec kForever;
     static const struct timespec kNonBlocking;
 
@@ -394,8 +393,10 @@
 class AudioTrackServerProxy : public ServerProxy {
 public:
     AudioTrackServerProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
-            size_t frameSize, bool clientInServer = false)
-        : ServerProxy(cblk, buffers, frameCount, frameSize, true /*isOut*/, clientInServer) { }
+            size_t frameSize, bool clientInServer = false, uint32_t sampleRate = 0)
+        : ServerProxy(cblk, buffers, frameCount, frameSize, true /*isOut*/, clientInServer) {
+        mCblk->mSampleRate = sampleRate;
+    }
 protected:
     virtual ~AudioTrackServerProxy() { }
 
@@ -458,9 +459,8 @@
 class AudioRecordServerProxy : public ServerProxy {
 public:
     AudioRecordServerProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
-            size_t frameSize)
-        : ServerProxy(cblk, buffers, frameCount, frameSize, false /*isOut*/,
-            false /*clientInServer*/) { }
+            size_t frameSize, bool clientInServer)
+        : ServerProxy(cblk, buffers, frameCount, frameSize, false /*isOut*/, clientInServer) { }
 protected:
     virtual ~AudioRecordServerProxy() { }
 };
diff --git a/media/libmedia/MediaProfiles.cpp b/media/libmedia/MediaProfiles.cpp
index e9e453b..d2e181b 100644
--- a/media/libmedia/MediaProfiles.cpp
+++ b/media/libmedia/MediaProfiles.cpp
@@ -81,6 +81,12 @@
     {"timelapse1080p", CAMCORDER_QUALITY_TIME_LAPSE_1080P},
     {"timelapse2160p", CAMCORDER_QUALITY_TIME_LAPSE_2160P},
     {"timelapseqvga", CAMCORDER_QUALITY_TIME_LAPSE_QVGA},
+
+    {"highspeedlow",  CAMCORDER_QUALITY_HIGH_SPEED_LOW},
+    {"highspeedhigh", CAMCORDER_QUALITY_HIGH_SPEED_HIGH},
+    {"highspeed480p", CAMCORDER_QUALITY_HIGH_SPEED_480P},
+    {"highspeed720p", CAMCORDER_QUALITY_HIGH_SPEED_720P},
+    {"highspeed1080p", CAMCORDER_QUALITY_HIGH_SPEED_1080P},
 };
 
 #if LOG_NDEBUG
@@ -474,6 +480,11 @@
            quality <= CAMCORDER_QUALITY_TIME_LAPSE_LIST_END;
 }
 
+static bool isHighSpeedProfile(camcorder_quality quality) {
+    return quality >= CAMCORDER_QUALITY_HIGH_SPEED_LIST_START &&
+           quality <= CAMCORDER_QUALITY_HIGH_SPEED_LIST_END;
+}
+
 void MediaProfiles::initRequiredProfileRefs(const Vector<int>& cameraIds) {
     ALOGV("Number of camera ids: %zu", cameraIds.size());
     CHECK(cameraIds.size() > 0);
@@ -521,14 +532,17 @@
         camcorder_quality refQuality;
         VideoCodec *codec = NULL;
 
-        // Check high and low from either camcorder profile or timelapse profile
-        // but not both. Default, check camcorder profile
+        // Check high and low from either the camcorder profile, the timelapse profile
+        // or the high speed profile, but not all of them. By default, check the camcorder profile.
         size_t j = 0;
         size_t o = 2;
         if (isTimelapseProfile(quality)) {
             // Check timelapse profile instead.
             j = 2;
             o = kNumRequiredProfiles;
+        } else if (isHighSpeedProfile(quality)) {
+            // Skip the check for high speed profile.
+            continue;
         } else {
             // Must be camcorder profile.
             CHECK(isCamcorderProfile(quality));
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index 889bd7f..2b7ea97 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -283,16 +283,21 @@
 status_t MediaPlayer::start()
 {
     ALOGV("start");
+
+    status_t ret = NO_ERROR;
     Mutex::Autolock _l(mLock);
-    if (mCurrentState & MEDIA_PLAYER_STARTED)
-        return NO_ERROR;
-    if ( (mPlayer != 0) && ( mCurrentState & ( MEDIA_PLAYER_PREPARED |
+
+    mLockThreadId = getThreadId();
+
+    if (mCurrentState & MEDIA_PLAYER_STARTED) {
+        ret = NO_ERROR;
+    } else if ( (mPlayer != 0) && ( mCurrentState & ( MEDIA_PLAYER_PREPARED |
                     MEDIA_PLAYER_PLAYBACK_COMPLETE | MEDIA_PLAYER_PAUSED ) ) ) {
         mPlayer->setLooping(mLoop);
         mPlayer->setVolume(mLeftVolume, mRightVolume);
         mPlayer->setAuxEffectSendLevel(mSendLevel);
         mCurrentState = MEDIA_PLAYER_STARTED;
-        status_t ret = mPlayer->start();
+        ret = mPlayer->start();
         if (ret != NO_ERROR) {
             mCurrentState = MEDIA_PLAYER_STATE_ERROR;
         } else {
@@ -300,10 +305,14 @@
                 ALOGV("playback completed immediately following start()");
             }
         }
-        return ret;
+    } else {
+        ALOGE("start called in state %d", mCurrentState);
+        ret = INVALID_OPERATION;
     }
-    ALOGE("start called in state %d", mCurrentState);
-    return INVALID_OPERATION;
+
+    mLockThreadId = 0;
+
+    return ret;
 }
 
 status_t MediaPlayer::stop()
@@ -706,8 +715,8 @@
     // running in the same process as the media server. In that case,
     // this will deadlock.
     //
-    // The threadId hack below works around this for the care of prepare
-    // and seekTo within the same process.
+    // The threadId hack below works around this for the case of prepare,
+    // seekTo and start within the same process.
     // FIXME: Remember, this is a hack, it's not even a hack that is applied
     // consistently for all use-cases, this needs to be revisited.
     if (mLockThreadId != getThreadId()) {
diff --git a/media/libmedia/mediarecorder.cpp b/media/libmedia/mediarecorder.cpp
index c8192e9..1952b86 100644
--- a/media/libmedia/mediarecorder.cpp
+++ b/media/libmedia/mediarecorder.cpp
@@ -186,8 +186,11 @@
         ALOGE("setOutputFormat called in an invalid state: %d", mCurrentState);
         return INVALID_OPERATION;
     }
-    if (mIsVideoSourceSet && of >= OUTPUT_FORMAT_AUDIO_ONLY_START && of != OUTPUT_FORMAT_RTP_AVP && of != OUTPUT_FORMAT_MPEG2TS) { //first non-video output format
-        ALOGE("output format (%d) is meant for audio recording only and incompatible with video recording", of);
+    if (mIsVideoSourceSet
+            && of >= OUTPUT_FORMAT_AUDIO_ONLY_START //first non-video output format
+            && of < OUTPUT_FORMAT_AUDIO_ONLY_END) {
+        ALOGE("output format (%d) is meant for audio recording only"
+              " and incompatible with video recording", of);
         return INVALID_OPERATION;
     }
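
A minimal, self-contained sketch of the range check above. The values for OUTPUT_FORMAT_AAC_ADTS, OUTPUT_FORMAT_AUDIO_ONLY_END and OUTPUT_FORMAT_WEBM come from this patch; OUTPUT_FORMAT_AUDIO_ONLY_START = 3 is assumed from the existing header. This is an illustration of the check, not the framework code.

#include <cstdio>

enum output_format {
    OUTPUT_FORMAT_AUDIO_ONLY_START = 3,  // assumed value from the existing header
    OUTPUT_FORMAT_AAC_ADTS         = 6,  // from this patch's context
    OUTPUT_FORMAT_AUDIO_ONLY_END   = 7,  // added by this patch
    OUTPUT_FORMAT_WEBM             = 9,  // added by this patch
};

// Mirrors the rejection condition in setOutputFormat(): a format in the
// audio-only range cannot be combined with a video source.
static bool isAudioOnlyFormat(output_format of) {
    return of >= OUTPUT_FORMAT_AUDIO_ONLY_START && of < OUTPUT_FORMAT_AUDIO_ONLY_END;
}

int main() {
    std::printf("AAC_ADTS audio-only: %d\n", isAudioOnlyFormat(OUTPUT_FORMAT_AAC_ADTS)); // 1
    std::printf("WEBM audio-only:     %d\n", isAudioOnlyFormat(OUTPUT_FORMAT_WEBM));     // 0
    return 0;
}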
 
diff --git a/media/libmediaplayerservice/Android.mk b/media/libmediaplayerservice/Android.mk
index 48d44c1..0c7e590c 100644
--- a/media/libmediaplayerservice/Android.mk
+++ b/media/libmediaplayerservice/Android.mk
@@ -49,6 +49,7 @@
     $(TOP)/frameworks/av/media/libstagefright/include               \
     $(TOP)/frameworks/av/media/libstagefright/rtsp                  \
     $(TOP)/frameworks/av/media/libstagefright/wifi-display          \
+    $(TOP)/frameworks/av/media/libstagefright/webm                  \
     $(TOP)/frameworks/native/include/media/openmax                  \
     $(TOP)/external/tremolo/Tremolo                                 \
 
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 7218467..735344c 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -34,6 +34,7 @@
 
 #include <utils/misc.h>
 
+#include <binder/IBatteryStats.h>
 #include <binder/IPCThreadState.h>
 #include <binder/IServiceManager.h>
 #include <binder/MemoryHeapBase.h>
@@ -275,6 +276,20 @@
     // speaker is on by default
     mBatteryAudio.deviceOn[SPEAKER] = 1;
 
+    // Reset battery stats.
+    // If the mediaserver has crashed, battery stats could be left in a
+    // bad state; reset them upon service start.
+    const sp<IServiceManager> sm(defaultServiceManager());
+    if (sm != NULL) {
+        const String16 name("batterystats");
+        sp<IBatteryStats> batteryStats =
+                interface_cast<IBatteryStats>(sm->getService(name));
+        if (batteryStats != NULL) {
+            batteryStats->noteResetVideo();
+            batteryStats->noteResetAudio();
+        }
+    }
+
     MediaPlayerFactory::registerBuiltinFactories();
 }
 
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index bfc075c..8774117 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -19,6 +19,7 @@
 #include <inttypes.h>
 #include <utils/Log.h>
 
+#include "WebmWriter.h"
 #include "StagefrightRecorder.h"
 
 #include <binder/IPCThreadState.h>
@@ -764,7 +765,8 @@
         case OUTPUT_FORMAT_DEFAULT:
         case OUTPUT_FORMAT_THREE_GPP:
         case OUTPUT_FORMAT_MPEG_4:
-            status = setupMPEG4Recording();
+        case OUTPUT_FORMAT_WEBM:
+            status = setupMPEG4orWEBMRecording();
             break;
 
         case OUTPUT_FORMAT_AMR_NB:
@@ -826,9 +828,14 @@
         case OUTPUT_FORMAT_DEFAULT:
         case OUTPUT_FORMAT_THREE_GPP:
         case OUTPUT_FORMAT_MPEG_4:
+        case OUTPUT_FORMAT_WEBM:
         {
+            bool isMPEG4 = true;
+            if (mOutputFormat == OUTPUT_FORMAT_WEBM) {
+                isMPEG4 = false;
+            }
             sp<MetaData> meta = new MetaData;
-            setupMPEG4MetaData(&meta);
+            setupMPEG4orWEBMMetaData(&meta);
             status = mWriter->start(meta.get());
             break;
         }
@@ -1538,12 +1545,17 @@
     return OK;
 }
 
-status_t StagefrightRecorder::setupMPEG4Recording() {
+status_t StagefrightRecorder::setupMPEG4orWEBMRecording() {
     mWriter.clear();
     mTotalBitRate = 0;
 
     status_t err = OK;
-    sp<MediaWriter> writer = new MPEG4Writer(mOutputFd);
+    sp<MediaWriter> writer;
+    if (mOutputFormat == OUTPUT_FORMAT_WEBM) {
+        writer = new WebmWriter(mOutputFd);
+    } else {
+        writer = new MPEG4Writer(mOutputFd);
+    }
 
     if (mVideoSource < VIDEO_SOURCE_LIST_END) {
 
@@ -1563,22 +1575,25 @@
         mTotalBitRate += mVideoBitRate;
     }
 
-    // Audio source is added at the end if it exists.
-    // This help make sure that the "recoding" sound is suppressed for
-    // camcorder applications in the recorded files.
-    if (!mCaptureTimeLapse && (mAudioSource != AUDIO_SOURCE_CNT)) {
-        err = setupAudioEncoder(writer);
-        if (err != OK) return err;
-        mTotalBitRate += mAudioBitRate;
-    }
+    if (mOutputFormat != OUTPUT_FORMAT_WEBM) {
+        // Audio source is added at the end if it exists.
+        // This helps make sure that the "recording" sound is suppressed for
+        // camcorder applications in the recorded files.
+        // TODO Audio source is currently unsupported for webm output; vorbis encoder needed.
+        if (!mCaptureTimeLapse && (mAudioSource != AUDIO_SOURCE_CNT)) {
+            err = setupAudioEncoder(writer);
+            if (err != OK) return err;
+            mTotalBitRate += mAudioBitRate;
+        }
 
-    if (mInterleaveDurationUs > 0) {
-        reinterpret_cast<MPEG4Writer *>(writer.get())->
-            setInterleaveDuration(mInterleaveDurationUs);
-    }
-    if (mLongitudex10000 > -3600000 && mLatitudex10000 > -3600000) {
-        reinterpret_cast<MPEG4Writer *>(writer.get())->
-            setGeoData(mLatitudex10000, mLongitudex10000);
+        if (mInterleaveDurationUs > 0) {
+            reinterpret_cast<MPEG4Writer *>(writer.get())->
+                setInterleaveDuration(mInterleaveDurationUs);
+        }
+        if (mLongitudex10000 > -3600000 && mLatitudex10000 > -3600000) {
+            reinterpret_cast<MPEG4Writer *>(writer.get())->
+                setGeoData(mLatitudex10000, mLongitudex10000);
+        }
     }
     if (mMaxFileDurationUs != 0) {
         writer->setMaxFileDuration(mMaxFileDurationUs);
@@ -1586,7 +1601,6 @@
     if (mMaxFileSizeBytes != 0) {
         writer->setMaxFileSize(mMaxFileSizeBytes);
     }
-
     if (mVideoSource == VIDEO_SOURCE_DEFAULT
             || mVideoSource == VIDEO_SOURCE_CAMERA) {
         mStartTimeOffsetMs = mEncoderProfiles->getStartTimeOffsetMs(mCameraId);
@@ -1595,8 +1609,7 @@
         mStartTimeOffsetMs = 200;
     }
     if (mStartTimeOffsetMs > 0) {
-        reinterpret_cast<MPEG4Writer *>(writer.get())->
-            setStartTimeOffsetMs(mStartTimeOffsetMs);
+        writer->setStartTimeOffsetMs(mStartTimeOffsetMs);
     }
 
     writer->setListener(mListener);
@@ -1604,20 +1617,22 @@
     return OK;
 }
 
-void StagefrightRecorder::setupMPEG4MetaData(sp<MetaData> *meta) {
+void StagefrightRecorder::setupMPEG4orWEBMMetaData(sp<MetaData> *meta) {
     int64_t startTimeUs = systemTime() / 1000;
     (*meta)->setInt64(kKeyTime, startTimeUs);
     (*meta)->setInt32(kKeyFileType, mOutputFormat);
     (*meta)->setInt32(kKeyBitRate, mTotalBitRate);
-    (*meta)->setInt32(kKey64BitFileOffset, mUse64BitFileOffset);
     if (mMovieTimeScale > 0) {
         (*meta)->setInt32(kKeyTimeScale, mMovieTimeScale);
     }
-    if (mTrackEveryTimeDurationUs > 0) {
-        (*meta)->setInt64(kKeyTrackTimeStatus, mTrackEveryTimeDurationUs);
-    }
-    if (mRotationDegrees != 0) {
-        (*meta)->setInt32(kKeyRotation, mRotationDegrees);
+    if (mOutputFormat != OUTPUT_FORMAT_WEBM) {
+        (*meta)->setInt32(kKey64BitFileOffset, mUse64BitFileOffset);
+        if (mTrackEveryTimeDurationUs > 0) {
+            (*meta)->setInt64(kKeyTrackTimeStatus, mTrackEveryTimeDurationUs);
+        }
+        if (mRotationDegrees != 0) {
+            (*meta)->setInt32(kKeyRotation, mRotationDegrees);
+        }
     }
 }
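
A standalone sketch of the writer-selection pattern used in setupMPEG4orWEBMRecording() above: both writers expose setStartTimeOffsetMs() through the MediaWriter base class (now virtual), so the recorder no longer needs an MPEG4Writer-specific cast. Class names mirror the framework but the bodies are placeholder stand-ins, and OUTPUT_FORMAT_MPEG_4 = 2 is an assumed value; only OUTPUT_FORMAT_WEBM = 9 appears in this patch.

#include <cstdint>
#include <cstdio>
#include <memory>

// Stand-in for the MediaWriter interface with the virtual start-time-offset
// hooks added in this patch.
struct MediaWriter {
    virtual ~MediaWriter() {}
    virtual void setStartTimeOffsetMs(int /*ms*/) {}
    virtual int32_t getStartTimeOffsetMs() const { return 0; }
};

struct MPEG4Writer : MediaWriter {
    void setStartTimeOffsetMs(int ms) override { mOffsetMs = ms; }
    int32_t getStartTimeOffsetMs() const override { return mOffsetMs; }
    int32_t mOffsetMs = 0;
};

struct WebmWriter : MediaWriter {
    void setStartTimeOffsetMs(int ms) override { mOffsetMs = ms; }
    int32_t getStartTimeOffsetMs() const override { return mOffsetMs; }
    int32_t mOffsetMs = 0;
};

enum output_format {
    OUTPUT_FORMAT_MPEG_4 = 2,  // assumed value
    OUTPUT_FORMAT_WEBM   = 9,  // added by this patch
};

// Mirrors the container selection in setupMPEG4orWEBMRecording().
static std::unique_ptr<MediaWriter> makeWriter(output_format of) {
    if (of == OUTPUT_FORMAT_WEBM) {
        return std::unique_ptr<MediaWriter>(new WebmWriter());
    }
    return std::unique_ptr<MediaWriter>(new MPEG4Writer());
}

int main() {
    std::unique_ptr<MediaWriter> writer = makeWriter(OUTPUT_FORMAT_WEBM);
    writer->setStartTimeOffsetMs(200);  // no format-specific cast needed
    std::printf("offset=%d ms\n", (int)writer->getStartTimeOffsetMs());
    return 0;
}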
 
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index 377d168..9062f30 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -128,8 +128,8 @@
     sp<ALooper> mLooper;
 
     status_t prepareInternal();
-    status_t setupMPEG4Recording();
-    void setupMPEG4MetaData(sp<MetaData> *meta);
+    status_t setupMPEG4orWEBMRecording();
+    void setupMPEG4orWEBMMetaData(sp<MetaData> *meta);
     status_t setupAMRRecording();
     status_t setupAACRecording();
     status_t setupRawAudioRecording();
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index f876cce..d144af1 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -766,6 +766,7 @@
                             offloadInfo.has_video = (mVideoDecoder != NULL);
                             offloadInfo.is_streaming = true;
 
+                            ALOGV("try to open AudioSink in offload mode");
                             err = mAudioSink->open(
                                     sampleRate,
                                     numChannels,
@@ -805,6 +806,7 @@
 
                     if (!mOffloadAudio) {
                         flags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
+                        ALOGV("open AudioSink in NON-offload mode");
                         CHECK_EQ(mAudioSink->open(
                                     sampleRate,
                                     numChannels,
@@ -952,6 +954,21 @@
             } else if (what == Renderer::kWhatMediaRenderingStart) {
                 ALOGV("media rendering started");
                 notifyListener(MEDIA_STARTED, 0, 0);
+            } else if (what == Renderer::kWhatAudioOffloadTearDown) {
+                ALOGV("Tear down audio offload, fall back to s/w path");
+                int64_t positionUs;
+                CHECK(msg->findInt64("positionUs", &positionUs));
+                mAudioSink->close();
+                mAudioDecoder.clear();
+                mRenderer->flush(true /* audio */);
+                if (mVideoDecoder != NULL) {
+                    mRenderer->flush(false /* audio */);
+                }
+                mRenderer->signalDisableOffloadAudio();
+                mOffloadAudio = false;
+
+                performSeek(positionUs);
+                instantiateDecoder(true /* audio */, &mAudioDecoder);
             }
             break;
         }
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index 1b9bafb..8fce2f4 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -155,8 +155,14 @@
         }
     }
     mMediaBuffers.resize(mInputBuffers.size());
+    for (size_t i = 0; i < mMediaBuffers.size(); i++) {
+        mMediaBuffers.editItemAt(i) = NULL;
+    }
     mInputBufferIsDequeued.clear();
     mInputBufferIsDequeued.resize(mInputBuffers.size());
+    for (size_t i = 0; i < mInputBufferIsDequeued.size(); i++) {
+        mInputBufferIsDequeued.editItemAt(i) = false;
+    }
 }
 
 void NuPlayer::Decoder::requestCodecNotification() {
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index 8592ec2..3640038 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -223,6 +223,12 @@
             break;
         }
 
+        case kWhatAudioOffloadTearDown:
+        {
+            onAudioOffloadTearDown();
+            break;
+        }
+
         default:
             TRESPASS();
             break;
@@ -294,7 +300,7 @@
 
         case MediaPlayerBase::AudioSink::CB_EVENT_TEAR_DOWN:
         {
-            // TODO: send this to player.
+            me->notifyAudioOffloadTearDown();
             break;
         }
     }
@@ -582,6 +588,10 @@
     notify->post();
 }
 
+void NuPlayer::Renderer::notifyAudioOffloadTearDown() {
+    (new AMessage(kWhatAudioOffloadTearDown, id()))->post();
+}
+
 void NuPlayer::Renderer::onQueueBuffer(const sp<AMessage> &msg) {
     int32_t audio;
     CHECK(msg->findInt32("audio", &audio));
@@ -814,6 +824,7 @@
 void NuPlayer::Renderer::onDisableOffloadAudio() {
     Mutex::Autolock autoLock(mLock);
     mFlags &= ~FLAG_OFFLOAD_AUDIO;
+    ++mAudioQueueGeneration;
 }
 
 void NuPlayer::Renderer::notifyPosition() {
@@ -880,5 +891,21 @@
     }
 }
 
+void NuPlayer::Renderer::onAudioOffloadTearDown() {
+    uint32_t numFramesPlayed;
+    CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed), (status_t)OK);
+
+    int64_t currentPositionUs = mFirstAudioTimeUs
+            + (numFramesPlayed * mAudioSink->msecsPerFrame()) * 1000ll;
+
+    mAudioSink->stop();
+    mAudioSink->flush();
+
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatAudioOffloadTearDown);
+    notify->setInt64("positionUs", currentPositionUs);
+    notify->post();
+}
+
 }  // namespace android
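
A minimal sketch of the position arithmetic in onAudioOffloadTearDown() above: the frames the sink reports as played are converted to microseconds and added to the timestamp of the first queued audio buffer, giving the position NuPlayer seeks back to before re-opening the sink in non-offload mode. This is illustrative only; the numbers in main() are hypothetical.

#include <cstdint>
#include <cstdio>

// Mirrors: firstAudioTimeUs + (numFramesPlayed * msecsPerFrame) * 1000
static int64_t teardownPositionUs(int64_t firstAudioTimeUs,
                                  uint32_t numFramesPlayed,
                                  float msecsPerFrame) {
    return firstAudioTimeUs
            + (int64_t)(numFramesPlayed * msecsPerFrame * 1000.0f);
}

int main() {
    // Hypothetical stream: 8 kHz output (0.125 ms per frame), 24000 frames
    // played, first audio buffer timestamped at 1 second.
    const float msecsPerFrame = 1000.0f / 8000.0f;
    std::printf("resume at %lld us\n",
                (long long)teardownPositionUs(1000000, 24000, msecsPerFrame)); // 4000000
    return 0;
}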
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
index 6e86a8f..1cba1a0 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
@@ -62,6 +62,7 @@
         kWhatPosition            = 'posi',
         kWhatVideoRenderingStart = 'vdrd',
         kWhatMediaRenderingStart = 'mdrd',
+        kWhatAudioOffloadTearDown = 'aOTD',
     };
 
 protected:
@@ -143,12 +144,14 @@
     void onDisableOffloadAudio();
     void onPause();
     void onResume();
+    void onAudioOffloadTearDown();
 
     void notifyEOS(bool audio, status_t finalResult);
     void notifyFlushComplete(bool audio);
     void notifyPosition();
     void notifyVideoLateBy(int64_t lateByUs);
     void notifyVideoRenderingStart();
+    void notifyAudioOffloadTearDown();
 
     void flushQueue(List<QueueEntry> *queue);
     bool dropBufferWhileFlushing(bool audio, const sp<AMessage> &msg);
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 6cb1c64..b6cc742 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -2765,6 +2765,50 @@
 
                     break;
                 }
+
+                case OMX_VIDEO_CodingVP8:
+                case OMX_VIDEO_CodingVP9:
+                {
+                    OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE vp8type;
+                    InitOMXParams(&vp8type);
+                    vp8type.nPortIndex = kPortIndexOutput;
+                    status_t err = mOMX->getParameter(
+                            mNode,
+                            (OMX_INDEXTYPE)OMX_IndexParamVideoAndroidVp8Encoder,
+                            &vp8type,
+                            sizeof(vp8type));
+
+                    if (err == OK) {
+                        AString tsSchema = "none";
+                        if (vp8type.eTemporalPattern
+                                == OMX_VIDEO_VPXTemporalLayerPatternWebRTC) {
+                            switch (vp8type.nTemporalLayerCount) {
+                                case 1:
+                                {
+                                    tsSchema = "webrtc.vp8.1-layer";
+                                    break;
+                                }
+                                case 2:
+                                {
+                                    tsSchema = "webrtc.vp8.2-layer";
+                                    break;
+                                }
+                                case 3:
+                                {
+                                    tsSchema = "webrtc.vp8.3-layer";
+                                    break;
+                                }
+                                default:
+                                {
+                                    break;
+                                }
+                            }
+                        }
+                        notify->setString("ts-schema", tsSchema);
+                    }
+                    // Fall through to set up mime.
+                }
+
                 default:
                 {
                     CHECK(mIsEncoder ^ (portIndex == kPortIndexInput));
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index d9aed01..a67fabe 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -159,6 +159,8 @@
     waitOutstandingEncodingFrames_l();
     releaseQueuedFrames_l();
 
+    mFrameAvailableCondition.signal();
+
     return OK;
 }
 
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 7a9cb0b..15e062e 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -16,13 +16,13 @@
 
 //#define LOG_NDEBUG 0
 #define LOG_TAG "MediaCodec"
-#include <utils/Log.h>
 #include <inttypes.h>
 
-#include <media/stagefright/MediaCodec.h>
-
+#include "include/avc_utils.h"
 #include "include/SoftwareRenderer.h"
 
+#include <binder/IBatteryStats.h>
+#include <binder/IServiceManager.h>
 #include <gui/Surface.h>
 #include <media/ICrypto.h>
 #include <media/stagefright/foundation/ABuffer.h>
@@ -32,16 +32,85 @@
 #include <media/stagefright/foundation/hexdump.h>
 #include <media/stagefright/ACodec.h>
 #include <media/stagefright/BufferProducerWrapper.h>
+#include <media/stagefright/MediaCodec.h>
 #include <media/stagefright/MediaCodecList.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/NativeWindowWrapper.h>
-
-#include "include/avc_utils.h"
+#include <private/android_filesystem_config.h>
+#include <utils/Log.h>
+#include <utils/Singleton.h>
 
 namespace android {
 
+struct MediaCodec::BatteryNotifier : public Singleton<BatteryNotifier> {
+    BatteryNotifier();
+
+    void noteStartVideo();
+    void noteStopVideo();
+    void noteStartAudio();
+    void noteStopAudio();
+
+private:
+    int32_t mVideoRefCount;
+    int32_t mAudioRefCount;
+    sp<IBatteryStats> mBatteryStatService;
+};
+
+ANDROID_SINGLETON_STATIC_INSTANCE(MediaCodec::BatteryNotifier)
+
+MediaCodec::BatteryNotifier::BatteryNotifier() :
+    mVideoRefCount(0),
+    mAudioRefCount(0) {
+    // get battery service
+    const sp<IServiceManager> sm(defaultServiceManager());
+    if (sm != NULL) {
+        const String16 name("batterystats");
+        mBatteryStatService = interface_cast<IBatteryStats>(sm->getService(name));
+        if (mBatteryStatService == NULL) {
+            ALOGE("batterystats service unavailable!");
+        }
+    }
+}
+
+void MediaCodec::BatteryNotifier::noteStartVideo() {
+    if (mVideoRefCount == 0 && mBatteryStatService != NULL) {
+        mBatteryStatService->noteStartVideo(AID_MEDIA);
+    }
+    mVideoRefCount++;
+}
+
+void MediaCodec::BatteryNotifier::noteStopVideo() {
+    if (mVideoRefCount == 0) {
+        ALOGW("BatteryNotifier::noteStop(): video refcount is broken!");
+        return;
+    }
+
+    mVideoRefCount--;
+    if (mVideoRefCount == 0 && mBatteryStatService != NULL) {
+        mBatteryStatService->noteStopVideo(AID_MEDIA);
+    }
+}
+
+void MediaCodec::BatteryNotifier::noteStartAudio() {
+    if (mAudioRefCount == 0 && mBatteryStatService != NULL) {
+        mBatteryStatService->noteStartAudio(AID_MEDIA);
+    }
+    mAudioRefCount++;
+}
+
+void MediaCodec::BatteryNotifier::noteStopAudio() {
+    if (mAudioRefCount == 0) {
+        ALOGW("BatteryNotifier::noteStop(): audio refcount is broken!");
+        return;
+    }
+
+    mAudioRefCount--;
+    if (mAudioRefCount == 0 && mBatteryStatService != NULL) {
+        mBatteryStatService->noteStopAudio(AID_MEDIA);
+    }
+}
 // static
 sp<MediaCodec> MediaCodec::CreateByType(
         const sp<ALooper> &looper, const char *mime, bool encoder) {
@@ -71,6 +140,8 @@
       mReplyID(0),
       mFlags(0),
       mSoftRenderer(NULL),
+      mBatteryStatNotified(false),
+      mIsVideo(false),
       mDequeueInputTimeoutGeneration(0),
       mDequeueInputReplyID(0),
       mDequeueOutputTimeoutGeneration(0),
@@ -756,7 +827,6 @@
                 case CodecBase::kWhatComponentConfigured:
                 {
                     CHECK_EQ(mState, CONFIGURING);
-                    setState(CONFIGURED);
 
                     // reset input surface flag
                     mHaveInputSurface = false;
@@ -764,6 +834,7 @@
                     CHECK(msg->findMessage("input-format", &mInputFormat));
                     CHECK(msg->findMessage("output-format", &mOutputFormat));
 
+                    setState(CONFIGURED);
                     (new AMessage)->postReply(mReplyID);
                     break;
                 }
@@ -1620,6 +1691,8 @@
     mState = newState;
 
     cancelPendingDequeueOperations();
+
+    updateBatteryStat();
 }
 
 void MediaCodec::returnBuffersToCodec() {
@@ -2054,4 +2127,34 @@
     return OK;
 }
 
+void MediaCodec::updateBatteryStat() {
+    if (mState == CONFIGURED && !mBatteryStatNotified) {
+        AString mime;
+        CHECK(mOutputFormat != NULL &&
+                mOutputFormat->findString("mime", &mime));
+
+        mIsVideo = mime.startsWithIgnoreCase("video/");
+
+        BatteryNotifier& notifier(BatteryNotifier::getInstance());
+
+        if (mIsVideo) {
+            notifier.noteStartVideo();
+        } else {
+            notifier.noteStartAudio();
+        }
+
+        mBatteryStatNotified = true;
+    } else if (mState == UNINITIALIZED && mBatteryStatNotified) {
+        BatteryNotifier& notifier(BatteryNotifier::getInstance());
+
+        if (mIsVideo) {
+            notifier.noteStopVideo();
+        } else {
+            notifier.noteStopAudio();
+        }
+
+        mBatteryStatNotified = false;
+    }
+}
+
 }  // namespace android
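
A standalone sketch of the refcounting that BatteryNotifier layers over the battery-stats service above: a start is reported only on the 0 -> 1 transition and a stop only on the 1 -> 0 transition, so several concurrent codecs produce exactly one start/stop pair. BatteryReporter is a stand-in for the IBatteryStats binder interface; only the video path is shown, the audio path is symmetric.

#include <cstdio>

// Stand-in for the IBatteryStats calls used above.
struct BatteryReporter {
    void noteStartVideo() { std::printf("batterystats: video on\n"); }
    void noteStopVideo()  { std::printf("batterystats: video off\n"); }
};

class VideoBatteryNotifier {
public:
    explicit VideoBatteryNotifier(BatteryReporter *reporter) : mReporter(reporter) {}

    // Report only the 0 -> 1 transition.
    void noteStartVideo() {
        if (mVideoRefCount == 0 && mReporter != nullptr) {
            mReporter->noteStartVideo();
        }
        mVideoRefCount++;
    }

    // Report only the 1 -> 0 transition; guard against unbalanced stops.
    void noteStopVideo() {
        if (mVideoRefCount == 0) {
            std::printf("warning: video refcount is broken!\n");
            return;
        }
        if (--mVideoRefCount == 0 && mReporter != nullptr) {
            mReporter->noteStopVideo();
        }
    }

private:
    BatteryReporter *mReporter;
    int mVideoRefCount = 0;
};

int main() {
    BatteryReporter reporter;
    VideoBatteryNotifier notifier(&reporter);
    notifier.noteStartVideo();  // first codec: "video on"
    notifier.noteStartVideo();  // second codec: no extra report
    notifier.noteStopVideo();   // one codec still running: no report
    notifier.noteStopVideo();   // last codec: "video off"
    return 0;
}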
diff --git a/media/libstagefright/webm/WebmWriter.h b/media/libstagefright/webm/WebmWriter.h
index 529dec8..36b6965 100644
--- a/media/libstagefright/webm/WebmWriter.h
+++ b/media/libstagefright/webm/WebmWriter.h
@@ -41,14 +41,14 @@
     ~WebmWriter() { reset(); }
 
 
-    status_t addSource(const sp<MediaSource> &source);
-    status_t start(MetaData *param = NULL);
-    status_t stop();
-    status_t pause();
-    bool reachedEOS();
+    virtual status_t addSource(const sp<MediaSource> &source);
+    virtual status_t start(MetaData *param = NULL);
+    virtual status_t stop();
+    virtual status_t pause();
+    virtual bool reachedEOS();
 
-    void setStartTimeOffsetMs(int ms) { mStartTimeOffsetMs = ms; }
-    int32_t getStartTimeOffsetMs() const { return mStartTimeOffsetMs; }
+    virtual void setStartTimeOffsetMs(int ms) { mStartTimeOffsetMs = ms; }
+    virtual int32_t getStartTimeOffsetMs() const { return mStartTimeOffsetMs; }
 
 private:
     int mFd;
diff --git a/media/mtp/Android.mk b/media/mtp/Android.mk
index ac608a1..3af0956 100644
--- a/media/mtp/Android.mk
+++ b/media/mtp/Android.mk
@@ -39,9 +39,6 @@
 
 LOCAL_CFLAGS := -DMTP_DEVICE -DMTP_HOST
 
-# Needed for <bionic_time.h>
-LOCAL_C_INCLUDES := bionic/libc/private
-
 LOCAL_SHARED_LIBRARIES := libutils libcutils liblog libusbhost libbinder
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/mtp/MtpUtils.cpp b/media/mtp/MtpUtils.cpp
index 6ec8876..0667bdd 100644
--- a/media/mtp/MtpUtils.cpp
+++ b/media/mtp/MtpUtils.cpp
@@ -19,7 +19,8 @@
 #include <stdio.h>
 #include <time.h>
 
-#include <cutils/tztime.h>
+#include <../private/bionic_time.h> /* TODO: switch this code to icu4c! */
+
 #include "MtpUtils.h"
 
 namespace android {
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 1ad6285..f10a561 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -1531,7 +1531,7 @@
     }
 
     audio_module_handle_t handle = nextUniqueId();
-    mAudioHwDevs.add(handle, new AudioHwDevice(name, dev, flags));
+    mAudioHwDevs.add(handle, new AudioHwDevice(handle, name, dev, flags));
 
     ALOGI("loadHwModule() Loaded %s audio interface from %s (%s) handle %d",
           name, dev->common.module->name, dev->common.module->id, handle);
@@ -1575,6 +1575,84 @@
 
 // ----------------------------------------------------------------------------
 
+
+sp<AudioFlinger::PlaybackThread> AudioFlinger::openOutput_l(audio_module_handle_t module,
+                                                            audio_devices_t device,
+                                                            struct audio_config *config,
+                                                            audio_output_flags_t flags)
+{
+    AudioHwDevice *outHwDev = findSuitableHwDev_l(module, device);
+    if (outHwDev == NULL) {
+        return AUDIO_IO_HANDLE_NONE;
+    }
+
+    audio_hw_device_t *hwDevHal = outHwDev->hwDevice();
+    audio_io_handle_t id = nextUniqueId();
+
+    mHardwareStatus = AUDIO_HW_OUTPUT_OPEN;
+
+    audio_stream_out_t *outStream = NULL;
+
+    // FOR TESTING ONLY:
+    // This if statement allows overriding the audio policy settings
+    // and forcing a specific format or channel mask to the HAL/Sink device for testing.
+    if (!(flags & (AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD | AUDIO_OUTPUT_FLAG_DIRECT))) {
+        // Check only for Normal Mixing mode
+        if (kEnableExtendedPrecision) {
+            // Specify format (uncomment one below to choose)
+            //config->format = AUDIO_FORMAT_PCM_FLOAT;
+            //config->format = AUDIO_FORMAT_PCM_24_BIT_PACKED;
+            //config->format = AUDIO_FORMAT_PCM_32_BIT;
+            //config->format = AUDIO_FORMAT_PCM_8_24_BIT;
+            // ALOGV("openOutput() upgrading format to %#08x", config.format);
+        }
+        if (kEnableExtendedChannels) {
+            // Specify channel mask (uncomment one below to choose)
+            //config->channel_mask = audio_channel_out_mask_from_count(4);  // for USB 4ch
+            //config->channel_mask = audio_channel_mask_from_representation_and_bits(
+            //        AUDIO_CHANNEL_REPRESENTATION_INDEX, (1 << 4) - 1);  // another 4ch example
+        }
+    }
+
+    status_t status = hwDevHal->open_output_stream(hwDevHal,
+                                          id,
+                                          device,
+                                          flags,
+                                          config,
+                                          &outStream);
+
+    mHardwareStatus = AUDIO_HW_IDLE;
+    ALOGV("openOutput() openOutputStream returned output %p, sampleRate %d, Format %#x, "
+            "channelMask %#x, status %d",
+            outStream,
+            config->sample_rate,
+            config->format,
+            config->channel_mask,
+            status);
+
+    if (status == NO_ERROR && outStream != NULL) {
+        AudioStreamOut *output = new AudioStreamOut(outHwDev, outStream, flags);
+
+        PlaybackThread *thread;
+        if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
+            thread = new OffloadThread(this, output, id, device);
+            ALOGV("openOutput() created offload output: ID %d thread %p", id, thread);
+        } else if ((flags & AUDIO_OUTPUT_FLAG_DIRECT)
+                || !isValidPcmSinkFormat(config->format)
+                || !isValidPcmSinkChannelMask(config->channel_mask)) {
+            thread = new DirectOutputThread(this, output, id, device);
+            ALOGV("openOutput() created direct output: ID %d thread %p", id, thread);
+        } else {
+            thread = new MixerThread(this, output, id, device);
+            ALOGV("openOutput() created mixer output: ID %d thread %p", id, thread);
+        }
+        mPlaybackThreads.add(id, thread);
+        return thread;
+    }
+
+    return 0;
+}
+
 audio_io_handle_t AudioFlinger::openOutput(audio_module_handle_t module,
                                            audio_devices_t *pDevices,
                                            uint32_t *pSamplingRate,
@@ -1609,64 +1687,8 @@
 
     Mutex::Autolock _l(mLock);
 
-    AudioHwDevice *outHwDev = findSuitableHwDev_l(module, *pDevices);
-    if (outHwDev == NULL) {
-        return AUDIO_IO_HANDLE_NONE;
-    }
-
-    audio_hw_device_t *hwDevHal = outHwDev->hwDevice();
-    audio_io_handle_t id = nextUniqueId();
-
-    mHardwareStatus = AUDIO_HW_OUTPUT_OPEN;
-
-    audio_stream_out_t *outStream = NULL;
-
-    // FOR TESTING ONLY:
-    // Enable increased sink precision for mixing mode if kEnableExtendedPrecision is true.
-    if (kEnableExtendedPrecision &&  // Check only for Normal Mixing mode
-            !(flags & (AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD | AUDIO_OUTPUT_FLAG_DIRECT))) {
-        // Update format
-        //config.format = AUDIO_FORMAT_PCM_FLOAT;
-        //config.format = AUDIO_FORMAT_PCM_24_BIT_PACKED;
-        //config.format = AUDIO_FORMAT_PCM_32_BIT;
-        //config.format = AUDIO_FORMAT_PCM_8_24_BIT;
-        // ALOGV("openOutput() upgrading format to %#08x", config.format);
-    }
-
-    status_t status = hwDevHal->open_output_stream(hwDevHal,
-                                          id,
-                                          *pDevices,
-                                          (audio_output_flags_t)flags,
-                                          &config,
-                                          &outStream);
-
-    mHardwareStatus = AUDIO_HW_IDLE;
-    ALOGV("openOutput() openOutputStream returned output %p, SamplingRate %d, Format %#08x, "
-            "Channels %x, status %d",
-            outStream,
-            config.sample_rate,
-            config.format,
-            config.channel_mask,
-            status);
-
-    if (status == NO_ERROR && outStream != NULL) {
-        AudioStreamOut *output = new AudioStreamOut(outHwDev, outStream, flags);
-
-        PlaybackThread *thread;
-        if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
-            thread = new OffloadThread(this, output, id, *pDevices);
-            ALOGV("openOutput() created offload output: ID %d thread %p", id, thread);
-        } else if ((flags & AUDIO_OUTPUT_FLAG_DIRECT)
-                || !isValidPcmSinkFormat(config.format)
-                || (config.channel_mask != AUDIO_CHANNEL_OUT_STEREO)) {
-            thread = new DirectOutputThread(this, output, id, *pDevices);
-            ALOGV("openOutput() created direct output: ID %d thread %p", id, thread);
-        } else {
-            thread = new MixerThread(this, output, id, *pDevices);
-            ALOGV("openOutput() created mixer output: ID %d thread %p", id, thread);
-        }
-        mPlaybackThreads.add(id, thread);
-
+    sp<PlaybackThread> thread = openOutput_l(module, *pDevices, &config, flags);
+    if (thread != 0) {
         if (pSamplingRate != NULL) {
             *pSamplingRate = config.sample_rate;
         }
@@ -1686,16 +1708,16 @@
         // the first primary output opened designates the primary hw device
         if ((mPrimaryHardwareDev == NULL) && (flags & AUDIO_OUTPUT_FLAG_PRIMARY)) {
             ALOGI("Using module %d has the primary audio interface", module);
-            mPrimaryHardwareDev = outHwDev;
+            mPrimaryHardwareDev = thread->getOutput()->audioHwDev;
 
             AutoMutex lock(mHardwareLock);
             mHardwareStatus = AUDIO_HW_SET_MODE;
-            hwDevHal->set_mode(hwDevHal, mMode);
+            mPrimaryHardwareDev->hwDevice()->set_mode(mPrimaryHardwareDev->hwDevice(), mMode);
             mHardwareStatus = AUDIO_HW_IDLE;
 
             mPrimaryOutputSampleRate = config.sample_rate;
         }
-        return id;
+        return thread->id();
     }
 
     return AUDIO_IO_HANDLE_NONE;
@@ -1776,15 +1798,28 @@
     // but the ThreadBase container still exists.
 
     if (thread->type() != ThreadBase::DUPLICATING) {
-        AudioStreamOut *out = thread->clearOutput();
-        ALOG_ASSERT(out != NULL, "out shouldn't be NULL");
-        // from now on thread->mOutput is NULL
-        out->hwDev()->close_output_stream(out->hwDev(), out->stream);
-        delete out;
+        closeOutputFinish(thread);
     }
+
     return NO_ERROR;
 }
 
+void AudioFlinger::closeOutputFinish(sp<PlaybackThread> thread)
+{
+    AudioStreamOut *out = thread->clearOutput();
+    ALOG_ASSERT(out != NULL, "out shouldn't be NULL");
+    // from now on thread->mOutput is NULL
+    out->hwDev()->close_output_stream(out->hwDev(), out->stream);
+    delete out;
+}
+
+void AudioFlinger::closeOutputInternal_l(sp<PlaybackThread> thread)
+{
+    mPlaybackThreads.removeItem(thread->mId);
+    thread->exit();
+    closeOutputFinish(thread);
+}
+
 status_t AudioFlinger::suspendOutput(audio_io_handle_t output)
 {
     Mutex::Autolock _l(mLock);
@@ -1823,6 +1858,12 @@
                                           audio_channel_mask_t *pChannelMask,
                                           audio_input_flags_t flags)
 {
+    Mutex::Autolock _l(mLock);
+
+    if (pDevices == NULL || *pDevices == AUDIO_DEVICE_NONE) {
+        return AUDIO_IO_HANDLE_NONE;
+    }
+
     struct audio_config config;
     memset(&config, 0, sizeof(config));
     config.sample_rate = (pSamplingRate != NULL) ? *pSamplingRate : 0;
@@ -1833,13 +1874,36 @@
     audio_format_t reqFormat = config.format;
     audio_channel_mask_t reqChannelMask = config.channel_mask;
 
-    if (pDevices == NULL || *pDevices == AUDIO_DEVICE_NONE) {
-        return 0;
+    sp<RecordThread> thread = openInput_l(module, *pDevices, &config, flags);
+
+    if (thread != 0) {
+        if (pSamplingRate != NULL) {
+            *pSamplingRate = reqSamplingRate;
+        }
+        if (pFormat != NULL) {
+            *pFormat = config.format;
+        }
+        if (pChannelMask != NULL) {
+            *pChannelMask = reqChannelMask;
+        }
+
+        // notify client processes of the new input creation
+        thread->audioConfigChanged(AudioSystem::INPUT_OPENED);
+        return thread->id();
     }
+    return AUDIO_IO_HANDLE_NONE;
+}
 
-    Mutex::Autolock _l(mLock);
+sp<AudioFlinger::RecordThread> AudioFlinger::openInput_l(audio_module_handle_t module,
+                                                         audio_devices_t device,
+                                                         struct audio_config *config,
+                                                         audio_input_flags_t flags)
+{
+    uint32_t reqSamplingRate = config->sample_rate;
+    audio_format_t reqFormat = config->format;
+    audio_channel_mask_t reqChannelMask = config->channel_mask;
 
-    AudioHwDevice *inHwDev = findSuitableHwDev_l(module, *pDevices);
+    AudioHwDevice *inHwDev = findSuitableHwDev_l(module, device);
     if (inHwDev == NULL) {
         return 0;
     }
@@ -1848,14 +1912,14 @@
     audio_io_handle_t id = nextUniqueId();
 
     audio_stream_in_t *inStream = NULL;
-    status_t status = inHwHal->open_input_stream(inHwHal, id, *pDevices, &config,
+    status_t status = inHwHal->open_input_stream(inHwHal, id, device, config,
                                         &inStream, flags);
     ALOGV("openInput() openInputStream returned input %p, SamplingRate %d, Format %#x, Channels %x, "
             "flags %#x, status %d",
             inStream,
-            config.sample_rate,
-            config.format,
-            config.channel_mask,
+            config->sample_rate,
+            config->format,
+            config->channel_mask,
             flags,
             status);
 
@@ -1863,14 +1927,14 @@
     // conversion internally, try to open again with the proposed parameters. The AudioFlinger can
     // resample the input and do mono to stereo or stereo to mono conversions on 16 bit PCM inputs.
     if (status == BAD_VALUE &&
-        reqFormat == config.format && config.format == AUDIO_FORMAT_PCM_16_BIT &&
-        (config.sample_rate <= 2 * reqSamplingRate) &&
-        (audio_channel_count_from_in_mask(config.channel_mask) <= FCC_2) &&
+        reqFormat == config->format && config->format == AUDIO_FORMAT_PCM_16_BIT &&
+        (config->sample_rate <= 2 * reqSamplingRate) &&
+        (audio_channel_count_from_in_mask(config->channel_mask) <= FCC_2) &&
         (audio_channel_count_from_in_mask(reqChannelMask) <= FCC_2)) {
         // FIXME describe the change proposed by HAL (save old values so we can log them here)
         ALOGV("openInput() reopening with proposed sampling rate and channel mask");
         inStream = NULL;
-        status = inHwHal->open_input_stream(inHwHal, id, *pDevices, &config, &inStream, flags);
+        status = inHwHal->open_input_stream(inHwHal, id, device, config, &inStream, flags);
         // FIXME log this new status; HAL should not propose any further changes
     }
 
@@ -1931,30 +1995,18 @@
         // Start record thread
         // RecordThread requires both input and output device indication to forward to audio
         // pre processing modules
-        RecordThread *thread = new RecordThread(this,
+        sp<RecordThread> thread = new RecordThread(this,
                                   input,
                                   id,
                                   primaryOutputDevice_l(),
-                                  *pDevices
+                                  device
 #ifdef TEE_SINK
                                   , teeSink
 #endif
                                   );
         mRecordThreads.add(id, thread);
-        ALOGV("openInput() created record thread: ID %d thread %p", id, thread);
-        if (pSamplingRate != NULL) {
-            *pSamplingRate = reqSamplingRate;
-        }
-        if (pFormat != NULL) {
-            *pFormat = config.format;
-        }
-        if (pChannelMask != NULL) {
-            *pChannelMask = reqChannelMask;
-        }
-
-        // notify client processes of the new input creation
-        thread->audioConfigChanged(AudioSystem::INPUT_OPENED);
-        return id;
+        ALOGV("openInput() created record thread: ID %d thread %p", id, thread.get());
+        return thread;
     }
 
     return 0;
@@ -1981,17 +2033,26 @@
         audioConfigChanged(AudioSystem::INPUT_CLOSED, input, NULL);
         mRecordThreads.removeItem(input);
     }
-    thread->exit();
-    // The thread entity (active unit of execution) is no longer running here,
-    // but the ThreadBase container still exists.
+    // FIXME: calling thread->exit() without mLock held should not be needed anymore now that
+    // we have a different lock for notification client
+    closeInputFinish(thread);
+    return NO_ERROR;
+}
 
+void AudioFlinger::closeInputFinish(sp<RecordThread> thread)
+{
+    thread->exit();
     AudioStreamIn *in = thread->clearInput();
     ALOG_ASSERT(in != NULL, "in shouldn't be NULL");
     // from now on thread->mInput is NULL
     in->hwDev()->close_input_stream(in->hwDev(), in->stream);
     delete in;
+}
 
-    return NO_ERROR;
+void AudioFlinger::closeInputInternal_l(sp<RecordThread> thread)
+{
+    mRecordThreads.removeItem(thread->mId);
+    closeInputFinish(thread);
 }
 
 status_t AudioFlinger::invalidateStream(audio_stream_type_t stream)
@@ -2462,6 +2523,16 @@
         return INVALID_OPERATION;
     }
 
+    // Check whether the destination thread has a channel count of FCC_2, which is
+    // currently required for (most) effects. Prevent moving the effect chain here rather
+    // than disabling the addEffect_l() call in dstThread below.
+    if (dstThread->mChannelCount != FCC_2) {
+        ALOGW("moveEffectChain_l() effect chain failed because"
+                " destination thread %p channel count(%u) != %u",
+                dstThread, dstThread->mChannelCount, FCC_2);
+        return INVALID_OPERATION;
+    }
+
     // remove chain first. This is useful only if reconfiguring effect chain on same output thread,
     // so that a new chain is created with correct parameters when first effect is added. This is
     // otherwise unnecessary as removeEffect_l() will remove the chain when last effect is
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index bae18fd..ab4c567 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -55,6 +55,7 @@
 #include "FastMixer.h"
 #include <media/nbaio/NBAIO.h>
 #include "AudioWatchdog.h"
+#include "AudioMixer.h"
 
 #include <powermanager/IPowerManager.h>
 
@@ -327,6 +328,30 @@
                                                 audio_devices_t devices);
     void                    purgeStaleEffects_l();
 
+    // Set kEnableExtendedChannels to true to enable greater than stereo output
+    // for the MixerThread and device sink.  Number of channels allowed is
+    // FCC_2 <= channels <= AudioMixer::MAX_NUM_CHANNELS.
+    static const bool kEnableExtendedChannels = false;
+
+    // Returns true if channel mask is permitted for the PCM sink in the MixerThread
+    static inline bool isValidPcmSinkChannelMask(audio_channel_mask_t channelMask) {
+        switch (audio_channel_mask_get_representation(channelMask)) {
+        case AUDIO_CHANNEL_REPRESENTATION_POSITION: {
+            uint32_t channelCount = FCC_2; // stereo is default
+            if (kEnableExtendedChannels) {
+                channelCount = audio_channel_count_from_out_mask(channelMask);
+                if (channelCount > AudioMixer::MAX_NUM_CHANNELS) {
+                    return false;
+                }
+            }
+            // check that channelMask is the "canonical" one we expect for the channelCount.
+            return channelMask == audio_channel_out_mask_from_count(channelCount);
+            }
+        default:
+            return false;
+        }
+    }
+
     // Set kEnableExtendedPrecision to true to use extended precision in MixerThread
     static const bool kEnableExtendedPrecision = true;
 
@@ -489,6 +514,18 @@
               PlaybackThread *checkPlaybackThread_l(audio_io_handle_t output) const;
               MixerThread *checkMixerThread_l(audio_io_handle_t output) const;
               RecordThread *checkRecordThread_l(audio_io_handle_t input) const;
+              sp<RecordThread> openInput_l(audio_module_handle_t module,
+                                           audio_devices_t device,
+                                           struct audio_config *config,
+                                           audio_input_flags_t flags);
+              sp<PlaybackThread> openOutput_l(audio_module_handle_t module,
+                                              audio_devices_t device,
+                                              struct audio_config *config,
+                                              audio_output_flags_t flags);
+
+              void closeOutputFinish(sp<PlaybackThread> thread);
+              void closeInputFinish(sp<RecordThread> thread);
+
               // no range check, AudioFlinger::mLock held
               bool streamMute_l(audio_stream_type_t stream) const
                                 { return mStreamTypes[stream].mute; }
@@ -530,10 +567,11 @@
             AHWD_CAN_SET_MASTER_MUTE    = 0x2,
         };
 
-        AudioHwDevice(const char *moduleName,
+        AudioHwDevice(audio_module_handle_t handle,
+                      const char *moduleName,
                       audio_hw_device_t *hwDevice,
                       Flags flags)
-            : mModuleName(strdup(moduleName))
+            : mHandle(handle), mModuleName(strdup(moduleName))
             , mHwDevice(hwDevice)
             , mFlags(flags) { }
         /*virtual*/ ~AudioHwDevice() { free((void *)mModuleName); }
@@ -546,11 +584,13 @@
             return (0 != (mFlags & AHWD_CAN_SET_MASTER_MUTE));
         }
 
+        audio_module_handle_t handle() const { return mHandle; }
         const char *moduleName() const { return mModuleName; }
         audio_hw_device_t *hwDevice() const { return mHwDevice; }
         uint32_t version() const { return mHwDevice->common.version; }
 
     private:
+        const audio_module_handle_t mHandle;
         const char * const mModuleName;
         audio_hw_device_t * const mHwDevice;
         const Flags mFlags;
@@ -669,7 +709,9 @@
 
     // for use from destructor
     status_t    closeOutput_nonvirtual(audio_io_handle_t output);
+    void        closeOutputInternal_l(sp<PlaybackThread> thread);
     status_t    closeInput_nonvirtual(audio_io_handle_t input);
+    void        closeInputInternal_l(sp<RecordThread> thread);
 
 #ifdef TEE_SINK
     // all record threads serially share a common tee sink, which is re-created on format change
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index 529f2af..6edca1b 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -40,16 +40,19 @@
 #include <common_time/cc_helper.h>
 
 #include <media/EffectsFactoryApi.h>
+#include <audio_effects/effect_downmix.h>
 
 #include "AudioMixerOps.h"
 #include "AudioMixer.h"
 
-// Use the FCC_2 macro for code assuming Fixed Channel Count of 2 and
-// whose stereo assumption may need to be revisited later.
+// The FCC_2 macro refers to the Fixed Channel Count of 2 for the legacy integer mixer.
 #ifndef FCC_2
 #define FCC_2 2
 #endif
 
+// Look for the MONO_HACK marker for any hack involving legacy mono-to-stereo
+// channel conversion.
+
 /* VERY_VERY_VERBOSE_LOGGING will show exactly which process hook and track hook is
  * being used. This is a considerable amount of log spam, so don't enable unless you
  * are verifying the hook based code.
@@ -99,7 +102,7 @@
     ALOGV("CopyBufferProvider(%p)(%zu, %zu, %zu)", this,
             inputFrameSize, outputFrameSize, bufferFrameCount);
     LOG_ALWAYS_FATAL_IF(inputFrameSize < outputFrameSize && bufferFrameCount == 0,
-            "Requires local buffer if inputFrameSize(%d) < outputFrameSize(%d)",
+            "Requires local buffer if inputFrameSize(%zu) < outputFrameSize(%zu)",
             inputFrameSize, outputFrameSize);
     if (mLocalBufferFrameCount) {
         (void)posix_memalign(&mLocalBufferData, 32, mLocalBufferFrameCount * mOutputFrameSize);
@@ -335,7 +338,7 @@
         mInputChannels(audio_channel_count_from_out_mask(inputChannelMask)),
         mOutputChannels(audio_channel_count_from_out_mask(outputChannelMask))
 {
-    ALOGV("RemixBufferProvider(%p)(%#x, %#x, %#x) %d %d",
+    ALOGV("RemixBufferProvider(%p)(%#x, %#x, %#x) %zu %zu",
             this, format, inputChannelMask, outputChannelMask,
             mInputChannels, mOutputChannels);
     // TODO: consider channel representation in index array formulation
@@ -379,18 +382,12 @@
     :   mTrackNames(0), mConfiguredNames((maxNumTracks >= 32 ? 0 : 1 << maxNumTracks) - 1),
         mSampleRate(sampleRate)
 {
-    // AudioMixer is not yet capable of multi-channel beyond stereo
-    COMPILE_TIME_ASSERT_FUNCTION_SCOPE(2 == MAX_NUM_CHANNELS);
-
     ALOG_ASSERT(maxNumTracks <= MAX_NUM_TRACKS, "maxNumTracks %u > MAX_NUM_TRACKS %u",
             maxNumTracks, MAX_NUM_TRACKS);
 
     // AudioMixer is not yet capable of more than 32 active track inputs
     ALOG_ASSERT(32 >= MAX_NUM_TRACKS, "bad MAX_NUM_TRACKS %d", MAX_NUM_TRACKS);
 
-    // AudioMixer is not yet capable of multi-channel output beyond stereo
-    ALOG_ASSERT(2 == MAX_NUM_CHANNELS, "bad MAX_NUM_CHANNELS %d", MAX_NUM_CHANNELS);
-
     pthread_once(&sOnceControl, &sInitRoutine);
 
     mState.enabledTracks= 0;
@@ -476,7 +473,7 @@
         // t->frameCount
         t->channelCount = audio_channel_count_from_out_mask(channelMask);
         t->enabled = false;
-        ALOGV_IF(channelMask != AUDIO_CHANNEL_OUT_STEREO,
+        ALOGV_IF(audio_channel_mask_get_bits(channelMask) != AUDIO_CHANNEL_OUT_STEREO,
                 "Non-stereo channel mask: %d\n", channelMask);
         t->channelMask = channelMask;
         t->sessionId = sessionId;
@@ -499,8 +496,11 @@
         t->mFormat = format;
         t->mMixerInFormat = kUseFloat && kUseNewMixer
                 ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
+        t->mMixerChannelMask = audio_channel_mask_from_representation_and_bits(
+                AUDIO_CHANNEL_REPRESENTATION_POSITION, AUDIO_CHANNEL_OUT_STEREO);
+        t->mMixerChannelCount = audio_channel_count_from_out_mask(t->mMixerChannelMask);
         // Check the downmixing (or upmixing) requirements.
-        status_t status = initTrackDownmix(t, n, channelMask);
+        status_t status = initTrackDownmix(t, n);
         if (status != OK) {
             ALOGE("AudioMixer::getTrackName invalid channelMask (%#x)", channelMask);
             return -1;
@@ -525,21 +525,69 @@
     }
  }
 
-status_t AudioMixer::initTrackDownmix(track_t* pTrack, int trackNum, audio_channel_mask_t mask)
-{
-    uint32_t channelCount = audio_channel_count_from_out_mask(mask);
-    ALOG_ASSERT((channelCount <= MAX_NUM_CHANNELS_TO_DOWNMIX) && channelCount);
-    status_t status = OK;
-    if (channelCount > MAX_NUM_CHANNELS) {
-        pTrack->channelMask = mask;
-        pTrack->channelCount = channelCount;
-        ALOGV("initTrackDownmix(track=%d, mask=0x%x) calls prepareTrackForDownmix()",
-                trackNum, mask);
-        status = prepareTrackForDownmix(pTrack, trackNum);
-    } else {
-        unprepareTrackForDownmix(pTrack, trackNum);
+// Called when channel masks have changed for a track name
+// TODO: Fix DownmixerBufferProvider not to (possibly) change the mixer input format,
+// which will simplify this logic.
+bool AudioMixer::setChannelMasks(int name,
+        audio_channel_mask_t trackChannelMask, audio_channel_mask_t mixerChannelMask) {
+    track_t &track = mState.tracks[name];
+
+    if (trackChannelMask == track.channelMask
+            && mixerChannelMask == track.mMixerChannelMask) {
+        return false;  // no need to change
     }
-    return status;
+    // always recompute for both channel masks even if only one has changed.
+    const uint32_t trackChannelCount = audio_channel_count_from_out_mask(trackChannelMask);
+    const uint32_t mixerChannelCount = audio_channel_count_from_out_mask(mixerChannelMask);
+    const bool mixerChannelCountChanged = track.mMixerChannelCount != mixerChannelCount;
+
+    ALOG_ASSERT((trackChannelCount <= MAX_NUM_CHANNELS_TO_DOWNMIX)
+            && trackChannelCount
+            && mixerChannelCount);
+    track.channelMask = trackChannelMask;
+    track.channelCount = trackChannelCount;
+    track.mMixerChannelMask = mixerChannelMask;
+    track.mMixerChannelCount = mixerChannelCount;
+
+    // channel masks have changed, does this track need a downmixer?
+    // update to try using our desired format (if we aren't already using it)
+    const audio_format_t prevMixerInFormat = track.mMixerInFormat;
+    track.mMixerInFormat = kUseFloat && kUseNewMixer
+            ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
+    const status_t status = initTrackDownmix(&mState.tracks[name], name);
+    ALOGE_IF(status != OK,
+            "initTrackDownmix error %d, track channel mask %#x, mixer channel mask %#x",
+            status, track.channelMask, track.mMixerChannelMask);
+
+    const bool mixerInFormatChanged = prevMixerInFormat != track.mMixerInFormat;
+    if (mixerInFormatChanged) {
+        prepareTrackForReformat(&track, name); // because of downmixer, track format may change!
+    }
+
+    if (track.resampler && (mixerInFormatChanged || mixerChannelCountChanged)) {
+        // resampler input format or channels may have changed.
+        const uint32_t resetToSampleRate = track.sampleRate;
+        delete track.resampler;
+        track.resampler = NULL;
+        track.sampleRate = mSampleRate; // without resampler, track rate is device sample rate.
+        // recreate the resampler with updated format, channels, saved sampleRate.
+        track.setResampler(resetToSampleRate /*trackSampleRate*/, mSampleRate /*devSampleRate*/);
+    }
+    return true;
+}
+
+status_t AudioMixer::initTrackDownmix(track_t* pTrack, int trackName)
+{
+    // Only remix (upmix or downmix) if the track and mixer/device channel masks differ
+    // and the conversion is not already handled internally, as mono -> stereo currently is.
+    if (pTrack->channelMask != pTrack->mMixerChannelMask
+            && !(pTrack->channelMask == AUDIO_CHANNEL_OUT_MONO
+                    && pTrack->mMixerChannelMask == AUDIO_CHANNEL_OUT_STEREO)) {
+        return prepareTrackForDownmix(pTrack, trackName);
+    }
+    // no remix necessary
+    unprepareTrackForDownmix(pTrack, trackName);
+    return NO_ERROR;
 }
 
 void AudioMixer::unprepareTrackForDownmix(track_t* pTrack, int trackName __unused) {
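Note: the new initTrackDownmix() above reduces to a single predicate. A condensed sketch follows; the helper name is hypothetical, not part of this patch.

    // Hypothetical helper equivalent to the decision in initTrackDownmix():
    // mono -> stereo is handled internally by the mixer (the MONO_HACK path);
    // any other mask mismatch needs a downmix/remix buffer provider.
    static bool needsChannelConverter(audio_channel_mask_t trackMask,
            audio_channel_mask_t mixerMask) {
        return trackMask != mixerMask
                && !(trackMask == AUDIO_CHANNEL_OUT_MONO
                        && mixerMask == AUDIO_CHANNEL_OUT_STEREO);
    }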
@@ -564,8 +612,8 @@
     unprepareTrackForDownmix(pTrack, trackName);
     if (DownmixerBufferProvider::isMultichannelCapable()) {
         DownmixerBufferProvider* pDbp = new DownmixerBufferProvider(pTrack->channelMask,
-                /* pTrack->mMixerChannelMask */ audio_channel_out_mask_from_count(2),
-                /* pTrack->mMixerInFormat */ AUDIO_FORMAT_PCM_16_BIT,
+                pTrack->mMixerChannelMask,
+                AUDIO_FORMAT_PCM_16_BIT /* TODO: use pTrack->mMixerInFormat, now only PCM 16 */,
                 pTrack->sampleRate, pTrack->sessionId, kCopyBufferFrameCount);
 
         if (pDbp->isValid()) { // if constructor completed properly
@@ -576,9 +624,14 @@
         }
         delete pDbp;
     }
-    pTrack->downmixerBufferProvider = NULL;
+
+    // Effect downmixer does not accept the channel conversion.  Let's use our remixer.
+    RemixBufferProvider* pRbp = new RemixBufferProvider(pTrack->channelMask,
+            pTrack->mMixerChannelMask, pTrack->mMixerInFormat, kCopyBufferFrameCount);
+    // Remix always finds a conversion whereas Downmixer effect above may fail.
+    pTrack->downmixerBufferProvider = pRbp;
     reconfigureBufferProviders(pTrack);
-    return NO_INIT;
+    return NO_ERROR;
 }
 
 void AudioMixer::unprepareTrackForReformat(track_t* pTrack, int trackName __unused) {
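Note: prepareTrackForDownmix() now tries the downmix effect first and falls back to the remixer. A condensed, hypothetical sketch of that ordering (the helper name and free-function form are assumptions; the actual code assigns t->downmixerBufferProvider directly):

    AudioBufferProvider* selectChannelConverter(AudioMixer::track_t* t) {
        if (DownmixerBufferProvider::isMultichannelCapable()) {
            DownmixerBufferProvider* dbp = new DownmixerBufferProvider(t->channelMask,
                    t->mMixerChannelMask, AUDIO_FORMAT_PCM_16_BIT /* effect is 16-bit only */,
                    t->sampleRate, t->sessionId, kCopyBufferFrameCount);
            if (dbp->isValid()) {
                return dbp;      // the downmix effect accepted this conversion
            }
            delete dbp;          // the effect rejected it
        }
        // The remixer can always derive a conversion between positional masks.
        return new RemixBufferProvider(t->channelMask, t->mMixerChannelMask,
                t->mMixerInFormat, kCopyBufferFrameCount);
    }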
@@ -748,23 +801,10 @@
     case TRACK:
         switch (param) {
         case CHANNEL_MASK: {
-            audio_channel_mask_t mask =
-                static_cast<audio_channel_mask_t>(reinterpret_cast<uintptr_t>(value));
-            if (track.channelMask != mask) {
-                uint32_t channelCount = audio_channel_count_from_out_mask(mask);
-                ALOG_ASSERT((channelCount <= MAX_NUM_CHANNELS_TO_DOWNMIX) && channelCount);
-                track.channelMask = mask;
-                track.channelCount = channelCount;
-                // the mask has changed, does this track need a downmixer?
-                // update to try using our desired format (if we aren't already using it)
-                track.mMixerInFormat = kUseFloat && kUseNewMixer
-                        ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
-                status_t status = initTrackDownmix(&mState.tracks[name], name, mask);
-                ALOGE_IF(status != OK,
-                        "Invalid channel mask %#x, initTrackDownmix returned %d",
-                        mask, status);
-                ALOGV("setParameter(TRACK, CHANNEL_MASK, %x)", mask);
-                prepareTrackForReformat(&track, name); // format may have changed
+            const audio_channel_mask_t trackChannelMask =
+                static_cast<audio_channel_mask_t>(valueInt);
+            if (setChannelMasks(name, trackChannelMask, track.mMixerChannelMask)) {
+                ALOGV("setParameter(TRACK, CHANNEL_MASK, %x)", trackChannelMask);
                 invalidateState(1 << name);
             }
             } break;
@@ -803,6 +843,14 @@
                 ALOGV("setParameter(TRACK, MIXER_FORMAT, %#x)", format);
             }
             } break;
+        case MIXER_CHANNEL_MASK: {
+            const audio_channel_mask_t mixerChannelMask =
+                    static_cast<audio_channel_mask_t>(valueInt);
+            if (setChannelMasks(name, track.channelMask, mixerChannelMask)) {
+                ALOGV("setParameter(TRACK, MIXER_CHANNEL_MASK, %#x)", mixerChannelMask);
+                invalidateState(1 << name);
+            }
+            } break;
         default:
             LOG_ALWAYS_FATAL("setParameter track: bad param %d", param);
         }
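Note: the FastMixer change later in this patch drives the new parameter as follows (the variable names here are placeholders):

    mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::CHANNEL_MASK,
            (void *)(uintptr_t)trackChannelMask);     // the track's own mask
    mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MIXER_CHANNEL_MASK,
            (void *)(uintptr_t)sinkChannelMask);      // the mixer/sink output mask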
@@ -836,20 +884,6 @@
     case RAMP_VOLUME:
     case VOLUME:
         switch (param) {
-        case VOLUME0:
-        case VOLUME1:
-            if (setVolumeRampVariables(*reinterpret_cast<float*>(value),
-                    target == RAMP_VOLUME ? mState.frameCount : 0,
-                    &track.volume[param - VOLUME0], &track.prevVolume[param - VOLUME0],
-                    &track.volumeInc[param - VOLUME0],
-                    &track.mVolume[param - VOLUME0], &track.mPrevVolume[param - VOLUME0],
-                    &track.mVolumeInc[param - VOLUME0])) {
-                ALOGV("setParameter(%s, VOLUME%d: %04x)",
-                        target == VOLUME ? "VOLUME" : "RAMP_VOLUME", param - VOLUME0,
-                                track.volume[param - VOLUME0]);
-                invalidateState(1 << name);
-            }
-            break;
         case AUXLEVEL:
             if (setVolumeRampVariables(*reinterpret_cast<float*>(value),
                     target == RAMP_VOLUME ? mState.frameCount : 0,
@@ -861,7 +895,21 @@
             }
             break;
         default:
-            LOG_ALWAYS_FATAL("setParameter volume: bad param %d", param);
+            if ((unsigned)param >= VOLUME0 && (unsigned)param < VOLUME0 + MAX_NUM_VOLUMES) {
+                if (setVolumeRampVariables(*reinterpret_cast<float*>(value),
+                        target == RAMP_VOLUME ? mState.frameCount : 0,
+                        &track.volume[param - VOLUME0], &track.prevVolume[param - VOLUME0],
+                        &track.volumeInc[param - VOLUME0],
+                        &track.mVolume[param - VOLUME0], &track.mPrevVolume[param - VOLUME0],
+                        &track.mVolumeInc[param - VOLUME0])) {
+                    ALOGV("setParameter(%s, VOLUME%d: %04x)",
+                            target == VOLUME ? "VOLUME" : "RAMP_VOLUME", param - VOLUME0,
+                                    track.volume[param - VOLUME0]);
+                    invalidateState(1 << name);
+                }
+            } else {
+                LOG_ALWAYS_FATAL("setParameter volume: bad param %d", param);
+            }
         }
         break;
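Note: with the generalized VOLUME0 range check above, volumes are still passed by float pointer. A minimal, hypothetical call site:

    float vol = 0.5f;  // linear gain
    mixer->setParameter(name, AudioMixer::RAMP_VOLUME, AudioMixer::VOLUME0, &vol);
    mixer->setParameter(name, AudioMixer::RAMP_VOLUME, AudioMixer::VOLUME1, &vol);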
 
@@ -870,30 +918,36 @@
     }
 }
 
-bool AudioMixer::track_t::setResampler(uint32_t value, uint32_t devSampleRate)
+bool AudioMixer::track_t::setResampler(uint32_t trackSampleRate, uint32_t devSampleRate)
 {
-    if (value != devSampleRate || resampler != NULL) {
-        if (sampleRate != value) {
-            sampleRate = value;
+    if (trackSampleRate != devSampleRate || resampler != NULL) {
+        if (sampleRate != trackSampleRate) {
+            sampleRate = trackSampleRate;
             if (resampler == NULL) {
-                ALOGV("creating resampler from track %d Hz to device %d Hz", value, devSampleRate);
+                ALOGV("Creating resampler from track %d Hz to device %d Hz",
+                        trackSampleRate, devSampleRate);
                 AudioResampler::src_quality quality;
                 // force lowest quality level resampler if use case isn't music or video
                 // FIXME this is flawed for dynamic sample rates, as we choose the resampler
                 // quality level based on the initial ratio, but that could change later.
                 // Should have a way to distinguish tracks with static ratios vs. dynamic ratios.
-                if (!((value == 44100 && devSampleRate == 48000) ||
-                      (value == 48000 && devSampleRate == 44100))) {
+                if (!((trackSampleRate == 44100 && devSampleRate == 48000) ||
+                      (trackSampleRate == 48000 && devSampleRate == 44100))) {
                     quality = AudioResampler::DYN_LOW_QUALITY;
                 } else {
                     quality = AudioResampler::DEFAULT_QUALITY;
                 }
 
-                ALOGVV("Creating resampler with %d bits\n", bits);
+                // TODO: Remove MONO_HACK. Resampler sees #channels after the downmixer
+                // but if none exists, it is the channel count (1 for mono).
+                const int resamplerChannelCount = downmixerBufferProvider != NULL
+                        ? mMixerChannelCount : channelCount;
+                ALOGVV("Creating resampler:"
+                        " format(%#x) channels(%d) devSampleRate(%u) quality(%d)\n",
+                        mMixerInFormat, resamplerChannelCount, devSampleRate, quality);
                 resampler = AudioResampler::create(
                         mMixerInFormat,
-                        // the resampler sees the number of channels after the downmixer, if any
-                        (int) (downmixerBufferProvider != NULL ? MAX_NUM_CHANNELS : channelCount),
+                        resamplerChannelCount,
                         devSampleRate, quality);
                 resampler->setLocalTimeFreq(sLocalTimeFreq);
             }
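Note: with the resamplerChannelCount logic above, a mono 44.1 kHz track feeding a 48 kHz stereo sink with no channel converter attached creates a 1-channel resampler; mono -> stereo expansion then happens in the mixer's MONO_HACK track hook. A hypothetical instance (float mixer input assumed):

    resampler = AudioResampler::create(AUDIO_FORMAT_PCM_FLOAT, 1 /*channels*/,
            48000 /*devSampleRate*/, AudioResampler::DEFAULT_QUALITY);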
@@ -919,20 +973,19 @@
 inline void AudioMixer::track_t::adjustVolumeRamp(bool aux, bool useFloat)
 {
     if (useFloat) {
-        for (uint32_t i=0 ; i<MAX_NUM_CHANNELS ; i++) {
+        for (uint32_t i = 0; i < MAX_NUM_VOLUMES; i++) {
             if (mVolumeInc[i] != 0 && fabs(mVolume[i] - mPrevVolume[i]) <= fabs(mVolumeInc[i])) {
                 volumeInc[i] = 0;
                 prevVolume[i] = volume[i] << 16;
                 mVolumeInc[i] = 0.;
                 mPrevVolume[i] = mVolume[i];
-
             } else {
                 //ALOGV("ramp: %f %f %f", mVolume[i], mPrevVolume[i], mVolumeInc[i]);
                 prevVolume[i] = u4_28_from_float(mPrevVolume[i]);
             }
         }
     } else {
-        for (uint32_t i=0 ; i<MAX_NUM_CHANNELS ; i++) {
+        for (uint32_t i = 0; i < MAX_NUM_VOLUMES; i++) {
             if (((volumeInc[i]>0) && (((prevVolume[i]+volumeInc[i])>>16) >= volume[i])) ||
                     ((volumeInc[i]<0) && (((prevVolume[i]+volumeInc[i])>>16) <= volume[i]))) {
                 volumeInc[i] = 0;
@@ -1051,18 +1104,21 @@
             if (n & NEEDS_RESAMPLE) {
                 all16BitsStereoNoResample = false;
                 resampling = true;
-                t.hook = getTrackHook(TRACKTYPE_RESAMPLE, FCC_2,
+                t.hook = getTrackHook(TRACKTYPE_RESAMPLE, t.mMixerChannelCount,
                         t.mMixerInFormat, t.mMixerFormat);
                 ALOGV_IF((n & NEEDS_CHANNEL_COUNT__MASK) > NEEDS_CHANNEL_2,
                         "Track %d needs downmix + resample", i);
             } else {
                 if ((n & NEEDS_CHANNEL_COUNT__MASK) == NEEDS_CHANNEL_1){
-                    t.hook = getTrackHook(TRACKTYPE_NORESAMPLEMONO, FCC_2,
+                    t.hook = getTrackHook(
+                            t.mMixerChannelCount == 2 // TODO: MONO_HACK.
+                                ? TRACKTYPE_NORESAMPLEMONO : TRACKTYPE_NORESAMPLE,
+                            t.mMixerChannelCount,
                             t.mMixerInFormat, t.mMixerFormat);
                     all16BitsStereoNoResample = false;
                 }
                 if ((n & NEEDS_CHANNEL_COUNT__MASK) >= NEEDS_CHANNEL_2){
-                    t.hook = getTrackHook(TRACKTYPE_NORESAMPLE, FCC_2,
+                    t.hook = getTrackHook(TRACKTYPE_NORESAMPLE, t.mMixerChannelCount,
                             t.mMixerInFormat, t.mMixerFormat);
                     ALOGV_IF((n & NEEDS_CHANNEL_COUNT__MASK) > NEEDS_CHANNEL_2,
                             "Track %d needs downmix", i);
@@ -1096,8 +1152,8 @@
                 if (countActiveTracks == 1) {
                     const int i = 31 - __builtin_clz(state->enabledTracks);
                     track_t& t = state->tracks[i];
-                    state->hook = getProcessHook(PROCESSTYPE_NORESAMPLEONETRACK, FCC_2,
-                            t.mMixerInFormat, t.mMixerFormat);
+                    state->hook = getProcessHook(PROCESSTYPE_NORESAMPLEONETRACK,
+                            t.mMixerChannelCount, t.mMixerInFormat, t.mMixerFormat);
                 }
             }
         }
@@ -1130,7 +1186,10 @@
             state->hook = process__nop;
         } else if (all16BitsStereoNoResample) {
             if (countActiveTracks == 1) {
-                state->hook = process__OneTrack16BitsStereoNoResampling;
+                const int i = 31 - __builtin_clz(state->enabledTracks);
+                track_t& t = state->tracks[i];
+                state->hook = getProcessHook(PROCESSTYPE_NORESAMPLEONETRACK,
+                        t.mMixerChannelCount, t.mMixerInFormat, t.mMixerFormat);
             }
         }
     }
@@ -1147,9 +1206,8 @@
     if (aux != NULL) {
         // always resample with unity gain when sending to auxiliary buffer to be able
         // to apply send level after resampling
-        // TODO: modify each resampler to support aux channel?
         t->resampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT);
-        memset(temp, 0, outFrameCount * MAX_NUM_CHANNELS * sizeof(int32_t));
+        memset(temp, 0, outFrameCount * t->mMixerChannelCount * sizeof(int32_t));
         t->resampler->resample(temp, outFrameCount, t->bufferProvider);
         if (CC_UNLIKELY(t->volumeInc[0]|t->volumeInc[1]|t->auxInc)) {
             volumeRampStereo(t, out, outFrameCount, temp, aux);
@@ -1434,7 +1492,6 @@
 {
     ALOGVV("process__nop\n");
     uint32_t e0 = state->enabledTracks;
-    size_t sampleCount = state->frameCount * MAX_NUM_CHANNELS;
     while (e0) {
         // process by group of tracks with same output buffer to
         // avoid multiple memset() on same buffer
@@ -1453,7 +1510,7 @@
             }
             e0 &= ~(e1);
 
-            memset(t1.mainBuffer, 0, sampleCount
+            memset(t1.mainBuffer, 0, state->frameCount * t1.mMixerChannelCount
                     * audio_bytes_per_sample(t1.mMixerFormat));
         }
 
@@ -1538,8 +1595,8 @@
                     }
                     size_t inFrames = (t.frameCount > outFrames)?outFrames:t.frameCount;
                     if (inFrames > 0) {
-                        t.hook(&t, outTemp + (BLOCKSIZE-outFrames)*MAX_NUM_CHANNELS, inFrames,
-                                state->resampleTemp, aux);
+                        t.hook(&t, outTemp + (BLOCKSIZE - outFrames) * t.mMixerChannelCount,
+                                inFrames, state->resampleTemp, aux);
                         t.frameCount -= inFrames;
                         outFrames -= inFrames;
                         if (CC_UNLIKELY(aux != NULL)) {
@@ -1565,10 +1622,11 @@
             }
 
             convertMixerFormat(out, t1.mMixerFormat, outTemp, t1.mMixerInFormat,
-                    BLOCKSIZE * FCC_2);
+                    BLOCKSIZE * t1.mMixerChannelCount);
             // TODO: fix ugly casting due to choice of out pointer type
             out = reinterpret_cast<int32_t*>((uint8_t*)out
-                    + BLOCKSIZE * FCC_2 * audio_bytes_per_sample(t1.mMixerFormat));
+                    + BLOCKSIZE * t1.mMixerChannelCount
+                        * audio_bytes_per_sample(t1.mMixerFormat));
             numFrames += BLOCKSIZE;
         } while (numFrames < state->frameCount);
     }
@@ -1590,8 +1648,6 @@
     ALOGVV("process__genericResampling\n");
     // this const just means that local variable outTemp doesn't change
     int32_t* const outTemp = state->outputTemp;
-    const size_t size = sizeof(int32_t) * MAX_NUM_CHANNELS * state->frameCount;
-
     size_t numFrames = state->frameCount;
 
     uint32_t e0 = state->enabledTracks;
@@ -1612,7 +1668,7 @@
         }
         e0 &= ~(e1);
         int32_t *out = t1.mainBuffer;
-        memset(outTemp, 0, size);
+        memset(outTemp, 0, sizeof(*outTemp) * t1.mMixerChannelCount * state->frameCount);
         while (e1) {
             const int i = 31 - __builtin_clz(e1);
             e1 &= ~(1<<i);
@@ -1644,14 +1700,15 @@
                     if (CC_UNLIKELY(aux != NULL)) {
                         aux += outFrames;
                     }
-                    t.hook(&t, outTemp + outFrames*MAX_NUM_CHANNELS, t.buffer.frameCount,
+                    t.hook(&t, outTemp + outFrames * t.mMixerChannelCount, t.buffer.frameCount,
                             state->resampleTemp, aux);
                     outFrames += t.buffer.frameCount;
                     t.bufferProvider->releaseBuffer(&t.buffer);
                 }
             }
         }
-        convertMixerFormat(out, t1.mMixerFormat, outTemp, t1.mMixerInFormat, numFrames * FCC_2);
+        convertMixerFormat(out, t1.mMixerFormat,
+                outTemp, t1.mMixerInFormat, numFrames * t1.mMixerChannelCount);
     }
 }
 
@@ -1687,7 +1744,7 @@
         // been enabled for mixing.
         if (in == NULL || (((uintptr_t)in) & 3)) {
             memset(out, 0, numFrames
-                    * MAX_NUM_CHANNELS * audio_bytes_per_sample(t.mMixerFormat));
+                    * t.mMixerChannelCount * audio_bytes_per_sample(t.mMixerFormat));
             ALOGE_IF((((uintptr_t)in) & 3), "process stereo track: input buffer alignment pb: "
                                               "buffer %p track %d, channels %d, needs %08x",
                     in, i, t.channelCount, t.needs);
@@ -1864,31 +1921,129 @@
     DownmixerBufferProvider::init(); // for the downmixer
 }
 
-template <int MIXTYPE, int NCHAN, bool USEFLOATVOL, bool ADJUSTVOL,
+/* TODO: consider whether this level of optimization is necessary.
+ * Perhaps just stick with a single for loop.
+ */
+
+// Must expand to a compile-time constant (constexpr).  It could map to a MONOVOL
+// mixtype based on MAX_NUM_VOLUMES, but that would be an unnecessary complication.
+#define MIXTYPE_MONOVOL(mixtype) (mixtype == MIXTYPE_MULTI ? MIXTYPE_MULTI_MONOVOL : \
+        mixtype == MIXTYPE_MULTI_SAVEONLY ? MIXTYPE_MULTI_SAVEONLY_MONOVOL : mixtype)
+
+/* MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27)
+ */
+template <int MIXTYPE,
+        typename TO, typename TI, typename TV, typename TA, typename TAV>
+static void volumeRampMulti(uint32_t channels, TO* out, size_t frameCount,
+        const TI* in, TA* aux, TV *vol, const TV *volinc, TAV *vola, TAV volainc)
+{
+    switch (channels) {
+    case 1:
+        volumeRampMulti<MIXTYPE, 1>(out, frameCount, in, aux, vol, volinc, vola, volainc);
+        break;
+    case 2:
+        volumeRampMulti<MIXTYPE, 2>(out, frameCount, in, aux, vol, volinc, vola, volainc);
+        break;
+    case 3:
+        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 3>(out,
+                frameCount, in, aux, vol, volinc, vola, volainc);
+        break;
+    case 4:
+        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 4>(out,
+                frameCount, in, aux, vol, volinc, vola, volainc);
+        break;
+    case 5:
+        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 5>(out,
+                frameCount, in, aux, vol, volinc, vola, volainc);
+        break;
+    case 6:
+        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 6>(out,
+                frameCount, in, aux, vol, volinc, vola, volainc);
+        break;
+    case 7:
+        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 7>(out,
+                frameCount, in, aux, vol, volinc, vola, volainc);
+        break;
+    case 8:
+        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 8>(out,
+                frameCount, in, aux, vol, volinc, vola, volainc);
+        break;
+    }
+}
+
+/* MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27)
+ */
+template <int MIXTYPE,
+        typename TO, typename TI, typename TV, typename TA, typename TAV>
+static void volumeMulti(uint32_t channels, TO* out, size_t frameCount,
+        const TI* in, TA* aux, const TV *vol, TAV vola)
+{
+    switch (channels) {
+    case 1:
+        volumeMulti<MIXTYPE, 1>(out, frameCount, in, aux, vol, vola);
+        break;
+    case 2:
+        volumeMulti<MIXTYPE, 2>(out, frameCount, in, aux, vol, vola);
+        break;
+    case 3:
+        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 3>(out, frameCount, in, aux, vol, vola);
+        break;
+    case 4:
+        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 4>(out, frameCount, in, aux, vol, vola);
+        break;
+    case 5:
+        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 5>(out, frameCount, in, aux, vol, vola);
+        break;
+    case 6:
+        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 6>(out, frameCount, in, aux, vol, vola);
+        break;
+    case 7:
+        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 7>(out, frameCount, in, aux, vol, vola);
+        break;
+    case 8:
+        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 8>(out, frameCount, in, aux, vol, vola);
+        break;
+    }
+}
+
+/* MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * USEFLOATVOL (set to true if float volume is used)
+ * ADJUSTVOL   (set to true if volume ramp parameters need adjustment afterwards)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27)
+ */
+template <int MIXTYPE, bool USEFLOATVOL, bool ADJUSTVOL,
     typename TO, typename TI, typename TA>
 void AudioMixer::volumeMix(TO *out, size_t outFrames,
         const TI *in, TA *aux, bool ramp, AudioMixer::track_t *t)
 {
     if (USEFLOATVOL) {
         if (ramp) {
-            volumeRampMulti<MIXTYPE, NCHAN>(out, outFrames, in, aux,
+            volumeRampMulti<MIXTYPE>(t->mMixerChannelCount, out, outFrames, in, aux,
                     t->mPrevVolume, t->mVolumeInc, &t->prevAuxLevel, t->auxInc);
             if (ADJUSTVOL) {
                 t->adjustVolumeRamp(aux != NULL, true);
             }
         } else {
-            volumeMulti<MIXTYPE, NCHAN>(out, outFrames, in, aux,
+            volumeMulti<MIXTYPE>(t->mMixerChannelCount, out, outFrames, in, aux,
                     t->mVolume, t->auxLevel);
         }
     } else {
         if (ramp) {
-            volumeRampMulti<MIXTYPE, NCHAN>(out, outFrames, in, aux,
+            volumeRampMulti<MIXTYPE>(t->mMixerChannelCount, out, outFrames, in, aux,
                     t->prevVolume, t->volumeInc, &t->prevAuxLevel, t->auxInc);
             if (ADJUSTVOL) {
                 t->adjustVolumeRamp(aux != NULL);
             }
         } else {
-            volumeMulti<MIXTYPE, NCHAN>(out, outFrames, in, aux,
+            volumeMulti<MIXTYPE>(t->mMixerChannelCount, out, outFrames, in, aux,
                     t->volume, t->auxLevel);
         }
     }
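Note: as a concrete reading of the channel dispatch above (illustrative only), a 6-channel (5.1) mixer output with MIXTYPE_MULTI resolves to the MONOVOL variant, since only MAX_NUM_VOLUMES (2) per-channel volumes exist:

    // volumeMulti<MIXTYPE_MULTI>(6, out, frameCount, in, aux, vol, vola)
    //   -> volumeMulti<MIXTYPE_MULTI_MONOVOL, 6>(out, frameCount, in, aux, vol, vola)
    // i.e. all 6 output channels are scaled by vol[0]; vol[1] is only used up to stereo.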
@@ -1897,8 +2052,13 @@
 /* This process hook is called when there is a single track without
  * aux buffer, volume ramp, or resampling.
  * TODO: Update the hook selection: this can properly handle aux and ramp.
+ *
+ * MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27)
  */
-template <int MIXTYPE, int NCHAN, typename TO, typename TI, typename TA>
+template <int MIXTYPE, typename TO, typename TI, typename TA>
 void AudioMixer::process_NoResampleOneTrack(state_t* state, int64_t pts)
 {
     ALOGVV("process_NoResampleOneTrack\n");
@@ -1906,6 +2066,7 @@
     const int i = 31 - __builtin_clz(state->enabledTracks);
     ALOG_ASSERT((1 << i) == state->enabledTracks, "more than 1 track enabled");
     track_t *t = &state->tracks[i];
+    const uint32_t channels = t->mMixerChannelCount;
     TO* out = reinterpret_cast<TO*>(t->mainBuffer);
     TA* aux = reinterpret_cast<TA*>(t->auxBuffer);
     const bool ramp = t->needsRamp();
@@ -1922,7 +2083,7 @@
         // been enabled for mixing.
         if (in == NULL || (((uintptr_t)in) & 3)) {
             memset(out, 0, numFrames
-                    * NCHAN * audio_bytes_per_sample(t->mMixerFormat));
+                    * channels * audio_bytes_per_sample(t->mMixerFormat));
             ALOGE_IF((((uintptr_t)in) & 3), "process_NoResampleOneTrack: bus error: "
                     "buffer %p track %p, channels %d, needs %#x",
                     in, t, t->channelCount, t->needs);
@@ -1930,12 +2091,12 @@
         }
 
         const size_t outFrames = b.frameCount;
-        volumeMix<MIXTYPE, NCHAN, is_same<TI, float>::value, false> (out,
-                outFrames, in, aux, ramp, t);
+        volumeMix<MIXTYPE, is_same<TI, float>::value, false> (
+                out, outFrames, in, aux, ramp, t);
 
-        out += outFrames * NCHAN;
+        out += outFrames * channels;
         if (aux != NULL) {
-            aux += NCHAN;
+            aux += channels;
         }
         numFrames -= b.frameCount;
 
@@ -1949,24 +2110,28 @@
 
 /* This track hook is called to do resampling then mixing,
  * pulling from the track's upstream AudioBufferProvider.
+ *
+ * MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27)
  */
-template <int MIXTYPE, int NCHAN, typename TO, typename TI, typename TA>
+template <int MIXTYPE, typename TO, typename TI, typename TA>
 void AudioMixer::track__Resample(track_t* t, TO* out, size_t outFrameCount, TO* temp, TA* aux)
 {
     ALOGVV("track__Resample\n");
     t->resampler->setSampleRate(t->sampleRate);
-
     const bool ramp = t->needsRamp();
     if (ramp || aux != NULL) {
         // if ramp:        resample with unity gain to temp buffer and scale/mix in 2nd step.
         // if aux != NULL: resample with unity gain to temp buffer then apply send level.
 
         t->resampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT);
-        memset(temp, 0, outFrameCount * NCHAN * sizeof(TO));
+        memset(temp, 0, outFrameCount * t->mMixerChannelCount * sizeof(TO));
         t->resampler->resample((int32_t*)temp, outFrameCount, t->bufferProvider);
 
-        volumeMix<MIXTYPE, NCHAN, is_same<TI, float>::value, true>(out, outFrameCount,
-                temp, aux, ramp, t);
+        volumeMix<MIXTYPE, is_same<TI, float>::value, true>(
+                out, outFrameCount, temp, aux, ramp, t);
 
     } else { // constant volume gain
         t->resampler->setVolume(t->mVolume[0], t->mVolume[1]);
@@ -1976,20 +2141,25 @@
 
 /* This track hook is called to mix a track, when no resampling is required.
  * The input buffer should be present in t->in.
+ *
+ * MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27)
  */
-template <int MIXTYPE, int NCHAN, typename TO, typename TI, typename TA>
+template <int MIXTYPE, typename TO, typename TI, typename TA>
 void AudioMixer::track__NoResample(track_t* t, TO* out, size_t frameCount,
         TO* temp __unused, TA* aux)
 {
     ALOGVV("track__NoResample\n");
     const TI *in = static_cast<const TI *>(t->in);
 
-    volumeMix<MIXTYPE, NCHAN, is_same<TI, float>::value, true>(out, frameCount,
-            in, aux, t->needsRamp(), t);
+    volumeMix<MIXTYPE, is_same<TI, float>::value, true>(
+            out, frameCount, in, aux, t->needsRamp(), t);
 
     // MIXTYPE_MONOEXPAND reads a single input channel and expands to NCHAN output channels.
     // MIXTYPE_MULTI reads NCHAN input channels and places to NCHAN output channels.
-    in += (MIXTYPE == MIXTYPE_MONOEXPAND) ? frameCount : frameCount * NCHAN;
+    in += (MIXTYPE == MIXTYPE_MONOEXPAND) ? frameCount : frameCount * t->mMixerChannelCount;
     t->in = in;
 }
 
@@ -2036,10 +2206,10 @@
 
 /* Returns the proper track hook to use for mixing the track into the output buffer.
  */
-AudioMixer::hook_t AudioMixer::getTrackHook(int trackType, int channels,
+AudioMixer::hook_t AudioMixer::getTrackHook(int trackType, uint32_t channelCount,
         audio_format_t mixerInFormat, audio_format_t mixerOutFormat __unused)
 {
-    if (!kUseNewMixer && channels == FCC_2 && mixerInFormat == AUDIO_FORMAT_PCM_16_BIT) {
+    if (!kUseNewMixer && channelCount == FCC_2 && mixerInFormat == AUDIO_FORMAT_PCM_16_BIT) {
         switch (trackType) {
         case TRACKTYPE_NOP:
             return track__nop;
@@ -2054,7 +2224,7 @@
             break;
         }
     }
-    LOG_ALWAYS_FATAL_IF(channels != FCC_2); // TODO: must be stereo right now
+    LOG_ALWAYS_FATAL_IF(channelCount > MAX_NUM_CHANNELS);
     switch (trackType) {
     case TRACKTYPE_NOP:
         return track__nop;
@@ -2062,10 +2232,10 @@
         switch (mixerInFormat) {
         case AUDIO_FORMAT_PCM_FLOAT:
             return (AudioMixer::hook_t)
-                    track__Resample<MIXTYPE_MULTI, 2, float, float, int32_t>;
+                    track__Resample<MIXTYPE_MULTI, float /*TO*/, float /*TI*/, int32_t /*TA*/>;
         case AUDIO_FORMAT_PCM_16_BIT:
             return (AudioMixer::hook_t)\
-                    track__Resample<MIXTYPE_MULTI, 2, int32_t, int16_t, int32_t>;
+                    track__Resample<MIXTYPE_MULTI, int32_t, int16_t, int32_t>;
         default:
             LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
             break;
@@ -2075,10 +2245,10 @@
         switch (mixerInFormat) {
         case AUDIO_FORMAT_PCM_FLOAT:
             return (AudioMixer::hook_t)
-                    track__NoResample<MIXTYPE_MONOEXPAND, 2, float, float, int32_t>;
+                    track__NoResample<MIXTYPE_MONOEXPAND, float, float, int32_t>;
         case AUDIO_FORMAT_PCM_16_BIT:
             return (AudioMixer::hook_t)
-                    track__NoResample<MIXTYPE_MONOEXPAND, 2, int32_t, int16_t, int32_t>;
+                    track__NoResample<MIXTYPE_MONOEXPAND, int32_t, int16_t, int32_t>;
         default:
             LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
             break;
@@ -2088,10 +2258,10 @@
         switch (mixerInFormat) {
         case AUDIO_FORMAT_PCM_FLOAT:
             return (AudioMixer::hook_t)
-                    track__NoResample<MIXTYPE_MULTI, 2, float, float, int32_t>;
+                    track__NoResample<MIXTYPE_MULTI, float, float, int32_t>;
         case AUDIO_FORMAT_PCM_16_BIT:
             return (AudioMixer::hook_t)
-                    track__NoResample<MIXTYPE_MULTI, 2, int32_t, int16_t, int32_t>;
+                    track__NoResample<MIXTYPE_MULTI, int32_t, int16_t, int32_t>;
         default:
             LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
             break;
@@ -2107,25 +2277,25 @@
 /* Returns the proper process hook for mixing tracks. Currently works only for
  * PROCESSTYPE_NORESAMPLEONETRACK, a mix involving one track, no resampling.
  */
-AudioMixer::process_hook_t AudioMixer::getProcessHook(int processType, int channels,
+AudioMixer::process_hook_t AudioMixer::getProcessHook(int processType, uint32_t channelCount,
         audio_format_t mixerInFormat, audio_format_t mixerOutFormat)
 {
     if (processType != PROCESSTYPE_NORESAMPLEONETRACK) { // Only NORESAMPLEONETRACK
         LOG_ALWAYS_FATAL("bad processType: %d", processType);
         return NULL;
     }
-    if (!kUseNewMixer && channels == FCC_2 && mixerInFormat == AUDIO_FORMAT_PCM_16_BIT) {
+    if (!kUseNewMixer && channelCount == FCC_2 && mixerInFormat == AUDIO_FORMAT_PCM_16_BIT) {
         return process__OneTrack16BitsStereoNoResampling;
     }
-    LOG_ALWAYS_FATAL_IF(channels != FCC_2); // TODO: must be stereo right now
+    LOG_ALWAYS_FATAL_IF(channelCount > MAX_NUM_CHANNELS);
     switch (mixerInFormat) {
     case AUDIO_FORMAT_PCM_FLOAT:
         switch (mixerOutFormat) {
         case AUDIO_FORMAT_PCM_FLOAT:
-            return process_NoResampleOneTrack<MIXTYPE_MULTI_SAVEONLY, 2,
-                    float, float, int32_t>;
+            return process_NoResampleOneTrack<MIXTYPE_MULTI_SAVEONLY,
+                    float /*TO*/, float /*TI*/, int32_t /*TA*/>;
         case AUDIO_FORMAT_PCM_16_BIT:
-            return process_NoResampleOneTrack<MIXTYPE_MULTI_SAVEONLY, 2,
+            return process_NoResampleOneTrack<MIXTYPE_MULTI_SAVEONLY,
                     int16_t, float, int32_t>;
         default:
             LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
@@ -2135,10 +2305,10 @@
     case AUDIO_FORMAT_PCM_16_BIT:
         switch (mixerOutFormat) {
         case AUDIO_FORMAT_PCM_FLOAT:
-            return process_NoResampleOneTrack<MIXTYPE_MULTI_SAVEONLY, 2,
+            return process_NoResampleOneTrack<MIXTYPE_MULTI_SAVEONLY,
                     float, int16_t, int32_t>;
         case AUDIO_FORMAT_PCM_16_BIT:
-            return process_NoResampleOneTrack<MIXTYPE_MULTI_SAVEONLY, 2,
+            return process_NoResampleOneTrack<MIXTYPE_MULTI_SAVEONLY,
                     int16_t, int16_t, int32_t>;
         default:
             LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
diff --git a/services/audioflinger/AudioMixer.h b/services/audioflinger/AudioMixer.h
index 09a4d89..5ba377b 100644
--- a/services/audioflinger/AudioMixer.h
+++ b/services/audioflinger/AudioMixer.h
@@ -26,7 +26,7 @@
 #include <media/AudioBufferProvider.h>
 #include "AudioResampler.h"
 
-#include <audio_effects/effect_downmix.h>
+#include <hardware/audio_effect.h>
 #include <system/audio.h>
 #include <media/nbaio/NBLog.h>
 
@@ -51,12 +51,11 @@
     static const uint32_t MAX_NUM_TRACKS = 32;
     // maximum number of channels supported by the mixer
 
-    // This mixer has a hard-coded upper limit of 2 channels for output.
-    // There is support for > 2 channel tracks down-mixed to 2 channel output via a down-mix effect.
-    // Adding support for > 2 channel output would require more than simply changing this value.
-    static const uint32_t MAX_NUM_CHANNELS = 2;
+    // This mixer has a hard-coded upper limit of 8 channels for output.
+    static const uint32_t MAX_NUM_CHANNELS = 8;
+    static const uint32_t MAX_NUM_VOLUMES = 2; // stereo volume only
     // maximum number of channels supported for the content
-    static const uint32_t MAX_NUM_CHANNELS_TO_DOWNMIX = 8;
+    static const uint32_t MAX_NUM_CHANNELS_TO_DOWNMIX = AUDIO_CHANNEL_COUNT_MAX;
 
     static const uint16_t UNITY_GAIN_INT = 0x1000;
     static const float    UNITY_GAIN_FLOAT = 1.0f;
@@ -82,6 +81,7 @@
         AUX_BUFFER      = 0x4003,
         DOWNMIX_TYPE    = 0X4004,
         MIXER_FORMAT    = 0x4005, // AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
+        MIXER_CHANNEL_MASK = 0x4006, // Channel mask for mixer output
         // for target RESAMPLE
         SAMPLE_RATE     = 0x4100, // Configure sample rate conversion on this track name;
                                   // parameter 'value' is the new sample rate in Hz.
@@ -164,15 +164,15 @@
 
         // TODO: Eventually remove legacy integer volume settings
         union {
-        int16_t     volume[MAX_NUM_CHANNELS]; // U4.12 fixed point (top bit should be zero)
+        int16_t     volume[MAX_NUM_VOLUMES]; // U4.12 fixed point (top bit should be zero)
         int32_t     volumeRL;
         };
 
-        int32_t     prevVolume[MAX_NUM_CHANNELS];
+        int32_t     prevVolume[MAX_NUM_VOLUMES];
 
         // 16-byte boundary
 
-        int32_t     volumeInc[MAX_NUM_CHANNELS];
+        int32_t     volumeInc[MAX_NUM_VOLUMES];
         int32_t     auxInc;
         int32_t     prevAuxLevel;
 
@@ -217,18 +217,20 @@
         audio_format_t mMixerInFormat;   // mix internal format AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
                                          // each track must be converted to this format.
 
-        float          mVolume[MAX_NUM_CHANNELS];     // floating point set volume
-        float          mPrevVolume[MAX_NUM_CHANNELS]; // floating point previous volume
-        float          mVolumeInc[MAX_NUM_CHANNELS];  // floating point volume increment
+        float          mVolume[MAX_NUM_VOLUMES];     // floating point set volume
+        float          mPrevVolume[MAX_NUM_VOLUMES]; // floating point previous volume
+        float          mVolumeInc[MAX_NUM_VOLUMES];  // floating point volume increment
 
         float          mAuxLevel;                     // floating point set aux level
         float          mPrevAuxLevel;                 // floating point prev aux level
         float          mAuxInc;                       // floating point aux increment
 
         // 16-byte boundary
+        audio_channel_mask_t mMixerChannelMask;
+        uint32_t             mMixerChannelCount;
 
         bool        needsRamp() { return (volumeInc[0] | volumeInc[1] | auxInc) != 0; }
-        bool        setResampler(uint32_t sampleRate, uint32_t devSampleRate);
+        bool        setResampler(uint32_t trackSampleRate, uint32_t devSampleRate);
         bool        doesResample() const { return resampler != NULL; }
         void        resetResampler() { if (resampler != NULL) resampler->reset(); }
         void        adjustVolumeRamp(bool aux, bool useFloat = false);
@@ -377,7 +379,11 @@
     // OK to call more often than that, but unnecessary.
     void invalidateState(uint32_t mask);
 
-    static status_t initTrackDownmix(track_t* pTrack, int trackNum, audio_channel_mask_t mask);
+    bool setChannelMasks(int name,
+            audio_channel_mask_t trackChannelMask, audio_channel_mask_t mixerChannelMask);
+
+    // TODO: remove unused trackName/trackNum from functions below.
+    static status_t initTrackDownmix(track_t* pTrack, int trackName);
     static status_t prepareTrackForDownmix(track_t* pTrack, int trackNum);
     static void unprepareTrackForDownmix(track_t* pTrack, int trackName);
     static status_t prepareTrackForReformat(track_t* pTrack, int trackNum);
@@ -418,27 +424,26 @@
      * in AudioMixerOps.h).  The template parameters are as follows:
      *
      *   MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
-     *   NCHAN       (number of channels, 2 for now)
      *   USEFLOATVOL (set to true if float volume is used)
      *   ADJUSTVOL   (set to true if volume ramp parameters needs adjustment afterwards)
      *   TO: int32_t (Q4.27) or float
      *   TI: int32_t (Q4.27) or int16_t (Q0.15) or float
      *   TA: int32_t (Q4.27)
      */
-    template <int MIXTYPE, int NCHAN, bool USEFLOATVOL, bool ADJUSTVOL,
+    template <int MIXTYPE, bool USEFLOATVOL, bool ADJUSTVOL,
         typename TO, typename TI, typename TA>
     static void volumeMix(TO *out, size_t outFrames,
             const TI *in, TA *aux, bool ramp, AudioMixer::track_t *t);
 
     // multi-format process hooks
-    template <int MIXTYPE, int NCHAN, typename TO, typename TI, typename TA>
+    template <int MIXTYPE, typename TO, typename TI, typename TA>
     static void process_NoResampleOneTrack(state_t* state, int64_t pts);
 
     // multi-format track hooks
-    template <int MIXTYPE, int NCHAN, typename TO, typename TI, typename TA>
+    template <int MIXTYPE, typename TO, typename TI, typename TA>
     static void track__Resample(track_t* t, TO* out, size_t frameCount,
             TO* temp __unused, TA* aux);
-    template <int MIXTYPE, int NCHAN, typename TO, typename TI, typename TA>
+    template <int MIXTYPE, typename TO, typename TI, typename TA>
     static void track__NoResample(track_t* t, TO* out, size_t frameCount,
             TO* temp __unused, TA* aux);
 
@@ -457,9 +462,9 @@
     };
 
     // functions for determining the proper process and track hooks.
-    static process_hook_t getProcessHook(int processType, int channels,
+    static process_hook_t getProcessHook(int processType, uint32_t channelCount,
             audio_format_t mixerInFormat, audio_format_t mixerOutFormat);
-    static hook_t getTrackHook(int trackType, int channels,
+    static hook_t getTrackHook(int trackType, uint32_t channelCount,
             audio_format_t mixerInFormat, audio_format_t mixerOutFormat);
 };
 
diff --git a/services/audioflinger/AudioMixerOps.h b/services/audioflinger/AudioMixerOps.h
index ad739ff..49131f6 100644
--- a/services/audioflinger/AudioMixerOps.h
+++ b/services/audioflinger/AudioMixerOps.h
@@ -230,6 +230,8 @@
     MIXTYPE_MULTI,
     MIXTYPE_MONOEXPAND,
     MIXTYPE_MULTI_SAVEONLY,
+    MIXTYPE_MULTI_MONOVOL,
+    MIXTYPE_MULTI_SAVEONLY_MONOVOL,
 };
 
 /*
@@ -263,6 +265,13 @@
  *   vol: represents a volume array.
  *
  *   MIXTYPE_MULTI_SAVEONLY does not accumulate into the out pointer.
+ *
+ * MIXTYPE_MULTI_MONOVOL:
+ *   Same as MIXTYPE_MULTI, but uses only volume[0].
+ *
+ * MIXTYPE_MULTI_SAVEONLY_MONOVOL:
+ *   Same as MIXTYPE_MULTI_SAVEONLY, but uses only volume[0].
+ *
  */
 
 template <int MIXTYPE, int NCHAN,
@@ -283,12 +292,6 @@
                     vol[i] += volinc[i];
                 }
                 break;
-            case MIXTYPE_MULTI_SAVEONLY:
-                for (int i = 0; i < NCHAN; ++i) {
-                    *out++ = MixMulAux<TO, TI, TV, TA>(*in++, vol[i], &auxaccum);
-                    vol[i] += volinc[i];
-                }
-                break;
             case MIXTYPE_MONOEXPAND:
                 for (int i = 0; i < NCHAN; ++i) {
                     *out++ += MixMulAux<TO, TI, TV, TA>(*in, vol[i], &auxaccum);
@@ -296,6 +299,24 @@
                 }
                 in++;
                 break;
+            case MIXTYPE_MULTI_SAVEONLY:
+                for (int i = 0; i < NCHAN; ++i) {
+                    *out++ = MixMulAux<TO, TI, TV, TA>(*in++, vol[i], &auxaccum);
+                    vol[i] += volinc[i];
+                }
+                break;
+            case MIXTYPE_MULTI_MONOVOL:
+                for (int i = 0; i < NCHAN; ++i) {
+                    *out++ += MixMulAux<TO, TI, TV, TA>(*in++, vol[0], &auxaccum);
+                }
+                vol[0] += volinc[0];
+                break;
+            case MIXTYPE_MULTI_SAVEONLY_MONOVOL:
+                for (int i = 0; i < NCHAN; ++i) {
+                    *out++ = MixMulAux<TO, TI, TV, TA>(*in++, vol[0], &auxaccum);
+                }
+                vol[0] += volinc[0];
+                break;
             default:
                 LOG_ALWAYS_FATAL("invalid mixtype %d", MIXTYPE);
                 break;
@@ -313,12 +334,6 @@
                     vol[i] += volinc[i];
                 }
                 break;
-            case MIXTYPE_MULTI_SAVEONLY:
-                for (int i = 0; i < NCHAN; ++i) {
-                    *out++ = MixMul<TO, TI, TV>(*in++, vol[i]);
-                    vol[i] += volinc[i];
-                }
-                break;
             case MIXTYPE_MONOEXPAND:
                 for (int i = 0; i < NCHAN; ++i) {
                     *out++ += MixMul<TO, TI, TV>(*in, vol[i]);
@@ -326,6 +341,24 @@
                 }
                 in++;
                 break;
+            case MIXTYPE_MULTI_SAVEONLY:
+                for (int i = 0; i < NCHAN; ++i) {
+                    *out++ = MixMul<TO, TI, TV>(*in++, vol[i]);
+                    vol[i] += volinc[i];
+                }
+                break;
+            case MIXTYPE_MULTI_MONOVOL:
+                for (int i = 0; i < NCHAN; ++i) {
+                    *out++ += MixMul<TO, TI, TV>(*in++, vol[0]);
+                }
+                vol[0] += volinc[0];
+                break;
+            case MIXTYPE_MULTI_SAVEONLY_MONOVOL:
+                for (int i = 0; i < NCHAN; ++i) {
+                    *out++ = MixMul<TO, TI, TV>(*in++, vol[0]);
+                }
+                vol[0] += volinc[0];
+                break;
             default:
                 LOG_ALWAYS_FATAL("invalid mixtype %d", MIXTYPE);
                 break;
@@ -351,17 +384,27 @@
                     *out++ += MixMulAux<TO, TI, TV, TA>(*in++, vol[i], &auxaccum);
                 }
                 break;
-            case MIXTYPE_MULTI_SAVEONLY:
-                for (int i = 0; i < NCHAN; ++i) {
-                    *out++ = MixMulAux<TO, TI, TV, TA>(*in++, vol[i], &auxaccum);
-                }
-                break;
             case MIXTYPE_MONOEXPAND:
                 for (int i = 0; i < NCHAN; ++i) {
                     *out++ += MixMulAux<TO, TI, TV, TA>(*in, vol[i], &auxaccum);
                 }
                 in++;
                 break;
+            case MIXTYPE_MULTI_SAVEONLY:
+                for (int i = 0; i < NCHAN; ++i) {
+                    *out++ = MixMulAux<TO, TI, TV, TA>(*in++, vol[i], &auxaccum);
+                }
+                break;
+            case MIXTYPE_MULTI_MONOVOL:
+                for (int i = 0; i < NCHAN; ++i) {
+                    *out++ += MixMulAux<TO, TI, TV, TA>(*in++, vol[0], &auxaccum);
+                }
+                break;
+            case MIXTYPE_MULTI_SAVEONLY_MONOVOL:
+                for (int i = 0; i < NCHAN; ++i) {
+                    *out++ = MixMulAux<TO, TI, TV, TA>(*in++, vol[0], &auxaccum);
+                }
+                break;
             default:
                 LOG_ALWAYS_FATAL("invalid mixtype %d", MIXTYPE);
                 break;
@@ -377,17 +420,27 @@
                     *out++ += MixMul<TO, TI, TV>(*in++, vol[i]);
                 }
                 break;
-            case MIXTYPE_MULTI_SAVEONLY:
-                for (int i = 0; i < NCHAN; ++i) {
-                    *out++ = MixMul<TO, TI, TV>(*in++, vol[i]);
-                }
-                break;
             case MIXTYPE_MONOEXPAND:
                 for (int i = 0; i < NCHAN; ++i) {
                     *out++ += MixMul<TO, TI, TV>(*in, vol[i]);
                 }
                 in++;
                 break;
+            case MIXTYPE_MULTI_SAVEONLY:
+                for (int i = 0; i < NCHAN; ++i) {
+                    *out++ = MixMul<TO, TI, TV>(*in++, vol[i]);
+                }
+                break;
+            case MIXTYPE_MULTI_MONOVOL:
+                for (int i = 0; i < NCHAN; ++i) {
+                    *out++ += MixMul<TO, TI, TV>(*in++, vol[0]);
+                }
+                break;
+            case MIXTYPE_MULTI_SAVEONLY_MONOVOL:
+                for (int i = 0; i < NCHAN; ++i) {
+                    *out++ = MixMul<TO, TI, TV>(*in++, vol[0]);
+                }
+                break;
             default:
                 LOG_ALWAYS_FATAL("invalid mixtype %d", MIXTYPE);
                 break;
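Note: to make the new MONOVOL variants concrete (hypothetical 4-channel, stereo-volume numbers):

    // With NCHAN = 4 and vol = {0.5f, 0.25f}:
    //   MIXTYPE_MULTI          would index vol[2] and vol[3], out of range for stereo volumes;
    //   MIXTYPE_MULTI_MONOVOL  scales all four output channels by vol[0] (0.5f) and, in the
    //                          ramping variant, advances only vol[0] by volinc[0].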
diff --git a/services/audioflinger/AudioResamplerFirProcess.h b/services/audioflinger/AudioResamplerFirProcess.h
index bb0f1c9..d130013 100644
--- a/services/audioflinger/AudioResamplerFirProcess.h
+++ b/services/audioflinger/AudioResamplerFirProcess.h
@@ -109,40 +109,25 @@
     }
 };
 
-/*
- * Helper template functions for interpolating filter coefficients.
- */
-
-template<typename TC, typename T>
-void adjustLerp(T& lerpP __unused)
-{
-}
-
-template<int32_t, typename T>
-void adjustLerp(T& lerpP)
-{
-    lerpP >>= 16;   // lerpP is 32bit for NEON int32_t, but always 16 bit for non-NEON path
-}
-
 template<typename TC, typename TINTERP>
-static inline
+inline
 TC interpolate(TC coef_0, TC coef_1, TINTERP lerp)
 {
     return lerp * (coef_1 - coef_0) + coef_0;
 }
 
-template<int16_t, uint32_t>
-static inline
-int16_t interpolate(int16_t coef_0, int16_t coef_1, uint32_t lerp)
-{
-    return (static_cast<int16_t>(lerp) * ((coef_1-coef_0)<<1)>>16) + coef_0;
+template<>
+inline
+int16_t interpolate<int16_t, uint32_t>(int16_t coef_0, int16_t coef_1, uint32_t lerp)
+{   // in some CPU architectures 16b x 16b multiplies are faster.
+    return (static_cast<int16_t>(lerp) * static_cast<int16_t>(coef_1 - coef_0) >> 15) + coef_0;
 }
 
-template<int32_t, uint32_t>
-static inline
-int32_t interpolate(int32_t coef_0, int32_t coef_1, uint32_t lerp)
+template<>
+inline
+int32_t interpolate<int32_t, uint32_t>(int32_t coef_0, int32_t coef_1, uint32_t lerp)
 {
-    return mulAdd(static_cast<int16_t>(lerp), (coef_1-coef_0)<<1, coef_0);
+    return (lerp * static_cast<int64_t>(coef_1 - coef_0) >> 31) + coef_0;
 }
 
 /* class scope for passing in functions into templates */
@@ -283,7 +268,6 @@
         TINTERP lerpP,
         const TO* const volumeLR)
 {
-    adjustLerp<TC, TINTERP>(lerpP); // coefficient type adjustment for interpolations
     ProcessBase<CHANNELS, STRIDE, InterpCompute>(out, count, coefsP, coefsN, sP, sN, lerpP, volumeLR);
 }
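Note: a quick sanity check of the new int32_t interpolate specialization above (reading lerp as a fraction of 1 << 31 is an assumption based on the >> 31):

    // coef_0 = 0, coef_1 = 1 << 30, lerp = 1u << 30 (i.e. one half of full scale):
    //   ((int64_t)lerp * (coef_1 - coef_0)) >> 31  ==  1 << 29
    // exactly halfway between the two coefficients, as expected for linear interpolation.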
 
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index c486630..9e15293 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -55,6 +55,7 @@
     mixer(NULL),
     mSinkBuffer(NULL),
     mSinkBufferSize(0),
+    mSinkChannelCount(FCC_2),
     mMixerBuffer(NULL),
     mMixerBufferSize(0),
     mMixerBufferFormat(AUDIO_FORMAT_PCM_16_BIT),
@@ -71,6 +72,9 @@
     current = &initial;
 
     mDummyDumpState = &dummyDumpState;
+    // TODO: Add channel mask to NBAIO_Format.
+    // We assume that the channel mask must be a valid positional channel mask.
+    mSinkChannelMask = audio_channel_out_mask_from_count(mSinkChannelCount);
 
     unsigned i;
     for (i = 0; i < FastMixerState::kMaxFastTracks; ++i) {
@@ -148,10 +152,17 @@
         if (outputSink == NULL) {
             format = Format_Invalid;
             sampleRate = 0;
+            mSinkChannelCount = 0;
+            mSinkChannelMask = AUDIO_CHANNEL_NONE;
         } else {
             format = outputSink->format();
             sampleRate = Format_sampleRate(format);
-            ALOG_ASSERT(Format_channelCount(format) == FCC_2);
+            mSinkChannelCount = Format_channelCount(format);
+            LOG_ALWAYS_FATAL_IF(mSinkChannelCount > AudioMixer::MAX_NUM_CHANNELS);
+
+            // TODO: Add channel mask to NBAIO_Format
+            // We assume that the channel mask must be a valid positional channel mask.
+            mSinkChannelMask = audio_channel_out_mask_from_count(mSinkChannelCount);
         }
         dumpState->mSampleRate = sampleRate;
     }
@@ -169,10 +180,12 @@
             //       implementation; it would be better to have normal mixer allocate for us
             //       to avoid blocking here and to prevent possible priority inversion
             mixer = new AudioMixer(frameCount, sampleRate, FastMixerState::kMaxFastTracks);
-            const size_t mixerFrameSize = FCC_2 * audio_bytes_per_sample(mMixerBufferFormat);
+            const size_t mixerFrameSize = mSinkChannelCount
+                    * audio_bytes_per_sample(mMixerBufferFormat);
             mMixerBufferSize = mixerFrameSize * frameCount;
             (void)posix_memalign(&mMixerBuffer, 32, mMixerBufferSize);
-            const size_t sinkFrameSize = FCC_2 * audio_bytes_per_sample(format.mFormat);
+            const size_t sinkFrameSize = mSinkChannelCount
+                    * audio_bytes_per_sample(format.mFormat);
             if (sinkFrameSize > mixerFrameSize) { // need a sink buffer
                 mSinkBufferSize = sinkFrameSize * frameCount;
                 (void)posix_memalign(&mSinkBuffer, 32, mSinkBufferSize);
@@ -244,7 +257,7 @@
                 fastTrackNames[i] = name;
                 mixer->setBufferProvider(name, bufferProvider);
                 mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MAIN_BUFFER,
-                        (void *) mMixerBuffer);
+                        (void *)mMixerBuffer);
                 // newly allocated track names default to full scale volume
                 mixer->setParameter(
                         name,
@@ -252,6 +265,10 @@
                         AudioMixer::MIXER_FORMAT, (void *)mMixerBufferFormat);
                 mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::FORMAT,
                         (void *)(uintptr_t)fastTrack->mFormat);
+                mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::CHANNEL_MASK,
+                        (void *)(uintptr_t)fastTrack->mChannelMask);
+                mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MIXER_CHANNEL_MASK,
+                        (void *)(uintptr_t)mSinkChannelMask);
                 mixer->enable(name);
             }
             generations[i] = fastTrack->mGeneration;
@@ -286,7 +303,9 @@
                     mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::FORMAT,
                             (void *)(uintptr_t)fastTrack->mFormat);
                     mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::CHANNEL_MASK,
-                            (void *)(uintptr_t) fastTrack->mChannelMask);
+                            (void *)(uintptr_t)fastTrack->mChannelMask);
+                    mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MIXER_CHANNEL_MASK,
+                            (void *)(uintptr_t)mSinkChannelMask);
                     // already enabled
                 }
                 generations[i] = fastTrack->mGeneration;
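
The FastMixer hunks above replace the hard-coded stereo (FCC_2) assumption with a sink channel count taken from the NBAIO format, and size the mixer and sink buffers from it. A minimal sketch of that sizing math, using only the system/audio.h helpers already called in the hunks; the channel count, formats, and frame count below are invented example values, not values from this change:

#include <stddef.h>
#include <stdint.h>
#include <system/audio.h>

// Hedged sketch of the buffer sizing done in FastMixer above.
static void sketchSinkSizing(void)
{
    const uint32_t sinkChannelCount = 2;                        // example value
    const size_t frameCount = 256;                              // example value
    const audio_format_t mixerFormat = AUDIO_FORMAT_PCM_FLOAT;  // example: float mixer output
    const audio_format_t sinkFormat  = AUDIO_FORMAT_PCM_16_BIT; // example: 16-bit sink

    // The patch assumes a positional mask can be derived from the count (see its TODO).
    const audio_channel_mask_t sinkMask =
            audio_channel_out_mask_from_count(sinkChannelCount);

    const size_t mixerFrameSize = sinkChannelCount * audio_bytes_per_sample(mixerFormat);
    const size_t sinkFrameSize  = sinkChannelCount * audio_bytes_per_sample(sinkFormat);

    const size_t mixerBufferSize = mixerFrameSize * frameCount;  // 2 * 4 * 256 = 2048 bytes
    // A separate sink buffer is only needed when sink frames are larger than mixer frames;
    // with the example formats above it is not (2 * 2 * 256 = 1024 <= 2048).
    const size_t sinkBufferSize =
            (sinkFrameSize > mixerFrameSize) ? sinkFrameSize * frameCount : 0;

    (void)sinkMask; (void)mixerBufferSize; (void)sinkBufferSize;
}
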
diff --git a/services/audioflinger/FastMixer.h b/services/audioflinger/FastMixer.h
index 4671670..fde8c2b 100644
--- a/services/audioflinger/FastMixer.h
+++ b/services/audioflinger/FastMixer.h
@@ -66,6 +66,8 @@
     void* mSinkBuffer;                  // used for mixer output format translation
                                         // if sink format is different than mixer output.
     size_t mSinkBufferSize;
+    uint32_t mSinkChannelCount;
+    audio_channel_mask_t mSinkChannelMask;
     void* mMixerBuffer;                 // mixer output buffer.
     size_t mMixerBufferSize;
     audio_format_t mMixerBufferFormat;  // mixer output format: AUDIO_FORMAT_PCM_(16_BIT|FLOAT).
diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp
index 6d84296..bf509e7 100644
--- a/services/audioflinger/PatchPanel.cpp
+++ b/services/audioflinger/PatchPanel.cpp
@@ -142,102 +142,172 @@
     ALOGV("createAudioPatch() num_sources %d num_sinks %d handle %d",
           patch->num_sources, patch->num_sinks, *handle);
     status_t status = NO_ERROR;
-
     audio_patch_handle_t halHandle = AUDIO_PATCH_HANDLE_NONE;
-
     sp<AudioFlinger> audioflinger = mAudioFlinger.promote();
     if (audioflinger == 0) {
         return NO_INIT;
     }
+
     if (handle == NULL || patch == NULL) {
         return BAD_VALUE;
     }
-    // limit number of sources to 1 for now
-    if (patch->num_sources == 0 || patch->num_sources > 1 ||
+    // limit number of sources to 1 for now, or 2 for the special cross hw module case.
+    // only the audio policy manager can request a patch creation with 2 sources.
+    if (patch->num_sources == 0 || patch->num_sources > 2 ||
             patch->num_sinks == 0 || patch->num_sinks > AUDIO_PATCH_PORTS_MAX) {
         return BAD_VALUE;
     }
 
-    for (size_t index = 0; *handle != 0 && index < mPatches.size(); index++) {
-        if (*handle == mPatches[index]->mHandle) {
-            ALOGV("createAudioPatch() removing patch handle %d", *handle);
-            halHandle = mPatches[index]->mHalHandle;
-            mPatches.removeAt(index);
-            break;
+    if (*handle != AUDIO_PATCH_HANDLE_NONE) {
+        for (size_t index = 0; *handle != 0 && index < mPatches.size(); index++) {
+            if (*handle == mPatches[index]->mHandle) {
+                ALOGV("createAudioPatch() removing patch handle %d", *handle);
+                halHandle = mPatches[index]->mHalHandle;
+                mPatches.removeAt(index);
+                break;
+            }
         }
     }
 
+    Patch *newPatch = new Patch(patch);
+
     switch (patch->sources[0].type) {
         case AUDIO_PORT_TYPE_DEVICE: {
             // limit number of sinks to 1 for now
             if (patch->num_sinks > 1) {
-                return BAD_VALUE;
+                status = BAD_VALUE;
+                goto exit;
             }
             audio_module_handle_t src_module = patch->sources[0].ext.device.hw_module;
             ssize_t index = audioflinger->mAudioHwDevs.indexOfKey(src_module);
             if (index < 0) {
                 ALOGW("createAudioPatch() bad src hw module %d", src_module);
-                return BAD_VALUE;
+                status = BAD_VALUE;
+                goto exit;
             }
             AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index);
             for (unsigned int i = 0; i < patch->num_sinks; i++) {
                 // reject connection to different sink types
                 if (patch->sinks[i].type != patch->sinks[0].type) {
                     ALOGW("createAudioPatch() different sink types in same patch not supported");
-                    return BAD_VALUE;
+                    status = BAD_VALUE;
+                    goto exit;
                 }
-                // limit to connections between sinks and sources on same HW module
-                if (patch->sinks[i].ext.mix.hw_module != src_module) {
-                    ALOGW("createAudioPatch() cannot connect source on module %d to "
-                            "sink on module %d", src_module, patch->sinks[i].ext.mix.hw_module);
-                    return BAD_VALUE;
-                }
-
-                // limit to connections between devices and output streams for HAL before 3.0
-                if ((audioHwDevice->version() < AUDIO_DEVICE_API_VERSION_3_0) &&
+                // limit to connections between devices and input streams for HAL before 3.0
+                if (patch->sinks[i].ext.mix.hw_module == src_module &&
+                        (audioHwDevice->version() < AUDIO_DEVICE_API_VERSION_3_0) &&
                         (patch->sinks[i].type != AUDIO_PORT_TYPE_MIX)) {
                     ALOGW("createAudioPatch() invalid sink type %d for device source",
                           patch->sinks[i].type);
-                    return BAD_VALUE;
+                    status = BAD_VALUE;
+                    goto exit;
                 }
             }
 
-            if (audioHwDevice->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
-                if (patch->sinks[0].type == AUDIO_PORT_TYPE_MIX) {
-                    sp<ThreadBase> thread = audioflinger->checkRecordThread_l(
-                                                                    patch->sinks[0].ext.mix.handle);
-                    if (thread == 0) {
-                        ALOGW("createAudioPatch() bad capture I/O handle %d",
-                                                                  patch->sinks[0].ext.mix.handle);
-                        return BAD_VALUE;
+            if (patch->sinks[0].ext.device.hw_module != src_module) {
+                // limit to device-to-device connections if not on the same hw module
+                if (patch->sinks[0].type != AUDIO_PORT_TYPE_DEVICE) {
+                    ALOGW("createAudioPatch() invalid sink type for cross hw module");
+                    status = INVALID_OPERATION;
+                    goto exit;
+                }
+                // special case num sources == 2 => reuse an existing output mix to connect to the
+                // sink
+                if (patch->num_sources == 2) {
+                    if (patch->sources[1].type != AUDIO_PORT_TYPE_MIX ||
+                            patch->sinks[0].ext.device.hw_module !=
+                                    patch->sources[1].ext.mix.hw_module) {
+                        ALOGW("createAudioPatch() invalid source combination");
+                        status = INVALID_OPERATION;
+                        goto exit;
                     }
-                    status = thread->sendCreateAudioPatchConfigEvent(patch, &halHandle);
+
+                    sp<ThreadBase> thread =
+                            audioflinger->checkPlaybackThread_l(patch->sources[1].ext.mix.handle);
+                    newPatch->mPlaybackThread = (MixerThread *)thread.get();
+                    if (thread == 0) {
+                        ALOGW("createAudioPatch() cannot get playback thread");
+                        status = INVALID_OPERATION;
+                        goto exit;
+                    }
                 } else {
-                    audio_hw_device_t *hwDevice = audioHwDevice->hwDevice();
-                    status = hwDevice->create_audio_patch(hwDevice,
-                                                           patch->num_sources,
-                                                           patch->sources,
-                                                           patch->num_sinks,
-                                                           patch->sinks,
-                                                           &halHandle);
+                    struct audio_config config;
+                    config.sample_rate = 0;
+                    config.channel_mask = AUDIO_CHANNEL_NONE;
+                    config.format = AUDIO_FORMAT_DEFAULT;
+                    newPatch->mPlaybackThread = audioflinger->openOutput_l(
+                                                             patch->sinks[0].ext.device.hw_module,
+                                                             patch->sinks[0].ext.device.type,
+                                                             &config,
+                                                             AUDIO_OUTPUT_FLAG_NONE);
+                    ALOGV("audioflinger->openOutput_l() returned %p",
+                                          newPatch->mPlaybackThread.get());
+                    if (newPatch->mPlaybackThread == 0) {
+                        status = NO_MEMORY;
+                        goto exit;
+                    }
+                }
+                uint32_t channelCount = newPatch->mPlaybackThread->channelCount();
+                audio_devices_t device = patch->sources[0].ext.device.type;
+                struct audio_config config;
+                audio_channel_mask_t inChannelMask = audio_channel_in_mask_from_count(channelCount);
+                config.sample_rate = newPatch->mPlaybackThread->sampleRate();
+                config.channel_mask = inChannelMask;
+                config.format = newPatch->mPlaybackThread->format();
+                newPatch->mRecordThread = audioflinger->openInput_l(src_module,
+                                                                    device,
+                                                                    &config,
+                                                                    AUDIO_INPUT_FLAG_NONE);
+                ALOGV("audioflinger->openInput_l() returned %p inChannelMask %08x",
+                      newPatch->mRecordThread.get(), inChannelMask);
+                if (newPatch->mRecordThread == 0) {
+                    status = NO_MEMORY;
+                    goto exit;
+                }
+                status = createPatchConnections(newPatch, patch);
+                if (status != NO_ERROR) {
+                    goto exit;
                 }
             } else {
-                sp<ThreadBase> thread = audioflinger->checkRecordThread_l(
-                                                                    patch->sinks[0].ext.mix.handle);
-                if (thread == 0) {
-                    ALOGW("createAudioPatch() bad capture I/O handle %d",
-                                                                  patch->sinks[0].ext.mix.handle);
-                    return BAD_VALUE;
-                }
-                AudioParameter param;
-                param.addInt(String8(AudioParameter::keyRouting),
-                             (int)patch->sources[0].ext.device.type);
-                param.addInt(String8(AudioParameter::keyInputSource),
-                                                     (int)patch->sinks[0].ext.mix.usecase.source);
+                if (audioHwDevice->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
+                    if (patch->sinks[0].type == AUDIO_PORT_TYPE_MIX) {
+                        sp<ThreadBase> thread = audioflinger->checkRecordThread_l(
+                                                                        patch->sinks[0].ext.mix.handle);
+                        if (thread == 0) {
+                            ALOGW("createAudioPatch() bad capture I/O handle %d",
+                                                                      patch->sinks[0].ext.mix.handle);
+                            status = BAD_VALUE;
+                            goto exit;
+                        }
+                        status = thread->sendCreateAudioPatchConfigEvent(patch, &halHandle);
+                    } else {
+                        audio_hw_device_t *hwDevice = audioHwDevice->hwDevice();
+                        status = hwDevice->create_audio_patch(hwDevice,
+                                                               patch->num_sources,
+                                                               patch->sources,
+                                                               patch->num_sinks,
+                                                               patch->sinks,
+                                                               &halHandle);
+                    }
+                } else {
+                    sp<ThreadBase> thread = audioflinger->checkRecordThread_l(
+                                                                        patch->sinks[0].ext.mix.handle);
+                    if (thread == 0) {
+                        ALOGW("createAudioPatch() bad capture I/O handle %d",
+                                                                      patch->sinks[0].ext.mix.handle);
+                        status = BAD_VALUE;
+                        goto exit;
+                    }
+                    AudioParameter param;
+                    param.addInt(String8(AudioParameter::keyRouting),
+                                 (int)patch->sources[0].ext.device.type);
+                    param.addInt(String8(AudioParameter::keyInputSource),
+                                                         (int)patch->sinks[0].ext.mix.usecase.source);
 
-                ALOGV("createAudioPatch() AUDIO_PORT_TYPE_DEVICE setParameters %s",
-                                                                      param.toString().string());
-                status = thread->setParameters(param.toString());
+                    ALOGV("createAudioPatch() AUDIO_PORT_TYPE_DEVICE setParameters %s",
+                                                                          param.toString().string());
+                    status = thread->setParameters(param.toString());
+                }
             }
         } break;
         case AUDIO_PORT_TYPE_MIX: {
@@ -245,18 +315,21 @@
             ssize_t index = audioflinger->mAudioHwDevs.indexOfKey(src_module);
             if (index < 0) {
                 ALOGW("createAudioPatch() bad src hw module %d", src_module);
-                return BAD_VALUE;
+                status = BAD_VALUE;
+                goto exit;
             }
             // limit to connections between devices and output streams
             for (unsigned int i = 0; i < patch->num_sinks; i++) {
                 if (patch->sinks[i].type != AUDIO_PORT_TYPE_DEVICE) {
-                    ALOGW("createAudioPatch() invalid sink type %d for bus source",
+                    ALOGW("createAudioPatch() invalid sink type %d for mix source",
                           patch->sinks[i].type);
-                    return BAD_VALUE;
+                    status = BAD_VALUE;
+                    goto exit;
                 }
                 // limit to connections between sinks and sources on same HW module
                 if (patch->sinks[i].ext.device.hw_module != src_module) {
-                    return BAD_VALUE;
+                    status = BAD_VALUE;
+                    goto exit;
                 }
             }
             AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index);
@@ -265,7 +338,8 @@
             if (thread == 0) {
                 ALOGW("createAudioPatch() bad playback I/O handle %d",
                           patch->sources[0].ext.mix.handle);
-                return BAD_VALUE;
+                status = BAD_VALUE;
+                goto exit;
             }
             if (audioHwDevice->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
                 status = thread->sendCreateAudioPatchConfigEvent(patch, &halHandle);
@@ -281,20 +355,162 @@
 
         } break;
         default:
-            return BAD_VALUE;
+            status = BAD_VALUE;
+            goto exit;
     }
+exit:
     ALOGV("createAudioPatch() status %d", status);
     if (status == NO_ERROR) {
         *handle = audioflinger->nextUniqueId();
-        Patch *newPatch = new Patch(patch);
         newPatch->mHandle = *handle;
         newPatch->mHalHandle = halHandle;
         mPatches.add(newPatch);
         ALOGV("createAudioPatch() added new patch handle %d halHandle %d", *handle, halHandle);
+    } else {
+        clearPatchConnections(newPatch);
+        delete newPatch;
     }
     return status;
 }
 
+status_t AudioFlinger::PatchPanel::createPatchConnections(Patch *patch,
+                                                          const struct audio_patch *audioPatch)
+{
+    // create patch from source device to record thread input
+    struct audio_patch subPatch;
+    subPatch.num_sources = 1;
+    subPatch.sources[0] = audioPatch->sources[0];
+    subPatch.num_sinks = 1;
+
+    patch->mRecordThread->getAudioPortConfig(&subPatch.sinks[0]);
+    subPatch.sinks[0].ext.mix.usecase.source = AUDIO_SOURCE_MIC;
+
+    status_t status = createAudioPatch(&subPatch, &patch->mRecordPatchHandle);
+    if (status != NO_ERROR) {
+        patch->mRecordPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+        return status;
+    }
+
+    // create patch from playback thread output to sink device
+    patch->mPlaybackThread->getAudioPortConfig(&subPatch.sources[0]);
+    subPatch.sinks[0] = audioPatch->sinks[0];
+    status = createAudioPatch(&subPatch, &patch->mPlaybackPatchHandle);
+    if (status != NO_ERROR) {
+        patch->mPlaybackPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+        return status;
+    }
+
+    // use a pseudo LCM between input and output frame counts
+    size_t playbackFrameCount = patch->mPlaybackThread->frameCount();
+    int playbackShift = __builtin_ctz(playbackFrameCount);
+    size_t recordFramecount = patch->mRecordThread->frameCount();
+    int shift = __builtin_ctz(recordFramecount);
+    if (playbackShift < shift) {
+        shift = playbackShift;
+    }
+    size_t frameCount = (playbackFrameCount * recordFramecount) >> shift;
+    ALOGV("createPatchConnections() playframeCount %d recordFramecount %d frameCount %d ",
+          playbackFrameCount, recordFramecount, frameCount);
+
+    // create a special record track to capture from record thread
+    uint32_t channelCount = patch->mPlaybackThread->channelCount();
+    audio_channel_mask_t inChannelMask = audio_channel_in_mask_from_count(channelCount);
+    audio_channel_mask_t outChannelMask = patch->mPlaybackThread->channelMask();
+    uint32_t sampleRate = patch->mPlaybackThread->sampleRate();
+    audio_format_t format = patch->mPlaybackThread->format();
+
+    patch->mPatchRecord = new RecordThread::PatchRecord(
+                                             patch->mRecordThread.get(),
+                                             sampleRate,
+                                             inChannelMask,
+                                             format,
+                                             frameCount,
+                                             NULL,
+                                             IAudioFlinger::TRACK_DEFAULT);
+    if (patch->mPatchRecord == 0) {
+        return NO_MEMORY;
+    }
+    status = patch->mPatchRecord->initCheck();
+    if (status != NO_ERROR) {
+        return status;
+    }
+    patch->mRecordThread->addPatchRecord(patch->mPatchRecord);
+
+    // create a special playback track to render to the playback thread.
+    // this track is given the same buffer as the PatchRecord's buffer
+    patch->mPatchTrack = new PlaybackThread::PatchTrack(
+                                           patch->mPlaybackThread.get(),
+                                           sampleRate,
+                                           outChannelMask,
+                                           format,
+                                           frameCount,
+                                           patch->mPatchRecord->buffer(),
+                                           IAudioFlinger::TRACK_DEFAULT);
+    if (patch->mPatchTrack == 0) {
+        return NO_MEMORY;
+    }
+    status = patch->mPatchTrack->initCheck();
+    if (status != NO_ERROR) {
+        return status;
+    }
+    patch->mPlaybackThread->addPatchTrack(patch->mPatchTrack);
+
+    // tie playback and record tracks together
+    patch->mPatchRecord->setPeerProxy(patch->mPatchTrack.get());
+    patch->mPatchTrack->setPeerProxy(patch->mPatchRecord.get());
+
+    // start capture and playback
+    patch->mPatchRecord->start(AudioSystem::SYNC_EVENT_NONE, 0);
+    patch->mPatchTrack->start();
+
+    return status;
+}
+
+void AudioFlinger::PatchPanel::clearPatchConnections(Patch *patch)
+{
+    sp<AudioFlinger> audioflinger = mAudioFlinger.promote();
+    if (audioflinger == 0) {
+        return;
+    }
+
+    ALOGV("clearPatchConnections() patch->mRecordPatchHandle %d patch->mPlaybackPatchHandle %d",
+          patch->mRecordPatchHandle, patch->mPlaybackPatchHandle);
+
+    if (patch->mPatchRecord != 0) {
+        patch->mPatchRecord->stop();
+    }
+    if (patch->mPatchTrack != 0) {
+        patch->mPatchTrack->stop();
+    }
+    if (patch->mRecordPatchHandle != AUDIO_PATCH_HANDLE_NONE) {
+        releaseAudioPatch(patch->mRecordPatchHandle);
+        patch->mRecordPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+    }
+    if (patch->mPlaybackPatchHandle != AUDIO_PATCH_HANDLE_NONE) {
+        releaseAudioPatch(patch->mPlaybackPatchHandle);
+        patch->mPlaybackPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+    }
+    if (patch->mRecordThread != 0) {
+        if (patch->mPatchRecord != 0) {
+            patch->mRecordThread->deletePatchRecord(patch->mPatchRecord);
+            patch->mPatchRecord.clear();
+        }
+        audioflinger->closeInputInternal_l(patch->mRecordThread);
+        patch->mRecordThread.clear();
+    }
+    if (patch->mPlaybackThread != 0) {
+        if (patch->mPatchTrack != 0) {
+            patch->mPlaybackThread->deletePatchTrack(patch->mPatchTrack);
+            patch->mPatchTrack.clear();
+        }
+        // if num sources == 2 we are reusing an existing playback thread so we do not close it
+        if (patch->mAudioPatch.num_sources != 2) {
+            audioflinger->closeOutputInternal_l(patch->mPlaybackThread);
+        }
+        patch->mPlaybackThread.clear();
+    }
+}
+
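
The "pseudo LCM" in createPatchConnections() multiplies the two frame counts and then shifts out the smaller power of two common to both, which yields a buffer size both threads divide evenly without a full GCD computation. A worked example with invented frame counts:

// Hedged example only; 960 and 512 are made-up frame counts.
size_t playbackFrameCount = 960;                          // 960 = 2^6 * 15
size_t recordFramecount   = 512;                          // 512 = 2^9
int playbackShift = __builtin_ctz(playbackFrameCount);    // 6
int shift = __builtin_ctz(recordFramecount);              // 9
if (playbackShift < shift) {
    shift = playbackShift;                                // keep the smaller shift: 6
}
size_t frameCount = (playbackFrameCount * recordFramecount) >> shift;
// frameCount = 960 * 512 / 64 = 7680, a multiple of both 960 (x8) and 512 (x15)
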
 /* Disconnect a patch */
 status_t AudioFlinger::PatchPanel::releaseAudioPatch(audio_patch_handle_t handle)
 {
@@ -315,8 +531,10 @@
     if (index == mPatches.size()) {
         return BAD_VALUE;
     }
+    Patch *removedPatch = mPatches[index];
+    mPatches.removeAt(index);
 
-    struct audio_patch *patch = &mPatches[index]->mAudioPatch;
+    struct audio_patch *patch = &removedPatch->mAudioPatch;
 
     switch (patch->sources[0].type) {
         case AUDIO_PORT_TYPE_DEVICE: {
@@ -327,13 +545,20 @@
                 status = BAD_VALUE;
                 break;
             }
+
+            if (patch->sinks[0].type == AUDIO_PORT_TYPE_DEVICE &&
+                    patch->sinks[0].ext.device.hw_module != src_module) {
+                clearPatchConnections(removedPatch);
+                break;
+            }
+
             AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index);
             if (audioHwDevice->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
                 if (patch->sinks[0].type == AUDIO_PORT_TYPE_MIX) {
                     sp<ThreadBase> thread = audioflinger->checkRecordThread_l(
                                                                     patch->sinks[0].ext.mix.handle);
                     if (thread == 0) {
-                        ALOGW("createAudioPatch() bad capture I/O handle %d",
+                        ALOGW("releaseAudioPatch() bad capture I/O handle %d",
                                                                   patch->sinks[0].ext.mix.handle);
                         status = BAD_VALUE;
                         break;
@@ -389,8 +614,7 @@
             break;
     }
 
-    delete (mPatches[index]);
-    mPatches.removeAt(index);
+    delete removedPatch;
     return status;
 }
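
For context, a hedged illustration of the two-source, cross hw module request shape that the reworked createAudioPatch() accepts; the field names come from struct audio_patch in system/audio.h, and every handle and device type below is a placeholder rather than a value taken from this change:

#include <system/audio.h>

static void sketchCrossModulePatchRequest(void)
{
    audio_module_handle_t srcModule = 0, sinkModule = 0;    // placeholder module handles
    audio_io_handle_t outputIoHandle = 0;                   // placeholder playback I/O handle

    struct audio_patch patch;
    patch.num_sources = 2;
    // source 0: the capture-side device, on the source hw module
    patch.sources[0].type = AUDIO_PORT_TYPE_DEVICE;
    patch.sources[0].ext.device.hw_module = srcModule;
    patch.sources[0].ext.device.type = AUDIO_DEVICE_IN_BUILTIN_MIC;    // placeholder device
    // source 1: an existing output mix to reuse; must be on the same module as the sink
    patch.sources[1].type = AUDIO_PORT_TYPE_MIX;
    patch.sources[1].ext.mix.hw_module = sinkModule;
    patch.sources[1].ext.mix.handle = outputIoHandle;
    patch.num_sinks = 1;
    patch.sinks[0].type = AUDIO_PORT_TYPE_DEVICE;
    patch.sinks[0].ext.device.hw_module = sinkModule;       // differs from srcModule
    patch.sinks[0].ext.device.type = AUDIO_DEVICE_OUT_SPEAKER;         // placeholder device
    audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;  // request a brand new patch

    (void)patch; (void)handle;
}

With only one source, the same path instead opens a new output on the sink's module via openOutput_l(), as the hunk shows.
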
 
diff --git a/services/audioflinger/PatchPanel.h b/services/audioflinger/PatchPanel.h
index 7f78621..e31179c 100644
--- a/services/audioflinger/PatchPanel.h
+++ b/services/audioflinger/PatchPanel.h
@@ -21,6 +21,9 @@
 
 class PatchPanel : public RefBase {
 public:
+
+    class Patch;
+
     PatchPanel(const sp<AudioFlinger>& audioFlinger);
     virtual ~PatchPanel();
 
@@ -45,16 +48,31 @@
     /* Set audio port configuration */
     status_t setAudioPortConfig(const struct audio_port_config *config);
 
+    status_t createPatchConnections(Patch *patch,
+                                    const struct audio_patch *audioPatch);
+    void clearPatchConnections(Patch *patch);
+
     class Patch {
     public:
         Patch(const struct audio_patch *patch) :
-            mAudioPatch(*patch), mHandle(0), mHalHandle(0) {}
+            mAudioPatch(*patch), mHandle(AUDIO_PATCH_HANDLE_NONE),
+            mHalHandle(AUDIO_PATCH_HANDLE_NONE), mRecordPatchHandle(AUDIO_PATCH_HANDLE_NONE),
+            mPlaybackPatchHandle(AUDIO_PATCH_HANDLE_NONE) {}
+        ~Patch() {}
 
-        struct audio_patch mAudioPatch;
-        audio_patch_handle_t mHandle;
-        audio_patch_handle_t mHalHandle;
+        struct audio_patch              mAudioPatch;
+        audio_patch_handle_t            mHandle;
+        audio_patch_handle_t            mHalHandle;
+        sp<PlaybackThread>              mPlaybackThread;
+        sp<PlaybackThread::PatchTrack>  mPatchTrack;
+        sp<RecordThread>                mRecordThread;
+        sp<RecordThread::PatchRecord>   mPatchRecord;
+        audio_patch_handle_t            mRecordPatchHandle;
+        audio_patch_handle_t            mPlaybackPatchHandle;
+
     };
+
 private:
-    const wp<AudioFlinger>  mAudioFlinger;
-    SortedVector <Patch *> mPatches;
+    const wp<AudioFlinger>      mAudioFlinger;
+    SortedVector <Patch *>      mPatches;
 };
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index 79bdfe8..ee48276 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -29,10 +29,12 @@
                                 audio_format_t format,
                                 audio_channel_mask_t channelMask,
                                 size_t frameCount,
+                                void *buffer,
                                 const sp<IMemory>& sharedBuffer,
                                 int sessionId,
                                 int uid,
-                                IAudioFlinger::track_flags_t flags);
+                                IAudioFlinger::track_flags_t flags,
+                                track_type type);
     virtual             ~Track();
     virtual status_t    initCheck() const;
 
@@ -100,10 +102,6 @@
     bool isResumePending();
     void resumeAck();
 
-    bool isOutputTrack() const {
-        return (mStreamType == AUDIO_STREAM_CNT);
-    }
-
     sp<IMemory> sharedBuffer() const { return mSharedBuffer; }
 
     // framesWritten is cumulative, never reset, and is shared all tracks
@@ -115,7 +113,6 @@
     void triggerEvents(AudioSystem::sync_event_t type);
     void invalidate();
     bool isInvalid() const { return mIsInvalid; }
-    virtual bool isTimedTrack() const { return false; }
     int fastIndex() const { return mFastIndex; }
 
 protected:
@@ -163,7 +160,6 @@
     bool                mPreviousValid;
     uint32_t            mPreviousFramesWritten;
     AudioTimestamp      mPreviousTimestamp;
-
 };  // end of Track
 
 class TimedTrack : public Track {
@@ -195,7 +191,6 @@
     };
 
     // Mixer facing methods.
-    virtual bool isTimedTrack() const { return true; }
     virtual size_t framesReady() const;
 
     // AudioBufferProvider interface
@@ -296,3 +291,34 @@
     DuplicatingThread* const mSourceThread; // for waitTimeMs() in write()
     AudioTrackClientProxy*      mClientProxy;
 };  // end of OutputTrack
+
+// playback track, used by PatchPanel
+class PatchTrack : public Track, public PatchProxyBufferProvider {
+public:
+
+                        PatchTrack(PlaybackThread *playbackThread,
+                                   uint32_t sampleRate,
+                                   audio_channel_mask_t channelMask,
+                                   audio_format_t format,
+                                   size_t frameCount,
+                                   void *buffer,
+                                   IAudioFlinger::track_flags_t flags);
+    virtual             ~PatchTrack();
+
+    // AudioBufferProvider interface
+    virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer,
+                                   int64_t pts);
+    virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
+
+    // PatchProxyBufferProvider interface
+    virtual status_t    obtainBuffer(Proxy::Buffer* buffer,
+                                     const struct timespec *timeOut = NULL);
+    virtual void        releaseBuffer(Proxy::Buffer* buffer);
+
+            void setPeerProxy(PatchProxyBufferProvider *proxy) { mPeerProxy = proxy; }
+
+private:
+    sp<ClientProxy>             mProxy;
+    PatchProxyBufferProvider*   mPeerProxy;
+    struct timespec             mPeerTimeout;
+};  // end of PatchTrack
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index fe15571..204a9d6 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -28,9 +28,11 @@
                                 audio_format_t format,
                                 audio_channel_mask_t channelMask,
                                 size_t frameCount,
+                                void *buffer,
                                 int sessionId,
                                 int uid,
-                                IAudioFlinger::track_flags_t flags);
+                                IAudioFlinger::track_flags_t flags,
+                                track_type type);
     virtual             ~RecordTrack();
 
     virtual status_t    start(AudioSystem::sync_event_t event, int triggerSession);
@@ -93,3 +95,34 @@
             // used by resampler to find source frames
             ResamplerBufferProvider *mResamplerBufferProvider;
 };
+
+// record track, used by PatchPanel
+class PatchRecord : virtual public RecordTrack, public PatchProxyBufferProvider {
+public:
+
+    PatchRecord(RecordThread *recordThread,
+                uint32_t sampleRate,
+                audio_channel_mask_t channelMask,
+                audio_format_t format,
+                size_t frameCount,
+                void *buffer,
+                IAudioFlinger::track_flags_t flags);
+    virtual             ~PatchRecord();
+
+    // AudioBufferProvider interface
+    virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer,
+                                   int64_t pts);
+    virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
+
+    // PatchProxyBufferProvider interface
+    virtual status_t    obtainBuffer(Proxy::Buffer *buffer,
+                                     const struct timespec *timeOut = NULL);
+    virtual void        releaseBuffer(Proxy::Buffer *buffer);
+
+    void setPeerProxy(PatchProxyBufferProvider *proxy) { mPeerProxy = proxy; }
+
+private:
+    sp<ClientProxy>             mProxy;
+    PatchProxyBufferProvider*   mPeerProxy;
+    struct timespec             mPeerTimeout;
+};  // end of PatchRecord
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index e0b664b..c3aafd9 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -910,6 +910,15 @@
         goto Exit;
     }
 
+    // Reject any effect on multichannel sinks.
+    // TODO: fix both format and multichannel issues with effects.
+    if (mChannelCount != FCC_2) {
+        ALOGW("createEffect_l() Cannot add effect %s for multichannel(%d) thread",
+                desc->name, mChannelCount);
+        lStatus = BAD_VALUE;
+        goto Exit;
+    }
+
     // Allow global effects only on offloaded and mixer threads
     if (sessionId == AUDIO_SESSION_OUTPUT_MIX) {
         switch (mType) {
@@ -1146,6 +1155,18 @@
     }
 }
 
+void AudioFlinger::ThreadBase::getAudioPortConfig(struct audio_port_config *config)
+{
+    config->type = AUDIO_PORT_TYPE_MIX;
+    config->ext.mix.handle = mId;
+    config->sample_rate = mSampleRate;
+    config->format = mFormat;
+    config->channel_mask = mChannelMask;
+    config->config_mask = AUDIO_PORT_CONFIG_SAMPLE_RATE|AUDIO_PORT_CONFIG_CHANNEL_MASK|
+                            AUDIO_PORT_CONFIG_FORMAT;
+}
+
+
 // ----------------------------------------------------------------------------
 //      Playback
 // ----------------------------------------------------------------------------
@@ -1376,9 +1397,10 @@
             ) &&
             // PCM data
             audio_is_linear_pcm(format) &&
-            // mono or stereo
-            ( (channelMask == AUDIO_CHANNEL_OUT_MONO) ||
-              (channelMask == AUDIO_CHANNEL_OUT_STEREO) ) &&
+            // identical channel mask to sink, or mono in and stereo sink
+            (channelMask == mChannelMask ||
+                    (channelMask == AUDIO_CHANNEL_OUT_MONO &&
+                            mChannelMask == AUDIO_CHANNEL_OUT_STEREO)) &&
             // hardware sample rate
             (sampleRate == mSampleRate) &&
             // normal mixer has an associated fast mixer
@@ -1482,7 +1504,7 @@
         uint32_t strategy = AudioSystem::getStrategyForStream(streamType);
         for (size_t i = 0; i < mTracks.size(); ++i) {
             sp<Track> t = mTracks[i];
-            if (t != 0 && !t->isOutputTrack()) {
+            if (t != 0 && t->isExternalTrack()) {
                 uint32_t actual = AudioSystem::getStrategyForStream(t->streamType());
                 if (sessionId == t->sessionId() && strategy != actual) {
                     ALOGE("createTrack_l() mismatched strategy; expected %u but found %u",
@@ -1495,7 +1517,8 @@
 
         if (!isTimed) {
             track = new Track(this, client, streamType, sampleRate, format,
-                    channelMask, frameCount, sharedBuffer, sessionId, uid, *flags);
+                              channelMask, frameCount, NULL, sharedBuffer,
+                              sessionId, uid, *flags, TrackBase::TYPE_DEFAULT);
         } else {
             track = TimedTrack::create(this, client, streamType, sampleRate, format,
                     channelMask, frameCount, sharedBuffer, sessionId, uid);
@@ -1608,7 +1631,7 @@
         // the track is newly added, make sure it fills up all its
         // buffers before playing. This is to ensure the client will
         // effectively get the latency it requested.
-        if (!track->isOutputTrack()) {
+        if (track->isExternalTrack()) {
             TrackBase::track_state state = track->mState;
             mLock.unlock();
             status = AudioSystem::startOutput(mId, track->streamType(), track->sessionId());
@@ -1801,9 +1824,10 @@
     if (!audio_is_output_channel(mChannelMask)) {
         LOG_ALWAYS_FATAL("HAL channel mask %#x not valid for output", mChannelMask);
     }
-    if ((mType == MIXER || mType == DUPLICATING) && mChannelMask != AUDIO_CHANNEL_OUT_STEREO) {
-        LOG_ALWAYS_FATAL("HAL channel mask %#x not supported for mixed output; "
-                "must be AUDIO_CHANNEL_OUT_STEREO", mChannelMask);
+    if ((mType == MIXER || mType == DUPLICATING)
+            && !isValidPcmSinkChannelMask(mChannelMask)) {
+        LOG_ALWAYS_FATAL("HAL channel mask %#x not supported for mixed output",
+                mChannelMask);
     }
     mChannelCount = audio_channel_count_from_out_mask(mChannelMask);
     mHALFormat = mOutput->stream->common.get_format(&mOutput->stream->common);
@@ -2044,7 +2068,7 @@
     if (count > 0) {
         for (size_t i = 0 ; i < count ; i++) {
             const sp<Track>& track = tracksToRemove.itemAt(i);
-            if (!track->isOutputTrack()) {
+            if (track->isExternalTrack()) {
                 AudioSystem::stopOutput(mId, track->streamType(), track->sessionId());
 #ifdef ADD_BATTERY_DATA
                 // to track the speaker usage
@@ -2713,6 +2737,26 @@
     return status;
 }
 
+void AudioFlinger::PlaybackThread::addPatchTrack(const sp<PatchTrack>& track)
+{
+    Mutex::Autolock _l(mLock);
+    mTracks.add(track);
+}
+
+void AudioFlinger::PlaybackThread::deletePatchTrack(const sp<PatchTrack>& track)
+{
+    Mutex::Autolock _l(mLock);
+    destroyTrack_l(track);
+}
+
+void AudioFlinger::PlaybackThread::getAudioPortConfig(struct audio_port_config *config)
+{
+    ThreadBase::getAudioPortConfig(config);
+    config->role = AUDIO_PORT_ROLE_SOURCE;
+    config->ext.mix.hw_module = mOutput->audioHwDev->handle();
+    config->ext.mix.usecase.stream = AUDIO_STREAM_DEFAULT;
+}
+
 // ----------------------------------------------------------------------------
 
 AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
@@ -2732,11 +2776,6 @@
             mNormalFrameCount);
     mAudioMixer = new AudioMixer(mNormalFrameCount, mSampleRate);
 
-    // FIXME - Current mixer implementation only supports stereo output
-    if (mChannelCount != FCC_2) {
-        ALOGE("Invalid audio hardware channel count %d", mChannelCount);
-    }
-
     // create an NBAIO sink for the HAL output stream, and negotiate
     mOutputSink = new AudioStreamOutSink(output->stream);
     size_t numCounterOffers = 0;
@@ -3459,6 +3498,10 @@
                 name,
                 AudioMixer::TRACK,
                 AudioMixer::CHANNEL_MASK, (void *)(uintptr_t)track->channelMask());
+            mAudioMixer->setParameter(
+                name,
+                AudioMixer::TRACK,
+                AudioMixer::MIXER_CHANNEL_MASK, (void *)(uintptr_t)mChannelMask);
             // limit track sample rate to 2 x output sample rate, which changes at re-configuration
             uint32_t maxSampleRate = mSampleRate * 2;
             uint32_t reqSampleRate = track->mAudioTrackServerProxy->getSampleRate();
@@ -3697,7 +3740,7 @@
         reconfig = true;
     }
     if (param.getInt(String8(AudioParameter::keyFormat), value) == NO_ERROR) {
-        if ((audio_format_t) value != AUDIO_FORMAT_PCM_16_BIT) {
+        if (!isValidPcmSinkFormat((audio_format_t) value)) {
             status = BAD_VALUE;
         } else {
             // no need to save value, since it's constant
@@ -3705,7 +3748,7 @@
         }
     }
     if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) {
-        if ((audio_channel_mask_t) value != AUDIO_CHANNEL_OUT_STEREO) {
+        if (!isValidPcmSinkChannelMask((audio_channel_mask_t) value)) {
             status = BAD_VALUE;
         } else {
             // no need to save value, since it's constant
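
isValidPcmSinkFormat() and isValidPcmSinkChannelMask() themselves are not shown in this diff. As a rough, assumed sketch of what such checks amount to (the 8-channel ceiling mirrors the AudioMixer::MAX_NUM_CHANNELS guard used in FastMixer above and is an assumption here, not the helpers' actual code):

#include <system/audio.h>

// Assumed sketches, not the real AudioFlinger helpers.
static bool sketchValidPcmSinkFormat(audio_format_t format)
{
    return format == AUDIO_FORMAT_PCM_16_BIT || format == AUDIO_FORMAT_PCM_FLOAT;
}

static bool sketchValidPcmSinkChannelMask(audio_channel_mask_t mask)
{
    if (!audio_is_output_channel(mask)) {        // positional output masks only
        return false;
    }
    const uint32_t count = audio_channel_count_from_out_mask(mask);
    return count >= 1 && count <= 8;             // assumed mixer channel limit
}
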
@@ -5523,8 +5566,8 @@
         Mutex::Autolock _l(mLock);
 
         track = new RecordTrack(this, client, sampleRate,
-                      format, channelMask, frameCount, sessionId, uid,
-                      *flags);
+                      format, channelMask, frameCount, NULL, sessionId, uid,
+                      *flags, TrackBase::TYPE_DEFAULT);
 
         lStatus = track->initCheck();
         if (lStatus != NO_ERROR) {
@@ -5601,15 +5644,19 @@
         recordTrack->mState = TrackBase::STARTING_1;
         mActiveTracks.add(recordTrack);
         mActiveTracksGen++;
-        mLock.unlock();
-        status_t status = AudioSystem::startInput(mId);
-        mLock.lock();
-        // FIXME should verify that recordTrack is still in mActiveTracks
-        if (status != NO_ERROR) {
-            mActiveTracks.remove(recordTrack);
-            mActiveTracksGen++;
-            recordTrack->clearSyncStartEvent();
-            return status;
+        status_t status = NO_ERROR;
+        if (recordTrack->isExternalTrack()) {
+            mLock.unlock();
+            status = AudioSystem::startInput(mId);
+            mLock.lock();
+            // FIXME should verify that recordTrack is still in mActiveTracks
+            if (status != NO_ERROR) {
+                mActiveTracks.remove(recordTrack);
+                mActiveTracksGen++;
+                recordTrack->clearSyncStartEvent();
+                ALOGV("RecordThread::start error %d", status);
+                return status;
+            }
         }
         // Catch up with current buffer indices if thread is already running.
         // This is what makes a new client discard all buffered data.  If the track's mRsmpInFront
@@ -5634,7 +5681,9 @@
     }
 
 startError:
-    AudioSystem::stopInput(mId);
+    if (recordTrack->isExternalTrack()) {
+        AudioSystem::stopInput(mId);
+    }
     recordTrack->clearSyncStartEvent();
     // FIXME I wonder why we do not reset the state here?
     return status;
@@ -6177,5 +6226,24 @@
     return status;
 }
 
+void AudioFlinger::RecordThread::addPatchRecord(const sp<PatchRecord>& record)
+{
+    Mutex::Autolock _l(mLock);
+    mTracks.add(record);
+}
+
+void AudioFlinger::RecordThread::deletePatchRecord(const sp<PatchRecord>& record)
+{
+    Mutex::Autolock _l(mLock);
+    destroyTrack_l(record);
+}
+
+void AudioFlinger::RecordThread::getAudioPortConfig(struct audio_port_config *config)
+{
+    ThreadBase::getAudioPortConfig(config);
+    config->role = AUDIO_PORT_ROLE_SINK;
+    config->ext.mix.hw_module = mInput->audioHwDev->handle();
+    config->ext.mix.usecase.source = mAudioSource;
+}
 
 }; // namespace android
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 3b7257b..648502b 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -235,6 +235,7 @@
                 uint32_t    sampleRate() const { return mSampleRate; }
                 audio_channel_mask_t channelMask() const { return mChannelMask; }
                 audio_format_t format() const { return mHALFormat; }
+                uint32_t channelCount() const { return mChannelCount; }
                 // Called by AudioFlinger::frameCount(audio_io_handle_t output) and effects,
                 // and returns the [normal mix] buffer's frame count.
     virtual     size_t      frameCount() const = 0;
@@ -264,6 +265,7 @@
     virtual     status_t    createAudioPatch_l(const struct audio_patch *patch,
                                                audio_patch_handle_t *handle) = 0;
     virtual     status_t    releaseAudioPatch_l(const audio_patch_handle_t handle) = 0;
+    virtual     void        getAudioPortConfig(struct audio_port_config *config) = 0;
 
 
                 // see note at declaration of mStandby, mOutDevice and mInDevice
@@ -589,7 +591,12 @@
                 // Return's the HAL's frame count i.e. fast mixer buffer size.
                 size_t      frameCountHAL() const { return mFrameCount; }
 
-                status_t         getTimestamp_l(AudioTimestamp& timestamp);
+                status_t    getTimestamp_l(AudioTimestamp& timestamp);
+
+                void        addPatchTrack(const sp<PatchTrack>& track);
+                void        deletePatchTrack(const sp<PatchTrack>& track);
+
+    virtual     void        getAudioPortConfig(struct audio_port_config *config);
 
 protected:
     // updated by readOutputParameters_l()
@@ -876,6 +883,7 @@
                               ALOG_ASSERT(fastIndex < FastMixerState::kMaxFastTracks);
                               return mFastMixerDumpState.mTracks[fastIndex].mUnderruns;
                             }
+
 };
 
 class DirectOutputThread : public PlaybackThread {
@@ -1103,6 +1111,10 @@
     virtual status_t    createAudioPatch_l(const struct audio_patch *patch,
                                            audio_patch_handle_t *handle);
     virtual status_t    releaseAudioPatch_l(const audio_patch_handle_t handle);
+
+            void        addPatchRecord(const sp<PatchRecord>& record);
+            void        deletePatchRecord(const sp<PatchRecord>& record);
+
             void        readInputParameters_l();
     virtual uint32_t    getInputFramesLost();
 
@@ -1122,6 +1134,7 @@
 
     virtual size_t      frameCount() const { return mFrameCount; }
             bool        hasFastCapture() const { return mFastCapture != 0; }
+    virtual void        getAudioPortConfig(struct audio_port_config *config);
 
 private:
             // Enter standby if not already in standby, and set mStandby flag
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index 4cba3fd..864daa5 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -44,6 +44,15 @@
         ALLOC_CBLK,     // allocate immediately after control block
         ALLOC_READONLY, // allocate from a separate read-only heap per thread
         ALLOC_PIPE,     // do not allocate; use the pipe buffer
+        ALLOC_LOCAL,    // allocate a local buffer
+        ALLOC_NONE,     // do not allocate: use the buffer passed to the TrackBase constructor
+    };
+
+    enum track_type {
+        TYPE_DEFAULT,
+        TYPE_TIMED,
+        TYPE_OUTPUT,
+        TYPE_PATCH,
     };
 
                         TrackBase(ThreadBase *thread,
@@ -52,14 +61,15 @@
                                 audio_format_t format,
                                 audio_channel_mask_t channelMask,
                                 size_t frameCount,
-                                const sp<IMemory>& sharedBuffer,
+                                void *buffer,
                                 int sessionId,
                                 int uid,
                                 IAudioFlinger::track_flags_t flags,
                                 bool isOut,
-                                alloc_type alloc = ALLOC_CBLK);
+                                alloc_type alloc = ALLOC_CBLK,
+                                track_type type = TYPE_DEFAULT);
     virtual             ~TrackBase();
-    virtual status_t    initCheck() const { return getCblk() != 0 ? NO_ERROR : NO_MEMORY; }
+    virtual status_t    initCheck() const;
 
     virtual status_t    start(AudioSystem::sync_event_t event,
                              int triggerSession) = 0;
@@ -71,7 +81,12 @@
     virtual status_t    setSyncEvent(const sp<SyncEvent>& event);
 
             sp<IMemory> getBuffers() const { return mBufferMemory; }
+            void*       buffer() const { return mBuffer; }
             bool        isFastTrack() const { return (mFlags & IAudioFlinger::TRACK_FAST) != 0; }
+            bool        isTimedTrack() const { return (mType == TYPE_TIMED); }
+            bool        isOutputTrack() const { return (mType == TYPE_OUTPUT); }
+            bool        isPatchTrack() const { return (mType == TYPE_PATCH); }
+            bool        isExternalTrack() const { return !isOutputTrack() && !isPatchTrack(); }
 
 protected:
                         TrackBase(const TrackBase&);
@@ -150,4 +165,18 @@
     sp<NBAIO_Sink>      mTeeSink;
     sp<NBAIO_Source>    mTeeSource;
     bool                mTerminated;
+    track_type          mType;      // must be one of TYPE_DEFAULT, TYPE_OUTPUT, TYPE_PATCH ...
+};
+
+// The PatchProxyBufferProvider interface is implemented by PatchTrack and PatchRecord.
+// It provides buffer access methods that mirror those of a ClientProxy (see AudioTrackShared.h).
+class PatchProxyBufferProvider
+{
+public:
+
+    virtual ~PatchProxyBufferProvider() {}
+
+    virtual status_t    obtainBuffer(Proxy::Buffer* buffer,
+                                     const struct timespec *requested = NULL) = 0;
+    virtual void        releaseBuffer(Proxy::Buffer* buffer) = 0;
 };
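
The PatchProxyBufferProvider pair added above lets each endpoint block on its peer with a bounded wait: the PatchTrack pulls its frames from the PatchRecord's proxy and vice versa, over the buffer they share. A standalone, simplified sketch of that handshake in portable C++ (an analogy only; in AudioFlinger the ClientProxy objects do the real buffer accounting):

#include <algorithm>
#include <chrono>
#include <condition_variable>
#include <cstddef>
#include <mutex>

// One direction of the peer handshake: a producer publishes frames, and the consumer's
// obtain() waits up to a timeout for its peer to make frames available.
struct PeerFrames {
    std::mutex lock;
    std::condition_variable cv;
    size_t framesAvailable = 0;

    // Mirrors the role of obtainBuffer(buffer, timeOut): may return 0 on timeout,
    // which the caller treats like WOULD_BLOCK.
    size_t obtain(size_t wanted, std::chrono::nanoseconds timeout) {
        std::unique_lock<std::mutex> l(lock);
        cv.wait_for(l, timeout, [&] { return framesAvailable > 0; });
        const size_t granted = std::min(wanted, framesAvailable);
        framesAvailable -= granted;
        return granted;
    }

    // Mirrors the role of releaseBuffer() on the producing side: hand frames to the peer.
    void publish(size_t frames) {
        {
            std::lock_guard<std::mutex> l(lock);
            framesAvailable += frames;
        }
        cv.notify_one();
    }
};
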
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index af761e4..e81697f 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -68,12 +68,13 @@
             audio_format_t format,
             audio_channel_mask_t channelMask,
             size_t frameCount,
-            const sp<IMemory>& sharedBuffer,
+            void *buffer,
             int sessionId,
             int clientUid,
             IAudioFlinger::track_flags_t flags,
             bool isOut,
-            alloc_type alloc)
+            alloc_type alloc,
+            track_type type)
     :   RefBase(),
         mThread(thread),
         mClient(client),
@@ -94,7 +95,8 @@
         mIsOut(isOut),
         mServerProxy(NULL),
         mId(android_atomic_inc(&nextTrackId)),
-        mTerminated(false)
+        mTerminated(false),
+        mType(type)
 {
     // if the caller is us, trust the specified uid
     if (IPCThreadState::self()->getCallingPid() != getpid_cached || clientUid == -1) {
@@ -108,16 +110,10 @@
     // battery usage on it.
     mUid = clientUid;
 
-    // client == 0 implies sharedBuffer == 0
-    ALOG_ASSERT(!(client == 0 && sharedBuffer != 0));
-
-    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
-            sharedBuffer->size());
-
     // ALOGD("Creating track with %d buffers @ %d bytes", bufferCount, bufferSize);
     size_t size = sizeof(audio_track_cblk_t);
-    size_t bufferSize = (sharedBuffer == 0 ? roundup(frameCount) : frameCount) * mFrameSize;
-    if (sharedBuffer == 0 && alloc == ALLOC_CBLK) {
+    size_t bufferSize = (buffer == NULL ? roundup(frameCount) : frameCount) * mFrameSize;
+    if (buffer == NULL && alloc == ALLOC_CBLK) {
         size += bufferSize;
     }
 
@@ -166,16 +162,22 @@
             break;
         case ALLOC_CBLK:
             // clear all buffers
-            if (sharedBuffer == 0) {
+            if (buffer == NULL) {
                 mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t);
                 memset(mBuffer, 0, bufferSize);
             } else {
-                mBuffer = sharedBuffer->pointer();
+                mBuffer = buffer;
 #if 0
                 mCblk->mFlags = CBLK_FORCEREADY;    // FIXME hack, need to fix the track ready logic
 #endif
             }
             break;
+        case ALLOC_LOCAL:
+            mBuffer = calloc(1, bufferSize);
+            break;
+        case ALLOC_NONE:
+            mBuffer = buffer;
+            break;
         }
 
 #ifdef TEE_SINK
@@ -200,6 +202,17 @@
     }
 }
 
+status_t AudioFlinger::ThreadBase::TrackBase::initCheck() const
+{
+    status_t status;
+    if (mType == TYPE_OUTPUT || mType == TYPE_PATCH) {
+        status = cblk() != NULL ? NO_ERROR : NO_MEMORY;
+    } else {
+        status = getCblk() != 0 ? NO_ERROR : NO_MEMORY;
+    }
+    return status;
+}
+
 AudioFlinger::ThreadBase::TrackBase::~TrackBase()
 {
 #ifdef TEE_SINK
@@ -364,12 +377,17 @@
             audio_format_t format,
             audio_channel_mask_t channelMask,
             size_t frameCount,
+            void *buffer,
             const sp<IMemory>& sharedBuffer,
             int sessionId,
             int uid,
-            IAudioFlinger::track_flags_t flags)
-    :   TrackBase(thread, client, sampleRate, format, channelMask, frameCount, sharedBuffer,
-            sessionId, uid, flags, true /*isOut*/),
+            IAudioFlinger::track_flags_t flags,
+            track_type type)
+    :   TrackBase(thread, client, sampleRate, format, channelMask, frameCount,
+                  (sharedBuffer != 0) ? sharedBuffer->pointer() : buffer,
+                  sessionId, uid, flags, true /*isOut*/,
+                  (type == TYPE_PATCH) ? ( buffer == NULL ? ALLOC_LOCAL : ALLOC_NONE) : ALLOC_CBLK,
+                  type),
     mFillingUpStatus(FS_INVALID),
     // mRetryCount initialized later when needed
     mSharedBuffer(sharedBuffer),
@@ -389,13 +407,19 @@
     mPreviousFramesWritten(0)
     // mPreviousTimestamp
 {
+    // client == 0 implies sharedBuffer == 0
+    ALOG_ASSERT(!(client == 0 && sharedBuffer != 0));
+
+    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
+            sharedBuffer->size());
+
     if (mCblk == NULL) {
         return;
     }
 
     if (sharedBuffer == 0) {
         mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount,
-                mFrameSize);
+                mFrameSize, !isExternalTrack(), sampleRate);
     } else {
         mAudioTrackServerProxy = new StaticAudioTrackServerProxy(mCblk, mBuffer, frameCount,
                 mFrameSize);
@@ -463,7 +487,7 @@
             Mutex::Autolock _l(thread->mLock);
             PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
             bool wasActive = playbackThread->destroyTrack_l(this);
-            if (!isOutputTrack() && !wasActive) {
+            if (isExternalTrack() && !wasActive) {
                 AudioSystem::releaseOutput(thread->id());
             }
         }
@@ -1122,7 +1146,8 @@
             int sessionId,
             int uid)
     : Track(thread, client, streamType, sampleRate, format, channelMask,
-            frameCount, sharedBuffer, sessionId, uid, IAudioFlinger::TRACK_TIMED),
+            frameCount, (sharedBuffer != 0) ? sharedBuffer->pointer() : NULL, sharedBuffer,
+                    sessionId, uid, IAudioFlinger::TRACK_TIMED, TYPE_TIMED),
       mQueueHeadInFlight(false),
       mTrimQueueHeadOnRelease(false),
       mFramesPendingInQueue(0),
@@ -1617,7 +1642,7 @@
             size_t frameCount,
             int uid)
     :   Track(playbackThread, NULL, AUDIO_STREAM_CNT, sampleRate, format, channelMask, frameCount,
-                NULL, 0, uid, IAudioFlinger::TRACK_DEFAULT),
+                NULL, 0, 0, uid, IAudioFlinger::TRACK_DEFAULT, TYPE_OUTPUT),
     mActive(false), mSourceThread(sourceThread), mClientProxy(NULL)
 {
 
@@ -1825,6 +1850,75 @@
 }
 
 
+AudioFlinger::PlaybackThread::PatchTrack::PatchTrack(PlaybackThread *playbackThread,
+                                                     uint32_t sampleRate,
+                                                     audio_channel_mask_t channelMask,
+                                                     audio_format_t format,
+                                                     size_t frameCount,
+                                                     void *buffer,
+                                                     IAudioFlinger::track_flags_t flags)
+    :   Track(playbackThread, NULL, AUDIO_STREAM_CNT, sampleRate, format, channelMask, frameCount,
+              buffer, 0, 0, getuid(), flags, TYPE_PATCH),
+              mProxy(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, true, true))
+{
+    uint64_t mixBufferNs = ((uint64_t)2 * playbackThread->frameCount() * 1000000000) /
+                                                                    playbackThread->sampleRate();
+    mPeerTimeout.tv_sec = mixBufferNs / 1000000000;
+    mPeerTimeout.tv_nsec = (int) (mixBufferNs % 1000000000);
+
+    ALOGV("PatchTrack %p sampleRate %d mPeerTimeout %d.%03d sec",
+                                      this, sampleRate,
+                                      (int)mPeerTimeout.tv_sec,
+                                      (int)(mPeerTimeout.tv_nsec / 1000000));
+}
+
+AudioFlinger::PlaybackThread::PatchTrack::~PatchTrack()
+{
+}
+
+// AudioBufferProvider interface
+status_t AudioFlinger::PlaybackThread::PatchTrack::getNextBuffer(
+        AudioBufferProvider::Buffer* buffer, int64_t pts)
+{
+    ALOG_ASSERT(mPeerProxy != 0, "PatchTrack::getNextBuffer() called without peer proxy");
+    Proxy::Buffer buf;
+    buf.mFrameCount = buffer->frameCount;
+    status_t status = mPeerProxy->obtainBuffer(&buf, &mPeerTimeout);
+    ALOGV_IF(status != NO_ERROR, "PatchTrack() %p getNextBuffer status %d", this, status);
+    if (buf.mFrameCount == 0) {
+        return WOULD_BLOCK;
+    }
+    buffer->frameCount = buf.mFrameCount;
+    status = Track::getNextBuffer(buffer, pts);
+    return status;
+}
+
+void AudioFlinger::PlaybackThread::PatchTrack::releaseBuffer(AudioBufferProvider::Buffer* buffer)
+{
+    ALOG_ASSERT(mPeerProxy != 0, "PatchTrack::releaseBuffer() called without peer proxy");
+    Proxy::Buffer buf;
+    buf.mFrameCount = buffer->frameCount;
+    buf.mRaw = buffer->raw;
+    mPeerProxy->releaseBuffer(&buf);
+    TrackBase::releaseBuffer(buffer);
+}
+
+status_t AudioFlinger::PlaybackThread::PatchTrack::obtainBuffer(Proxy::Buffer* buffer,
+                                                                const struct timespec *timeOut)
+{
+    return mProxy->obtainBuffer(buffer, timeOut);
+}
+
+void AudioFlinger::PlaybackThread::PatchTrack::releaseBuffer(Proxy::Buffer* buffer)
+{
+    mProxy->releaseBuffer(buffer);
+    if (android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags) & CBLK_DISABLED) {
+        ALOGW("PatchTrack::releaseBuffer() disabled due to previous underrun, restarting");
+        start();
+    }
+    android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags);
+}
+
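Note: the PatchTrack constructor above ties its peer obtainBuffer() timeout to the owning playback thread's mix buffer duration, budgeting two buffers' worth of nanoseconds split into a timespec. A minimal standalone sketch of that computation, assuming only a frame count and sample rate as inputs (the helper name is illustrative, not an AudioFlinger API):

```cpp
#include <time.h>
#include <stdint.h>
#include <stddef.h>

// Budget roughly two mix buffers of time before a peer obtainBuffer() gives up.
static struct timespec makePeerTimeout(size_t threadFrameCount, uint32_t sampleRate) {
    const uint64_t mixBufferNs =
            (uint64_t)2 * threadFrameCount * 1000000000ULL / sampleRate;
    struct timespec t;
    t.tv_sec  = (time_t)(mixBufferNs / 1000000000ULL);
    t.tv_nsec = (long)(mixBufferNs % 1000000000ULL);
    return t;
}
```

For example, a 1024-frame thread at 48 kHz yields a timeout of about 42.7 ms.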
 // ----------------------------------------------------------------------------
 //      Record
 // ----------------------------------------------------------------------------
@@ -1872,13 +1966,18 @@
             audio_format_t format,
             audio_channel_mask_t channelMask,
             size_t frameCount,
+            void *buffer,
             int sessionId,
             int uid,
-            IAudioFlinger::track_flags_t flags)
+            IAudioFlinger::track_flags_t flags,
+            track_type type)
     :   TrackBase(thread, client, sampleRate, format,
-                  channelMask, frameCount, 0 /*sharedBuffer*/, sessionId, uid,
+                  channelMask, frameCount, buffer, sessionId, uid,
                   flags, false /*isOut*/,
-                  flags & IAudioFlinger::TRACK_FAST ? ALLOC_PIPE : ALLOC_CBLK),
+                  (type == TYPE_DEFAULT) ?
+                          ((flags & IAudioFlinger::TRACK_FAST) ? ALLOC_PIPE : ALLOC_CBLK) :
+                          ((buffer == NULL) ? ALLOC_LOCAL : ALLOC_NONE),
+                  type),
         mOverflow(false), mResampler(NULL), mRsmpOutBuffer(NULL), mRsmpOutFrameCount(0),
         // See real initialization of mRsmpInFront at RecordThread::start()
         mRsmpInUnrel(0), mRsmpInFront(0), mFramesToDrop(0), mResamplerBufferProvider(NULL)
@@ -1887,7 +1986,8 @@
         return;
     }
 
-    mServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount, mFrameSize);
+    mServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount,
+                                              mFrameSize, !isExternalTrack());
 
     uint32_t channelCount = audio_channel_count_from_in_mask(channelMask);
     // FIXME I don't understand either of the channel count checks
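Note: the widened RecordTrack constructor above now derives the buffer allocation mode from both the track type and the optional caller-supplied buffer. The decision restated as a standalone function, with enum values mirroring the ones used in this change (a sketch of the selection logic only, not the real TrackBase code):

```cpp
enum AllocType { ALLOC_CBLK, ALLOC_PIPE, ALLOC_LOCAL, ALLOC_NONE };
enum TrackType { TYPE_DEFAULT, TYPE_TIMED, TYPE_OUTPUT, TYPE_PATCH };

static AllocType chooseRecordAlloc(TrackType type, bool isFastTrack, const void* buffer) {
    if (type == TYPE_DEFAULT) {
        // Ordinary client record tracks: fast tracks get a pipe, others the shared cblk heap.
        return isFastTrack ? ALLOC_PIPE : ALLOC_CBLK;
    }
    // Patch/output style tracks: allocate locally unless the caller already supplied a buffer.
    return (buffer == nullptr) ? ALLOC_LOCAL : ALLOC_NONE;
}
```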
@@ -1949,7 +2049,7 @@
     sp<ThreadBase> thread = mThread.promote();
     if (thread != 0) {
         RecordThread *recordThread = (RecordThread *)thread.get();
-        if (recordThread->stop(this)) {
+        if (recordThread->stop(this) && isExternalTrack()) {
             AudioSystem::stopInput(recordThread->id());
         }
     }
@@ -1962,10 +2062,12 @@
     {
         sp<ThreadBase> thread = mThread.promote();
         if (thread != 0) {
-            if (mState == ACTIVE || mState == RESUMING) {
-                AudioSystem::stopInput(thread->id());
+            if (isExternalTrack()) {
+                if (mState == ACTIVE || mState == RESUMING) {
+                    AudioSystem::stopInput(thread->id());
+                }
+                AudioSystem::releaseInput(thread->id());
             }
-            AudioSystem::releaseInput(thread->id());
             Mutex::Autolock _l(thread->mLock);
             RecordThread *recordThread = (RecordThread *) thread.get();
             recordThread->destroyTrack_l(this);
@@ -2027,4 +2129,70 @@
     mFramesToDrop = 0;
 }
 
+
+AudioFlinger::RecordThread::PatchRecord::PatchRecord(RecordThread *recordThread,
+                                                     uint32_t sampleRate,
+                                                     audio_channel_mask_t channelMask,
+                                                     audio_format_t format,
+                                                     size_t frameCount,
+                                                     void *buffer,
+                                                     IAudioFlinger::track_flags_t flags)
+    :   RecordTrack(recordThread, NULL, sampleRate, format, channelMask, frameCount,
+                buffer, 0, getuid(), flags, TYPE_PATCH),
+                mProxy(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, false, true))
+{
+    uint64_t mixBufferNs = ((uint64_t)2 * recordThread->frameCount() * 1000000000) /
+                                                                recordThread->sampleRate();
+    mPeerTimeout.tv_sec = mixBufferNs / 1000000000;
+    mPeerTimeout.tv_nsec = (int) (mixBufferNs % 1000000000);
+
+    ALOGV("PatchRecord %p sampleRate %d mPeerTimeout %d.%03d sec",
+                                      this, sampleRate,
+                                      (int)mPeerTimeout.tv_sec,
+                                      (int)(mPeerTimeout.tv_nsec / 1000000));
+}
+
+AudioFlinger::RecordThread::PatchRecord::~PatchRecord()
+{
+}
+
+// AudioBufferProvider interface
+status_t AudioFlinger::RecordThread::PatchRecord::getNextBuffer(
+                                                  AudioBufferProvider::Buffer* buffer, int64_t pts)
+{
+    ALOG_ASSERT(mPeerProxy != 0, "PatchRecord::getNextBuffer() called without peer proxy");
+    Proxy::Buffer buf;
+    buf.mFrameCount = buffer->frameCount;
+    status_t status = mPeerProxy->obtainBuffer(&buf, &mPeerTimeout);
+    ALOGV_IF(status != NO_ERROR,
+             "PatchRecord() %p mPeerProxy->obtainBuffer status %d", this, status);
+    if (buf.mFrameCount == 0) {
+        return WOULD_BLOCK;
+    }
+    buffer->frameCount = buf.mFrameCount;
+    status = RecordTrack::getNextBuffer(buffer, pts);
+    return status;
+}
+
+void AudioFlinger::RecordThread::PatchRecord::releaseBuffer(AudioBufferProvider::Buffer* buffer)
+{
+    ALOG_ASSERT(mPeerProxy != 0, "PatchRecord::releaseBuffer() called without peer proxy");
+    Proxy::Buffer buf;
+    buf.mFrameCount = buffer->frameCount;
+    buf.mRaw = buffer->raw;
+    mPeerProxy->releaseBuffer(&buf);
+    TrackBase::releaseBuffer(buffer);
+}
+
+status_t AudioFlinger::RecordThread::PatchRecord::obtainBuffer(Proxy::Buffer* buffer,
+                                                               const struct timespec *timeOut)
+{
+    return mProxy->obtainBuffer(buffer, timeOut);
+}
+
+void AudioFlinger::RecordThread::PatchRecord::releaseBuffer(Proxy::Buffer* buffer)
+{
+    mProxy->releaseBuffer(buffer);
+}
+
 }; // namespace android
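Note: PatchTrack::getNextBuffer() and PatchRecord::getNextBuffer() above follow the same handoff: negotiate the frame count with the peer proxy first (bounded by the timeout computed in the constructor), bail out with WOULD_BLOCK if the peer has nothing, then let the base class fill the clamped request. A simplified sketch of that shape; PeerProxy, Buffer, and the status values are stand-ins, not the real AudioFlinger types:

```cpp
#include <time.h>
#include <cstddef>

struct Buffer { size_t frameCount; void* raw; };

struct PeerProxy {
    virtual int  obtainBuffer(Buffer* buf, const struct timespec* timeout) = 0;
    virtual void releaseBuffer(Buffer* buf) = 0;
    virtual ~PeerProxy() {}
};

enum { NO_ERROR = 0, WOULD_BLOCK = -1 };  // placeholder status values

static int getNextBufferViaPeer(PeerProxy* peer, const struct timespec* timeout,
                                Buffer* out, int (*baseGetNextBuffer)(Buffer*)) {
    Buffer peerBuf;
    peerBuf.frameCount = out->frameCount;
    peerBuf.raw = nullptr;
    peer->obtainBuffer(&peerBuf, timeout);
    if (peerBuf.frameCount == 0) {
        return WOULD_BLOCK;                 // peer produced/consumed nothing in time
    }
    out->frameCount = peerBuf.frameCount;   // never ask the base class for more frames
    return baseGetNextBuffer(out);          // than the peer side can handle
}
```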
diff --git a/services/audioflinger/tests/mixer_to_wav_tests.sh b/services/audioflinger/tests/mixer_to_wav_tests.sh
index 93bff47..9b39e77 100755
--- a/services/audioflinger/tests/mixer_to_wav_tests.sh
+++ b/services/audioflinger/tests/mixer_to_wav_tests.sh
@@ -72,9 +72,9 @@
 # track__Resample / track__genericResample
 # track__NoResample / track__16BitsStereo / track__16BitsMono
 # Aux buffer
-    adb shell test-mixer $1 -s 9307 \
+    adb shell test-mixer $1 -c 5 -s 9307 \
         -a /sdcard/aux9307gra.wav -o /sdcard/tm9307gra.wav \
-        sine:2,1000,3000 sine:1,2000,9307 chirp:2,9307
+        sine:4,1000,3000 sine:1,2000,9307 chirp:3,9307
     adb pull /sdcard/tm9307gra.wav $2
     adb pull /sdcard/aux9307gra.wav $2
 
diff --git a/services/audioflinger/tests/resampler_tests.cpp b/services/audioflinger/tests/resampler_tests.cpp
index 8624b62..169ce02 100644
--- a/services/audioflinger/tests/resampler_tests.cpp
+++ b/services/audioflinger/tests/resampler_tests.cpp
@@ -29,6 +29,7 @@
 #include <math.h>
 #include <vector>
 #include <utility>
+#include <iostream>
 #include <cutils/log.h>
 #include <gtest/gtest.h>
 #include <media/AudioBufferProvider.h>
@@ -153,6 +154,9 @@
     return accum / count;
 }
 
+// TI = resampler input type, int16_t or float
+// TO = resampler output type, int32_t or float
+template <typename TI, typename TO>
 void testStopbandDownconversion(size_t channels,
         unsigned inputFreq, unsigned outputFreq,
         unsigned passband, unsigned stopband,
@@ -161,20 +165,21 @@
     // create the provider
     std::vector<int> inputIncr;
     SignalProvider provider;
-    provider.setChirp<int16_t>(channels,
+    provider.setChirp<TI>(channels,
             0., inputFreq/2., inputFreq, inputFreq/2000.);
     provider.setIncr(inputIncr);
 
     // calculate the output size
     size_t outputFrames = ((int64_t) provider.getNumFrames() * outputFreq) / inputFreq;
-    size_t outputFrameSize = channels * sizeof(int32_t);
+    size_t outputFrameSize = channels * sizeof(TO);
     size_t outputSize = outputFrameSize * outputFrames;
     outputSize &= ~7;
 
     // create the resampler
     android::AudioResampler* resampler;
 
-    resampler = android::AudioResampler::create(AUDIO_FORMAT_PCM_16_BIT,
+    resampler = android::AudioResampler::create(
+            is_same<TI, int16_t>::value ? AUDIO_FORMAT_PCM_16_BIT : AUDIO_FORMAT_PCM_FLOAT,
             channels, outputFreq, quality);
     resampler->setSampleRate(inputFreq);
     resampler->setVolume(android::AudioResampler::UNITY_GAIN_FLOAT,
@@ -186,7 +191,7 @@
     void* reference = malloc(outputSize);
     resample(channels, reference, outputFrames, refIncr, &provider, resampler);
 
-    int32_t *out = reinterpret_cast<int32_t *>(reference);
+    TO *out = reinterpret_cast<TO *>(reference);
 
     // check signal energy in passband
     const unsigned passbandFrame = passband * outputFreq / 1000.;
@@ -206,10 +211,10 @@
                 provider.getNumFrames(), outputFrames,
                 passbandFrame, stopbandFrame, stopbandEnergy, passbandEnergy, dbAtten);
         for (size_t i = 0; i < 10; ++i) {
-            printf("%d\n", out[i+passbandFrame*channels]);
+            std::cout << out[i+passbandFrame*channels] << std::endl;
         }
         for (size_t i = 0; i < 10; ++i) {
-            printf("%d\n", out[i+stopbandFrame*channels]);
+            std::cout << out[i+stopbandFrame*channels] << std::endl;
         }
 #endif
     }
@@ -292,7 +297,7 @@
  * are properly suppressed.  It uses downsampling because the stopband can be
  * clearly isolated by input frequencies exceeding the output sample rate (nyquist).
  */
-TEST(audioflinger_resampler, stopbandresponse) {
+TEST(audioflinger_resampler, stopbandresponse_integer) {
     // not all of these may work (old resamplers fail on downsampling)
     static const enum android::AudioResampler::src_quality kQualityArray[] = {
             //android::AudioResampler::LOW_QUALITY,
@@ -307,13 +312,100 @@
     // in this test we assume a maximum transition band between 12kHz and 20kHz.
     // there must be at least 60dB relative attenuation between stopband and passband.
     for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
-        testStopbandDownconversion(2, 48000, 32000, 12000, 20000, kQualityArray[i]);
+        testStopbandDownconversion<int16_t, int32_t>(
+                2, 48000, 32000, 12000, 20000, kQualityArray[i]);
     }
 
     // in this test we assume a maximum transition band between 7kHz and 15kHz.
     // there must be at least 60dB relative attenuation between stopband and passband.
     // (the weird ratio triggers interpolative resampling)
     for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
-        testStopbandDownconversion(2, 48000, 22101, 7000, 15000, kQualityArray[i]);
+        testStopbandDownconversion<int16_t, int32_t>(
+                2, 48000, 22101, 7000, 15000, kQualityArray[i]);
     }
 }
+
+TEST(audioflinger_resampler, stopbandresponse_integer_multichannel) {
+    // not all of these may work (old resamplers fail on downsampling)
+    static const enum android::AudioResampler::src_quality kQualityArray[] = {
+            //android::AudioResampler::LOW_QUALITY,
+            //android::AudioResampler::MED_QUALITY,
+            //android::AudioResampler::HIGH_QUALITY,
+            //android::AudioResampler::VERY_HIGH_QUALITY,
+            android::AudioResampler::DYN_LOW_QUALITY,
+            android::AudioResampler::DYN_MED_QUALITY,
+            android::AudioResampler::DYN_HIGH_QUALITY,
+    };
+
+    // in this test we assume a maximum transition band between 12kHz and 20kHz.
+    // there must be at least 60dB relative attenuation between stopband and passband.
+    for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+        testStopbandDownconversion<int16_t, int32_t>(
+                8, 48000, 32000, 12000, 20000, kQualityArray[i]);
+    }
+
+    // in this test we assume a maximum transition band between 7kHz and 15kHz.
+    // there must be at least 60dB relative attenuation between stopband and passband.
+    // (the weird ratio triggers interpolative resampling)
+    for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+        testStopbandDownconversion<int16_t, int32_t>(
+                8, 48000, 22101, 7000, 15000, kQualityArray[i]);
+    }
+}
+
+TEST(audioflinger_resampler, stopbandresponse_float) {
+    // not all of these may work (old resamplers fail on downsampling)
+    static const enum android::AudioResampler::src_quality kQualityArray[] = {
+            //android::AudioResampler::LOW_QUALITY,
+            //android::AudioResampler::MED_QUALITY,
+            //android::AudioResampler::HIGH_QUALITY,
+            //android::AudioResampler::VERY_HIGH_QUALITY,
+            android::AudioResampler::DYN_LOW_QUALITY,
+            android::AudioResampler::DYN_MED_QUALITY,
+            android::AudioResampler::DYN_HIGH_QUALITY,
+    };
+
+    // in this test we assume a maximum transition band between 12kHz and 20kHz.
+    // there must be at least 60dB relative attenuation between stopband and passband.
+    for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+        testStopbandDownconversion<float, float>(
+                2, 48000, 32000, 12000, 20000, kQualityArray[i]);
+    }
+
+    // in this test we assume a maximum transition band between 7kHz and 15kHz.
+    // there must be at least 60dB relative attenuation between stopband and passband.
+    // (the weird ratio triggers interpolative resampling)
+    for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+        testStopbandDownconversion<float, float>(
+                2, 48000, 22101, 7000, 15000, kQualityArray[i]);
+    }
+}
+
+TEST(audioflinger_resampler, stopbandresponse_float_multichannel) {
+    // not all of these may work (old resamplers fail on downsampling)
+    static const enum android::AudioResampler::src_quality kQualityArray[] = {
+            //android::AudioResampler::LOW_QUALITY,
+            //android::AudioResampler::MED_QUALITY,
+            //android::AudioResampler::HIGH_QUALITY,
+            //android::AudioResampler::VERY_HIGH_QUALITY,
+            android::AudioResampler::DYN_LOW_QUALITY,
+            android::AudioResampler::DYN_MED_QUALITY,
+            android::AudioResampler::DYN_HIGH_QUALITY,
+    };
+
+    // in this test we assume a maximum transition band between 12kHz and 20kHz.
+    // there must be at least 60dB relative attenuation between stopband and passband.
+    for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+        testStopbandDownconversion<float, float>(
+                8, 48000, 32000, 12000, 20000, kQualityArray[i]);
+    }
+
+    // in this test we assume a maximum transition band between 7kHz and 15kHz.
+    // there must be at least 60dB relative attenuation between stopband and passband.
+    // (the weird ratio triggers interpolative resampling)
+    for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+        testStopbandDownconversion<float, float>(
+                8, 48000, 22101, 7000, 15000, kQualityArray[i]);
+    }
+}
+
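Note: templating testStopbandDownconversion on the input/output sample types lets the same stopband measurement cover the int16_t and float resampler paths, with the input format passed to AudioResampler::create() derived from the type parameter. A small standalone illustration of that type-to-format mapping (PcmFormat is a placeholder for the real AUDIO_FORMAT_* constants):

```cpp
#include <type_traits>
#include <cstdint>

enum PcmFormat { PCM_16_BIT, PCM_FLOAT };   // placeholder for AUDIO_FORMAT_PCM_*

template <typename TI>
constexpr PcmFormat inputFormatFor() {
    return std::is_same<TI, int16_t>::value ? PCM_16_BIT : PCM_FLOAT;
}

static_assert(inputFormatFor<int16_t>() == PCM_16_BIT, "int16_t input uses 16-bit PCM");
static_assert(inputFormatFor<float>()   == PCM_FLOAT,  "float input uses float PCM");
```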
diff --git a/services/audioflinger/tests/test-mixer.cpp b/services/audioflinger/tests/test-mixer.cpp
index 3940702..5a00f40 100644
--- a/services/audioflinger/tests/test-mixer.cpp
+++ b/services/audioflinger/tests/test-mixer.cpp
@@ -36,11 +36,12 @@
 using namespace android;
 
 static void usage(const char* name) {
-    fprintf(stderr, "Usage: %s [-f] [-m]"
+    fprintf(stderr, "Usage: %s [-f] [-m] [-c channels]"
                     " [-s sample-rate] [-o <output-file>] [-a <aux-buffer-file>] [-P csv]"
                     " (<input-file> | <command>)+\n", name);
     fprintf(stderr, "    -f    enable floating point input track\n");
     fprintf(stderr, "    -m    enable floating point mixer output\n");
+    fprintf(stderr, "    -c    number of mixer output channels\n");
     fprintf(stderr, "    -s    mixer sample-rate\n");
     fprintf(stderr, "    -o    <output-file> WAV file, pcm16 (or float if -m specified)\n");
     fprintf(stderr, "    -a    <aux-buffer-file>\n");
@@ -90,7 +91,7 @@
     std::vector<int32_t> Names;
     std::vector<SignalProvider> Providers;
 
-    for (int ch; (ch = getopt(argc, argv, "fms:o:a:P:")) != -1;) {
+    for (int ch; (ch = getopt(argc, argv, "fmc:s:o:a:P:")) != -1;) {
         switch (ch) {
         case 'f':
             useInputFloat = true;
@@ -98,6 +99,9 @@
         case 'm':
             useMixerFloat = true;
             break;
+        case 'c':
+            outputChannels = atoi(optarg);
+            break;
         case 's':
             outputSampleRate = atoi(optarg);
             break;
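Note: the new -c option simply feeds atoi(optarg) into the mixer's output channel count; the "-c 5" added to mixer_to_wav_tests.sh earlier in this change relies on it. A trimmed, runnable sketch of the option handling (only -c and -s are shown, with assumed defaults):

```cpp
#include <cstdio>
#include <cstdlib>
#include <unistd.h>

int main(int argc, char* argv[]) {
    int outputChannels = 2;        // stereo unless -c overrides it
    int outputSampleRate = 48000;  // assumed default for this sketch
    for (int ch; (ch = getopt(argc, argv, "c:s:")) != -1;) {
        switch (ch) {
        case 'c': outputChannels = atoi(optarg); break;
        case 's': outputSampleRate = atoi(optarg); break;
        default:
            fprintf(stderr, "usage: %s [-c channels] [-s sample-rate]\n", argv[0]);
            return 2;
        }
    }
    printf("channels=%d sample-rate=%d\n", outputChannels, outputSampleRate);
    return 0;
}
```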
@@ -160,7 +164,7 @@
 
             parseCSV(argv[i] + strlen(sine), v);
             if (v.size() == 3) {
-                printf("creating sine(%d %d)\n", v[0], v[1]);
+                printf("creating sine(%d %d %d)\n", v[0], v[1], v[2]);
                 if (useInputFloat) {
                     Providers[i].setSine<float>(v[0], v[1], v[2], kSeconds);
                 } else {
@@ -191,6 +195,8 @@
     const size_t outputFrameSize = outputChannels
             * (useMixerFloat ? sizeof(float) : sizeof(int16_t));
     const size_t outputSize = outputFrames * outputFrameSize;
+    const audio_channel_mask_t outputChannelMask =
+            audio_channel_out_mask_from_count(outputChannels);
     void *outputAddr = NULL;
     (void) posix_memalign(&outputAddr, 32, outputSize);
     memset(outputAddr, 0, outputSize);
@@ -224,15 +230,29 @@
         Names.push_back(name);
         mixer->setBufferProvider(name, &Providers[i]);
         mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MAIN_BUFFER,
-                (void *) outputAddr);
+                (void *)outputAddr);
         mixer->setParameter(
                 name,
                 AudioMixer::TRACK,
-                AudioMixer::MIXER_FORMAT, (void *)mixerFormat);
-        mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::FORMAT,
+                AudioMixer::MIXER_FORMAT,
+                (void *)(uintptr_t)mixerFormat);
+        mixer->setParameter(
+                name,
+                AudioMixer::TRACK,
+                AudioMixer::FORMAT,
                 (void *)(uintptr_t)inputFormat);
         mixer->setParameter(
                 name,
+                AudioMixer::TRACK,
+                AudioMixer::MIXER_CHANNEL_MASK,
+                (void *)(uintptr_t)outputChannelMask);
+        mixer->setParameter(
+                name,
+                AudioMixer::TRACK,
+                AudioMixer::CHANNEL_MASK,
+                (void *)(uintptr_t)channelMask);
+        mixer->setParameter(
+                name,
                 AudioMixer::RESAMPLE,
                 AudioMixer::SAMPLE_RATE,
                 (void *)(uintptr_t)Providers[i].getSampleRate());
diff --git a/services/audioflinger/tests/test_utils.h b/services/audioflinger/tests/test_utils.h
index f954292..e446216 100644
--- a/services/audioflinger/tests/test_utils.h
+++ b/services/audioflinger/tests/test_utils.h
@@ -195,7 +195,7 @@
         T yt = convertValue<T>(y);
 
         for (size_t j = 0; j < channels; ++j) {
-            buffer[i*channels + j] = yt / (j + 1);
+            buffer[i*channels + j] = yt / T(j + 1);
         }
     }
 }
@@ -221,7 +221,7 @@
         T yt = convertValue<T>(y);
 
         for (size_t j = 0; j < channels; ++j) {
-            buffer[i*channels + j] = yt / (j + 1);
+            buffer[i*channels + j] = yt / T(j + 1);
         }
     }
 }
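Note: the cast to T in the per-channel attenuation above matters once these helpers are instantiated for integer sample types: dividing a signed sample by the unsigned (j + 1) first converts the sample to an unsigned value, which wrecks negative samples. A tiny standalone demonstration of the difference:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
    int16_t yt = -1000;
    size_t j = 1;
    // Unsigned divisor: yt is converted to size_t, so the quotient is garbage.
    size_t wrong = yt / (j + 1);
    // Casting the divisor to the sample type keeps the division signed.
    int16_t right = yt / int16_t(j + 1);
    printf("wrong=%zu right=%d\n", wrong, right);  // right == -500
    return 0;
}
```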
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index ed66e58..d45776b 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -112,7 +112,8 @@
                                     uint32_t samplingRate,
                                     audio_format_t format,
                                     audio_channel_mask_t channelMask,
-                                    audio_in_acoustics_t acoustics) = 0;
+                                    audio_in_acoustics_t acoustics,
+                                    audio_input_flags_t flags) = 0;
     // indicates to the audio policy manager that the input starts being used.
     virtual status_t startInput(audio_io_handle_t input) = 0;
     // indicates to the audio policy manager that the input stops being used.
diff --git a/services/audiopolicy/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
index a41721f..4a55bec 100644
--- a/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
@@ -215,7 +215,7 @@
                                     audio_format_t format,
                                     audio_channel_mask_t channelMask,
                                     int audioSession,
-                                    audio_input_flags_t flags __unused)
+                                    audio_input_flags_t flags)
 {
     if (mAudioPolicyManager == NULL) {
         return 0;
@@ -232,7 +232,8 @@
     Mutex::Autolock _l(mLock);
     // the audio_in_acoustics_t parameter is ignored by get_input()
     audio_io_handle_t input = mAudioPolicyManager->getInput(inputSource, samplingRate,
-                                                   format, channelMask, (audio_in_acoustics_t) 0);
+                                                   format, channelMask, (audio_in_acoustics_t) 0,
+                                                   flags);
 
     if (input == 0) {
         return input;
diff --git a/services/audiopolicy/AudioPolicyManager.cpp b/services/audiopolicy/AudioPolicyManager.cpp
index cca1b34..737cacd 100644
--- a/services/audiopolicy/AudioPolicyManager.cpp
+++ b/services/audiopolicy/AudioPolicyManager.cpp
@@ -1055,13 +1055,14 @@
                                     uint32_t samplingRate,
                                     audio_format_t format,
                                     audio_channel_mask_t channelMask,
-                                    audio_in_acoustics_t acoustics)
+                                    audio_in_acoustics_t acoustics,
+                                    audio_input_flags_t flags)
 {
-    audio_io_handle_t input = 0;
-    audio_devices_t device = getDeviceForInputSource(inputSource);
+    ALOGV("getInput() inputSource %d, samplingRate %d, format %d, channelMask %x, acoustics %x, "
+          "flags %#x",
+          inputSource, samplingRate, format, channelMask, acoustics, flags);
 
-    ALOGV("getInput() inputSource %d, samplingRate %d, format %d, channelMask %x, acoustics %x",
-          inputSource, samplingRate, format, channelMask, acoustics);
+    audio_devices_t device = getDeviceForInputSource(inputSource);
 
     if (device == AUDIO_DEVICE_NONE) {
         ALOGW("getInput() could not find device for inputSource %d", inputSource);
@@ -1069,7 +1070,7 @@
     }
 
     // adapt channel selection to input source
-    switch(inputSource) {
+    switch (inputSource) {
     case AUDIO_SOURCE_VOICE_UPLINK:
         channelMask = AUDIO_CHANNEL_IN_VOICE_UPLINK;
         break;
@@ -1086,11 +1087,12 @@
     sp<IOProfile> profile = getInputProfile(device,
                                          samplingRate,
                                          format,
-                                         channelMask);
+                                         channelMask,
+                                         flags);
     if (profile == 0) {
-        ALOGW("getInput() could not find profile for device %04x, samplingRate %d, format %d, "
-                "channelMask %04x",
-                device, samplingRate, format, channelMask);
+        ALOGW("getInput() could not find profile for device 0x%X, samplingRate %u, format %#x, "
+                "channelMask 0x%X, flags %#x",
+                device, samplingRate, format, channelMask, flags);
         return 0;
     }
 
@@ -1107,19 +1109,21 @@
     inputDesc->mFormat = format;
     inputDesc->mChannelMask = channelMask;
     inputDesc->mRefCount = 0;
-    input = mpClientInterface->openInput(profile->mModule->mHandle,
+    inputDesc->mOpenRefCount = 1;
+
+    audio_io_handle_t input = mpClientInterface->openInput(profile->mModule->mHandle,
                                     &inputDesc->mDevice,
                                     &inputDesc->mSamplingRate,
                                     &inputDesc->mFormat,
                                     &inputDesc->mChannelMask,
-                                    AUDIO_INPUT_FLAG_FAST /*FIXME*/);
+                                    flags);
 
     // only accept input with the exact requested set of parameters
     if (input == 0 ||
         (samplingRate != inputDesc->mSamplingRate) ||
         (format != inputDesc->mFormat) ||
         (channelMask != inputDesc->mChannelMask)) {
-        ALOGI("getInput() failed opening input: samplingRate %d, format %d, channelMask %x",
+        ALOGW("getInput() failed opening input: samplingRate %d, format %d, channelMask %x",
                 samplingRate, format, channelMask);
         if (input != 0) {
             mpClientInterface->closeInput(input);
@@ -1141,37 +1145,41 @@
     }
     sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(index);
 
-#ifdef AUDIO_POLICY_TEST
-    if (mTestInput == 0)
-#endif //AUDIO_POLICY_TEST
-    {
-        // refuse 2 active AudioRecord clients at the same time except if the active input
-        // uses AUDIO_SOURCE_HOTWORD in which case it is closed.
+    // virtual input devices are compatible with other input devices
+    if (!isVirtualInputDevice(inputDesc->mDevice)) {
+
+        // for a non-virtual input device, check if there is another (non-virtual) active input
         audio_io_handle_t activeInput = getActiveInput();
-        if (!isVirtualInputDevice(inputDesc->mDevice) && activeInput != 0) {
+        if (activeInput != 0 && activeInput != input) {
+
+            // If the already active input uses AUDIO_SOURCE_HOTWORD then it is closed,
+            // otherwise the active input continues and the new input cannot be started.
             sp<AudioInputDescriptor> activeDesc = mInputs.valueFor(activeInput);
             if (activeDesc->mInputSource == AUDIO_SOURCE_HOTWORD) {
-                ALOGW("startInput() preempting already started low-priority input %d", activeInput);
+                ALOGW("startInput(%d) preempting low-priority input %d", input, activeInput);
                 stopInput(activeInput);
                 releaseInput(activeInput);
             } else {
-                ALOGW("startInput() input %d failed: other input already started", input);
+                ALOGE("startInput(%d) failed: other input %d already started", input, activeInput);
                 return INVALID_OPERATION;
             }
         }
     }
 
-    setInputDevice(input, getNewInputDevice(input), true /* force */);
+    if (inputDesc->mRefCount == 0) {
+        setInputDevice(input, getNewInputDevice(input), true /* force */);
 
-    // automatically enable the remote submix output when input is started
-    if (audio_is_remote_submix_device(inputDesc->mDevice)) {
-        setDeviceConnectionState(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
-                AUDIO_POLICY_DEVICE_STATE_AVAILABLE, AUDIO_REMOTE_SUBMIX_DEVICE_ADDRESS);
+        // Automatically enable the remote submix output when input is started.
+        // For remote submix (a virtual device), we open only one input per capture request.
+        if (audio_is_remote_submix_device(inputDesc->mDevice)) {
+            setDeviceConnectionState(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
+                    AUDIO_POLICY_DEVICE_STATE_AVAILABLE, AUDIO_REMOTE_SUBMIX_DEVICE_ADDRESS);
+        }
     }
 
     ALOGV("AudioPolicyManager::startInput() input source = %d", inputDesc->mInputSource);
 
-    inputDesc->mRefCount = 1;
+    inputDesc->mRefCount++;
     return NO_ERROR;
 }
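Note: startInput() above now distinguishes three cases for non-virtual input devices: preempt an active AUDIO_SOURCE_HOTWORD capture, refuse to start over any other active capture, and perform routing only on the first (0 to 1) reference-count transition. A schematic restatement with simplified stand-in types (names and status values are illustrative, not the AudioPolicyManager ones):

```cpp
#include <cstdint>

enum Source { SOURCE_MIC, SOURCE_HOTWORD };
enum { NO_ERROR = 0, INVALID_OPERATION = -1 };   // placeholder status values

struct InputDesc {
    Source   source;
    uint32_t refCount;
    bool     onVirtualDevice;
};

// activeDesc is null when no other input is active; *preemptActive tells the
// caller to stop and release the active input before proceeding.
static int startInputPolicy(InputDesc* desc, InputDesc* activeDesc, bool* preemptActive) {
    *preemptActive = false;
    if (!desc->onVirtualDevice && activeDesc != nullptr && activeDesc != desc) {
        if (activeDesc->source == SOURCE_HOTWORD) {
            *preemptActive = true;           // low-priority hotword capture gives way
        } else {
            return INVALID_OPERATION;        // refuse two concurrent real captures
        }
    }
    if (desc->refCount == 0) {
        // First client: this is where routing and remote submix enabling happen.
    }
    desc->refCount++;
    return NO_ERROR;
}
```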
 
@@ -1188,7 +1196,11 @@
     if (inputDesc->mRefCount == 0) {
         ALOGW("stopInput() input %d already stopped", input);
         return INVALID_OPERATION;
-    } else {
+    }
+
+    inputDesc->mRefCount--;
+    if (inputDesc->mRefCount == 0) {
+
         // automatically disable the remote submix output when input is stopped
         if (audio_is_remote_submix_device(inputDesc->mDevice)) {
             setDeviceConnectionState(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
@@ -1196,9 +1208,8 @@
         }
 
         resetInputDevice(input);
-        inputDesc->mRefCount = 0;
-        return NO_ERROR;
     }
+    return NO_ERROR;
 }
 
 void AudioPolicyManager::releaseInput(audio_io_handle_t input)
@@ -1209,6 +1220,18 @@
         ALOGW("releaseInput() releasing unknown input %d", input);
         return;
     }
+    sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(index);
+    ALOG_ASSERT(inputDesc != 0);
+    if (inputDesc->mOpenRefCount == 0) {
+        ALOGW("releaseInput() invalid open ref count %d", inputDesc->mOpenRefCount);
+        return;
+    }
+    inputDesc->mOpenRefCount--;
+    if (inputDesc->mOpenRefCount > 0) {
+        ALOGV("releaseInput() exit > 0");
+        return;
+    }
+
     mpClientInterface->closeInput(input);
     mInputs.removeItem(input);
     nextAudioPortGeneration();
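Note: the mOpenRefCount introduced in this change is set to 1 when getInput() opens the input, and releaseInput() above only closes the input and removes it from mInputs once that count drops back to zero, while mRefCount continues to track active (started) clients. A minimal sketch of the release side of that bookkeeping; where the count would be incremented beyond 1 is outside this hunk, so the sketch shows only what the diff itself establishes:

```cpp
#include <cstdint>
#include <cstdio>

struct OpenCountedInput {
    uint32_t openRefCount;

    OpenCountedInput() : openRefCount(1) {}   // set to 1 when getInput() opens the input

    // Returns true when the caller should actually close the underlying input.
    bool release() {
        if (openRefCount == 0) {
            fprintf(stderr, "releaseInput(): invalid open ref count\n");
            return false;
        }
        return --openRefCount == 0;
    }
};
```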
@@ -1874,7 +1897,8 @@
                                                        patch->sources[0].sample_rate,
                                                      patch->sources[0].format,
                                                      patch->sources[0].channel_mask,
-                                                     AUDIO_OUTPUT_FLAG_NONE)) {
+                                                     AUDIO_OUTPUT_FLAG_NONE /*FIXME*/)) {
+            ALOGV("createAudioPatch() profile not supported");
             return INVALID_OPERATION;
         }
         // TODO: reconfigure output format and channels here
@@ -1919,7 +1943,10 @@
                                                            patch->sinks[0].sample_rate,
                                                          patch->sinks[0].format,
                                                          patch->sinks[0].channel_mask,
-                                                         AUDIO_OUTPUT_FLAG_NONE)) {
+                                                         // FIXME for the parameter type,
+                                                         // and the NONE
+                                                         (audio_output_flags_t)
+                                                            AUDIO_INPUT_FLAG_NONE)) {
                 return INVALID_OPERATION;
             }
             // TODO: reconfigure output format and channels here
@@ -1963,9 +1990,20 @@
             srcDeviceDesc->toAudioPortConfig(&newPatch.sources[0], &patch->sources[0]);
             sinkDeviceDesc->toAudioPortConfig(&newPatch.sinks[0], &patch->sinks[0]);
 
-            // TODO: add support for devices on different HW modules
             if (srcDeviceDesc->mModule != sinkDeviceDesc->mModule) {
-                return INVALID_OPERATION;
+                SortedVector<audio_io_handle_t> outputs =
+                                        getOutputsForDevice(sinkDeviceDesc->mDeviceType, mOutputs);
+                // if the sink device is reachable via an opened output stream, request to go via
+                // this output stream by adding a second source to the patch description
+                audio_io_handle_t output = selectOutput(outputs, AUDIO_OUTPUT_FLAG_NONE);
+                if (output != AUDIO_IO_HANDLE_NONE) {
+                    sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
+                    if (outputDesc->isDuplicated()) {
+                        return INVALID_OPERATION;
+                    }
+                    outputDesc->toAudioPortConfig(&newPatch.sources[1], &patch->sources[0]);
+                    newPatch.num_sources = 2;
+                }
             }
             // TODO: check from routing capabilities in config file and other conflicting patches
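Note: removing the early return above means device-to-device patches are no longer rejected outright when source and sink sit on different HW modules: if an already opened, non-duplicated output can reach the sink device, its mix port is appended as a second patch source so the patch can be carried over that output stream. Restated as a standalone helper with simplified stand-ins for struct audio_patch (field names and status values are illustrative):

```cpp
struct PortConfig { int id; };
struct Patch {
    unsigned   num_sources;
    unsigned   num_sinks;
    PortConfig sources[2];
    PortConfig sinks[1];
};

enum { NO_ERROR = 0, INVALID_OPERATION = -1 };   // placeholder status values

// outputPort.id == 0 stands for "no suitable opened output was found".
static int routeAcrossModules(bool sameModule, PortConfig outputPort,
                              bool outputIsDuplicated, Patch* patch) {
    if (sameModule || outputPort.id == 0) {
        return NO_ERROR;                 // keep the single device-to-device source
    }
    if (outputIsDuplicated) {
        return INVALID_OPERATION;        // duplicated outputs cannot carry a patch
    }
    patch->sources[1] = outputPort;      // request routing via the opened output stream
    patch->num_sources = 2;
    return NO_ERROR;
}
```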
 
@@ -2270,12 +2308,17 @@
                 continue;
             }
 
-            audio_devices_t profileTypes = outProfile->mSupportedDevices.types();
-            if ((profileTypes & outputDeviceTypes) &&
+            audio_devices_t profileType = outProfile->mSupportedDevices.types();
+            if ((profileType & mDefaultOutputDevice->mDeviceType) != AUDIO_DEVICE_NONE) {
+                profileType = mDefaultOutputDevice->mDeviceType;
+            } else {
+                profileType = outProfile->mSupportedDevices[0]->mDeviceType;
+            }
+            if ((profileType & outputDeviceTypes) &&
                     ((outProfile->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) == 0)) {
                 sp<AudioOutputDescriptor> outputDesc = new AudioOutputDescriptor(outProfile);
 
-                outputDesc->mDevice = (audio_devices_t)(mDefaultOutputDevice->mDeviceType & profileTypes);
+                outputDesc->mDevice = profileType;
                 audio_io_handle_t output = mpClientInterface->openOutput(
                                                 outProfile->mModule->mHandle,
                                                 &outputDesc->mDevice,
@@ -2304,7 +2347,6 @@
                         mPrimaryOutput = output;
                     }
                     addOutput(output, outputDesc);
-                    ALOGI("CSTOR setOutputDevice %08x", outputDesc->mDevice);
                     setOutputDevice(output,
                                     outputDesc->mDevice,
                                     true);
@@ -2322,19 +2364,19 @@
                 continue;
             }
 
-            audio_devices_t profileTypes = inProfile->mSupportedDevices.types();
-            if (profileTypes & inputDeviceTypes) {
+            audio_devices_t profileType = inProfile->mSupportedDevices[0]->mDeviceType;
+            if (profileType & inputDeviceTypes) {
                 sp<AudioInputDescriptor> inputDesc = new AudioInputDescriptor(inProfile);
 
                 inputDesc->mInputSource = AUDIO_SOURCE_MIC;
-                inputDesc->mDevice = inProfile->mSupportedDevices[0]->mDeviceType;
+                inputDesc->mDevice = profileType;
                 audio_io_handle_t input = mpClientInterface->openInput(
                                                     inProfile->mModule->mHandle,
                                                     &inputDesc->mDevice,
                                                     &inputDesc->mSamplingRate,
                                                     &inputDesc->mFormat,
                                                     &inputDesc->mChannelMask,
-                                                    AUDIO_INPUT_FLAG_FAST /*FIXME*/);
+                                                    AUDIO_INPUT_FLAG_NONE /*FIXME*/);
 
                 if (input != 0) {
                     for (size_t k = 0; k  < inProfile->mSupportedDevices.size(); k++) {
@@ -2659,7 +2701,8 @@
                 continue;
             }
 
-            ALOGV("opening output for device %08x with params %s", device, address.string());
+            ALOGV("opening output for device %08x with params %s profile %p",
+                                                      device, address.string(), profile.get());
             desc = new AudioOutputDescriptor(profile);
             desc->mDevice = device;
             audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
@@ -2901,7 +2944,7 @@
                                             &desc->mSamplingRate,
                                             &desc->mFormat,
                                             &desc->mChannelMask,
-                                            AUDIO_INPUT_FLAG_FAST /*FIXME*/);
+                                            AUDIO_INPUT_FLAG_NONE /*FIXME*/);
 
             if (input != 0) {
                 if (!address.isEmpty()) {
@@ -3833,6 +3876,11 @@
         if (!deviceList.isEmpty()) {
             struct audio_patch patch;
             inputDesc->toAudioPortConfig(&patch.sinks[0]);
+            // AUDIO_SOURCE_HOTWORD is for internal use only:
+            // handled as AUDIO_SOURCE_VOICE_RECOGNITION by the audio HAL
+            if (patch.sinks[0].ext.mix.usecase.source == AUDIO_SOURCE_HOTWORD) {
+                patch.sinks[0].ext.mix.usecase.source = AUDIO_SOURCE_VOICE_RECOGNITION;
+            }
             patch.num_sinks = 1;
             //only one input device for now
             deviceList.itemAt(0)->toAudioPortConfig(&patch.sources[0]);
@@ -3903,7 +3951,8 @@
 sp<AudioPolicyManager::IOProfile> AudioPolicyManager::getInputProfile(audio_devices_t device,
                                                    uint32_t samplingRate,
                                                    audio_format_t format,
-                                                   audio_channel_mask_t channelMask)
+                                                   audio_channel_mask_t channelMask,
+                                                   audio_input_flags_t flags __unused)
 {
     // Choose an input profile based on the requested capture parameters: select the first available
     // profile supporting all requested parameters.
@@ -4259,14 +4308,6 @@
         device = outputDesc->device();
     }
 
-    // if volume is not 0 (not muted), force media volume to max on digital output
-    if (stream == AUDIO_STREAM_MUSIC &&
-        index != mStreams[stream].mIndexMin &&
-        (device == AUDIO_DEVICE_OUT_AUX_DIGITAL ||
-         device == AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET)) {
-        return 1.0;
-    }
-
     volume = volIndexToAmpl(device, streamDesc, index);
 
     // if a headset is connected, apply the following rules to ring tones and notifications
@@ -4766,6 +4807,9 @@
     result.append(buffer);
     snprintf(buffer, SIZE, " Ref Count %d\n", mRefCount);
     result.append(buffer);
+    snprintf(buffer, SIZE, " Open Ref Count %d\n", mOpenRefCount);
+    result.append(buffer);
+
     write(fd, result.string(), result.size());
 
     return NO_ERROR;
@@ -5316,7 +5360,9 @@
 const audio_format_t AudioPolicyManager::AudioPort::sPcmFormatCompareTable[] = {
         AUDIO_FORMAT_DEFAULT,
         AUDIO_FORMAT_PCM_16_BIT,
+        AUDIO_FORMAT_PCM_8_24_BIT,
         AUDIO_FORMAT_PCM_24_BIT_PACKED,
+        AUDIO_FORMAT_PCM_32_BIT,
 };
 
 int AudioPolicyManager::AudioPort::compareFormats(audio_format_t format1,
@@ -5943,7 +5989,7 @@
 
 void AudioPolicyManager::DeviceDescriptor::toAudioPort(struct audio_port *port) const
 {
-    ALOGV("DeviceVector::toAudioPort() handle %d type %x", mId, mDeviceType);
+    ALOGV("DeviceDescriptor::toAudioPort() handle %d type %x", mId, mDeviceType);
     AudioPort::toAudioPort(port);
     port->id = mId;
     toAudioPortConfig(&port->active_config);
diff --git a/services/audiopolicy/AudioPolicyManager.h b/services/audiopolicy/AudioPolicyManager.h
index 4caecca..e9ec78e 100644
--- a/services/audiopolicy/AudioPolicyManager.h
+++ b/services/audiopolicy/AudioPolicyManager.h
@@ -107,7 +107,8 @@
                                             uint32_t samplingRate,
                                             audio_format_t format,
                                             audio_channel_mask_t channelMask,
-                                            audio_in_acoustics_t acoustics);
+                                            audio_in_acoustics_t acoustics,
+                                            audio_input_flags_t flags);
 
         // indicates to the audio policy manager that the input starts being used.
         virtual status_t startInput(audio_io_handle_t input);
@@ -467,6 +468,7 @@
             audio_devices_t mDevice;                    // current device this input is routed to
             audio_patch_handle_t mPatchHandle;
             uint32_t mRefCount;                         // number of AudioRecord clients using this output
+            uint32_t mOpenRefCount;                 // number of clients holding this input open (getInput())
             audio_source_t mInputSource;                // input source selected by application (mediarecorder.h)
             const sp<IOProfile> mProfile;                  // I/O profile this output derives from
 
@@ -674,7 +676,8 @@
         sp<IOProfile> getInputProfile(audio_devices_t device,
                                    uint32_t samplingRate,
                                    audio_format_t format,
-                                   audio_channel_mask_t channelMask);
+                                   audio_channel_mask_t channelMask,
+                                   audio_input_flags_t flags);
         sp<IOProfile> getProfileForDirectOutput(audio_devices_t device,
                                                        uint32_t samplingRate,
                                                        audio_format_t format,
diff --git a/services/audiopolicy/AudioPolicyService.cpp b/services/audiopolicy/AudioPolicyService.cpp
index ae9cc35..7f14960 100644
--- a/services/audiopolicy/AudioPolicyService.cpp
+++ b/services/audiopolicy/AudioPolicyService.cpp
@@ -514,21 +514,23 @@
                 break;
             }
         }
-        // release delayed commands wake lock
-        if (mAudioCommands.isEmpty()) {
-            release_wake_lock(mName.string());
-        }
         // release mLock before releasing strong reference on the service as
         // AudioPolicyService destructor calls AudioCommandThread::exit() which acquires mLock.
         mLock.unlock();
         svc.clear();
         mLock.lock();
-        if (!exitPending()) {
+        if (!exitPending() && mAudioCommands.isEmpty()) {
+            // release delayed commands wake lock
+            release_wake_lock(mName.string());
             ALOGV("AudioCommandThread() going to sleep");
             mWaitWorkCV.waitRelative(mLock, waitTime);
             ALOGV("AudioCommandThread() waking up");
         }
     }
+    // release delayed commands wake lock before quitting
+    if (!mAudioCommands.isEmpty()) {
+        release_wake_lock(mName.string());
+    }
     mLock.unlock();
     return false;
 }
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 1642896..9721e13 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -906,6 +906,13 @@
                 ALOGE("%s: Camera %d: Waiting to stop streaming failed: %s (%d)",
                         __FUNCTION__, mCameraId, strerror(-res), res);
             }
+            // Clean up recording stream
+            res = mStreamingProcessor->deleteRecordingStream();
+            if (res != OK) {
+                ALOGE("%s: Camera %d: Unable to delete recording stream before "
+                        "stop preview: %s (%d)",
+                        __FUNCTION__, mCameraId, strerror(-res), res);
+            }
             // no break
         case Parameters::WAITING_FOR_PREVIEW_WINDOW: {
             SharedParameters::Lock l(mParameters);
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 3004d3e..44e8822 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -1497,6 +1497,9 @@
 
     ALOGV("%s: Camera %d: Stream configuration complete", __FUNCTION__, mId);
 
+    // Tear down the deleted streams now that stream configuration has completed.
+    mDeletedStreams.clear();
+
     return OK;
 }
 
@@ -1794,8 +1797,9 @@
                     return;
                 }
                 isPartialResult = (result->partial_result < mNumPartialResults);
-                request.partialResult.collectedResult.append(
-                    result->result);
+                if (isPartialResult) {
+                    request.partialResult.collectedResult.append(result->result);
+                }
             } else {
                 camera_metadata_ro_entry_t partialResultEntry;
                 res = find_camera_metadata_ro_entry(result->result,
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index 6cbb9f4..f963326 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -76,6 +76,8 @@
             /*out*/
             sp<Fence> *releaseFenceOut);
 
+    virtual status_t disconnectLocked();
+
     sp<ANativeWindow> mConsumer;
   private:
     int               mTransform;
@@ -91,7 +93,6 @@
             nsecs_t timestamp);
 
     virtual status_t configureQueueLocked();
-    virtual status_t disconnectLocked();
 
     virtual status_t getEndpointUsage(uint32_t *usage);
 
diff --git a/services/camera/libcameraservice/device3/Camera3ZslStream.cpp b/services/camera/libcameraservice/device3/Camera3ZslStream.cpp
index 6c298f9..92bf81b 100644
--- a/services/camera/libcameraservice/device3/Camera3ZslStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3ZslStream.cpp
@@ -318,11 +318,21 @@
 status_t Camera3ZslStream::clearInputRingBuffer() {
     Mutex::Autolock l(mLock);
 
+    return clearInputRingBufferLocked();
+}
+
+status_t Camera3ZslStream::clearInputRingBufferLocked() {
     mInputBufferQueue.clear();
 
     return mProducer->clear();
 }
 
+status_t Camera3ZslStream::disconnectLocked() {
+    clearInputRingBufferLocked();
+
+    return Camera3OutputStream::disconnectLocked();
+}
+
 status_t Camera3ZslStream::setTransform(int /*transform*/) {
     ALOGV("%s: Not implemented", __FUNCTION__);
     return INVALID_OPERATION;
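Note: splitting clearInputRingBuffer() into a public locking wrapper and a clearInputRingBufferLocked() worker is what lets disconnectLocked(), which runs with mLock already held, reuse the clearing logic without deadlocking on a second acquisition. The same pattern in a generic, self-contained form (names and std::mutex are illustrative; the real code uses the stream's own Mutex):

```cpp
#include <mutex>
#include <vector>

class RingBufferOwner {
public:
    int clear() {                     // public entry point: takes the lock itself
        std::lock_guard<std::mutex> l(mLock);
        return clearLocked();
    }

    int disconnectLocked() {          // caller already holds mLock
        clearLocked();                // safe: no re-acquisition, so no self-deadlock
        return 0;                     // then continue with the base-class disconnect
    }

private:
    int clearLocked() {               // does the work; assumes mLock is held
        mQueue.clear();
        return 0;
    }

    std::mutex mLock;
    std::vector<int> mQueue;
};
```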
diff --git a/services/camera/libcameraservice/device3/Camera3ZslStream.h b/services/camera/libcameraservice/device3/Camera3ZslStream.h
index 6721832..d89c38d 100644
--- a/services/camera/libcameraservice/device3/Camera3ZslStream.h
+++ b/services/camera/libcameraservice/device3/Camera3ZslStream.h
@@ -96,6 +96,12 @@
             bool output,
             /*out*/
             sp<Fence> *releaseFenceOut);
+
+    // Disconnect the Camera3ZslStream-specific buffer queues.
+    virtual status_t disconnectLocked();
+
+    status_t clearInputRingBufferLocked();
+
 }; // class Camera3ZslStream
 
 }; // namespace camera3